sys/dev/hme/if_hme.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *      from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually aligned on 2^11 byte boundaries,
 * and 2 bytes are skipped to make sure the header following the Ethernet
 * header is aligned on a natural boundary, so this ensures minimal wastage
 * in the most common case.
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique can be applied to UDP datagrams
 * too. However, the hardware doesn't compensate the checksum for UDP
 * datagrams, which can yield 0x0 (the reserved "no checksum" value). As a
 * safeguard, UDP checksum offload is disabled by default. It can be
 * reactivated by setting the special link option link0 with ifconfig(8).
 */
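
/*
 * Worked example of the offset choice (illustrative): a standard mbuf
 * cluster starts on a 2^11 byte boundary, so skipping HME_RXOFFS (2)
 * bytes places the 14-byte Ethernet header at offset 2 and the IP
 * header that follows it at offset 16, a naturally aligned address.
 */
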
#define HME_CSUM_FEATURES       (CSUM_TCP)
#if 0
#define HMEDEBUG
#endif
#define KTR_HME         KTR_SPARE2      /* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>

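/*
 * Compile-time sanity checks: the ERX configuration register can only
 * encode ring sizes of 32, 64, 128 or 256 descriptors (see the
 * HME_ERX_CFG_RINGSIZE* cases below), and the ETX ring size register is
 * programmed in units of 16 descriptors.
 */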
CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);

static void     hme_start(struct ifnet *);
static void     hme_start_locked(struct ifnet *);
static void     hme_stop(struct hme_softc *);
static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
static void     hme_tick(void *);
static int      hme_watchdog(struct hme_softc *);
static void     hme_init(void *);
static void     hme_init_locked(struct hme_softc *);
static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int      hme_meminit(struct hme_softc *);
static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void     hme_mifinit(struct hme_softc *);
static void     hme_setladrf(struct hme_softc *, int);

static int      hme_mediachange(struct ifnet *);
static int      hme_mediachange_locked(struct hme_softc *);
static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
static void     hme_read(struct hme_softc *, int, int, u_int32_t);
static void     hme_eint(struct hme_softc *, u_int);
static void     hme_rint(struct hme_softc *);
static void     hme_tint(struct hme_softc *);
static void     hme_rxcksum(struct mbuf *, u_int32_t);

static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define HME_SPC_READ_4(spc, sc, offs) \
        bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
            (offs))
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
        bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
            (offs), (v))
#define HME_SPC_BARRIER(spc, sc, offs, l, f) \
        bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
            (offs), (l), (f))

#define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_SEB_BARRIER(sc, offs, l, f) \
        HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
#define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ERX_BARRIER(sc, offs, l, f) \
        HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
#define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_ETX_BARRIER(sc, offs, l, f) \
        HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
#define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MAC_BARRIER(sc, offs, l, f) \
        HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
#define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
#define HME_MIF_BARRIER(sc, offs, l, f) \
        HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
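
/*
 * Illustration: thanks to the token pasting above, a call such as
 * HME_SEB_READ_4(sc, HME_SEBI_STAT) effectively expands to
 *
 *      bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, HME_SEBI_STAT)
 *
 * i.e. each register group selects its own bus space tag and handle.
 */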

#define HME_MAXERR      5
#define HME_WHINE(dev, ...) do {                                        \
        if (hme_nerr++ < HME_MAXERR)                                    \
                device_printf(dev, __VA_ARGS__);                        \
        if (hme_nerr == HME_MAXERR) {                                   \
                device_printf(dev, "too many errors; not reporting "    \
                    "any more\n");                                      \
        }                                                               \
} while(0)

/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
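/* That is, 1518 (ETHER_MAX_LEN) + 4 (802.1Q VLAN tag) = 1522 bytes. */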

int
hme_config(struct hme_softc *sc)
{
        struct ifnet *ifp;
        struct mii_softc *child;
        bus_size_t size;
        int error, rdesc, tdesc, i;

        ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                return (ENOSPC);

        /*
         * HME common initialization.
         *
         * hme_softc fields that must be initialized by the front-end:
         *
         * the DMA bus tag:
         *      sc_dmatag
         *
         * the bus handles, tags and offsets (split for SBus compatibility):
         *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
         *      sc_erx{t,h,o}   (Receiver Unit registers)
         *      sc_etx{t,h,o}   (Transmitter Unit registers)
         *      sc_mac{t,h,o}   (MAC registers)
         *      sc_mif{t,h,o}   (Management Interface registers)
         *
         * the maximum bus burst size:
         *      sc_burst
         *
         */
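
        /*
         * For illustration only (not part of this function): a bus
         * front-end such as if_hme_pci.c fills in the fields above and
         * then calls hme_config(), roughly:
         *
         *      sc->sc_dmatag = bus_get_dma_tag(dev);
         *      sc->sc_burst = 64;      (a hypothetical burst size)
         *      error = hme_config(sc);
         */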

        callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

        /* Make sure the chip is stopped. */
        HME_LOCK(sc);
        hme_stop(sc);
        HME_UNLOCK(sc);

        error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
            NULL, NULL, &sc->sc_pdmatag);
        if (error)
                goto fail_ifnet;

        /*
         * Create control, RX and TX mbuf DMA tags.
         * Buffer descriptors must be aligned on a 2048 byte boundary;
         * take this into account when calculating the size. Note that
         * the maximum number of descriptors (256, at 8 bytes each)
         * occupies exactly 2048 bytes, so we allocate that much per ring
         * regardless of HME_N*DESC.
         */
        size = 4096;
        error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
        if (error)
                goto fail_ptag;

        error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
        if (error)
                goto fail_ctag;

        error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
            NULL, NULL, &sc->sc_tdmatag);
        if (error)
                goto fail_rtag;

        /* Allocate the control DMA buffer. */
        error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
        if (error != 0) {
                device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
                goto fail_ttag;
        }

        /* Load the control DMA buffer. */
        sc->sc_rb.rb_dmabase = 0;
        if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
            sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
            sc->sc_rb.rb_dmabase == 0) {
                device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
                    error);
                goto fail_free;
        }
        CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
            sc->sc_rb.rb_dmabase);

        /*
         * Prepare the RX descriptors. rdesc serves as marker for the last
         * processed descriptor and may be used later on.
         */
        for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
                sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
                error = bus_dmamap_create(sc->sc_rdmatag, 0,
                    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
                if (error != 0)
                        goto fail_rxdesc;
        }
        error = bus_dmamap_create(sc->sc_rdmatag, 0,
            &sc->sc_rb.rb_spare_dmamap);
        if (error != 0)
                goto fail_rxdesc;
        /* Same for the TX descs. */
        for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
                sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
                error = bus_dmamap_create(sc->sc_tdmatag, 0,
                    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
                if (error != 0)
                        goto fail_txdesc;
        }

        sc->sc_csum_features = HME_CSUM_FEATURES;
        /* Initialize ifnet structure. */
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = hme_start;
        ifp->if_ioctl = hme_ioctl;
        ifp->if_init = hme_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
        ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
        IFQ_SET_READY(&ifp->if_snd);

        hme_mifinit(sc);

        /*
         * DP83840A PHYs used with HME chips don't properly advertise their
         * media capabilities themselves, so force writing the ANAR according
         * to the BMSR in mii_phy_setmedia().
         */
        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
            hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
            MII_OFFSET_ANY, MIIF_FORCEANEG);
        i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
            hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
            MII_OFFSET_ANY, MIIF_FORCEANEG);
        if (error != 0 && i != 0) {
                error = ENXIO;
                device_printf(sc->sc_dev, "attaching PHYs failed\n");
                goto fail_rxdesc;
        }
        sc->sc_mii = device_get_softc(sc->sc_miibus);

        /*
         * Walk along the list of attached MII devices and
         * establish an `MII instance' to `PHY number'
         * mapping. We'll use this mapping to enable the MII
         * drivers of the external transceiver according to
         * the currently selected media.
         */
        sc->sc_phys[0] = sc->sc_phys[1] = -1;
        LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
                /*
                 * Note: we support just two PHYs: the built-in
                 * internal device and an external on the MII
                 * connector.
                 */
                if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
                    child->mii_phy != HME_PHYAD_INTERNAL) ||
                    child->mii_inst > 1) {
                        device_printf(sc->sc_dev, "cannot accommodate "
                            "MII device %s at phy %d, instance %d\n",
                            device_get_name(child->mii_dev),
                            child->mii_phy, child->mii_inst);
                        continue;
                }

                sc->sc_phys[child->mii_inst] = child->mii_phy;
        }

        /* Attach the interface. */
        ether_ifattach(ifp, sc->sc_enaddr);

        /*
         * Tell the upper layer(s) we support long frames/checksum offloads.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
        ifp->if_hwassist |= sc->sc_csum_features;
        ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

        gone_in_dev(sc->sc_dev, 13, "10/100 NIC almost exclusively for sparc64");
        return (0);

fail_txdesc:
        for (i = 0; i < tdesc; i++) {
                bus_dmamap_destroy(sc->sc_tdmatag,
                    sc->sc_rb.rb_txdesc[i].htx_dmamap);
        }
        bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
        for (i = 0; i < rdesc; i++) {
                bus_dmamap_destroy(sc->sc_rdmatag,
                    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
        }
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
        bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
        bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
        bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
        bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
        if_free(ifp);
        return (error);
}

void
hme_detach(struct hme_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        int i;

        HME_LOCK(sc);
        hme_stop(sc);
        HME_UNLOCK(sc);
        callout_drain(&sc->sc_tick_ch);
        ether_ifdetach(ifp);
        if_free(ifp);
        device_delete_child(sc->sc_dev, sc->sc_miibus);

        for (i = 0; i < HME_NTXQ; i++) {
                bus_dmamap_destroy(sc->sc_tdmatag,
                    sc->sc_rb.rb_txdesc[i].htx_dmamap);
        }
        bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
        for (i = 0; i < HME_NRXDESC; i++) {
                bus_dmamap_destroy(sc->sc_rdmatag,
                    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
        }
        bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
        bus_dma_tag_destroy(sc->sc_tdmatag);
        bus_dma_tag_destroy(sc->sc_rdmatag);
        bus_dma_tag_destroy(sc->sc_cdmatag);
        bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
hme_suspend(struct hme_softc *sc)
{

        HME_LOCK(sc);
        hme_stop(sc);
        HME_UNLOCK(sc);
}

void
hme_resume(struct hme_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        HME_LOCK(sc);
        if ((ifp->if_flags & IFF_UP) != 0)
                hme_init_locked(sc);
        HME_UNLOCK(sc);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct hme_softc *sc = (struct hme_softc *)xsc;

        if (error != 0)
                return;
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
        struct hme_softc *sc = arg;
        struct ifnet *ifp;

        HME_LOCK_ASSERT(sc, MA_OWNED);

        ifp = sc->sc_ifp;
        /*
         * Unload collision counters
         */
        if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
                HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
                HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
                HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
                HME_MAC_READ_4(sc, HME_MACI_LTCNT));

        /*
         * then clear the hardware counters.
         */
        HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

        mii_tick(sc->sc_mii);

        if (hme_watchdog(sc) == EJUSTRETURN)
                return;

        callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_stop(struct hme_softc *sc)
{
        u_int32_t v;
        int n;

        callout_stop(&sc->sc_tick_ch);
        sc->sc_wdog_timer = 0;
        sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        sc->sc_flags &= ~HME_LINK;

        /* Mask all interrupts */
        HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);

        /* Reset transmitter and receiver */
        HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
            HME_SEB_RESET_ERX);
        HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        for (n = 0; n < 20; n++) {
                v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
                if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
                        return;
                DELAY(20);
        }

        device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

        /*
         * Dropped a packet, reinitialize the descriptor and turn the
         * ownership back to the hardware.
         */
        HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
            ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
            &sc->sc_rb.rb_rxdesc[ix])));
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
        struct hme_rxdesc *rd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        uintptr_t b;
        int a, unmap, nsegs;

        rd = &sc->sc_rb.rb_rxdesc[ri];
        unmap = rd->hrx_m != NULL;
        if (unmap && keepold) {
                /*
                 * Reinitialize the descriptor flags, as they may have been
                 * altered by the hardware.
                 */
                hme_discard_rxbuf(sc, ri);
                return (0);
        }
        if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
        b = mtod(m, uintptr_t);
        /*
         * Required alignment boundary. At least 16 is needed, but since
         * the mapping must be done in a way that a burst can start on a
         * natural boundary we might need to extend this.
         */
        a = imax(HME_MINRXALIGN, sc->sc_burst);
        /*
         * Make sure the buffer is suitably aligned. The 2 byte offset is
         * removed when the mbuf is handed up. XXX: this ensures at least
         * 16 byte alignment of the header adjacent to the Ethernet header,
         * which should be sufficient in all cases. Nevertheless, this
         * second-guesses ALIGN().
         */
        m_adj(m, roundup2(b, a) - b);
        if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
            m, segs, &nsegs, 0) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        if (unmap) {
                bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
        }
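        /*
         * Swap the just-loaded spare map with the descriptor's own map.
         * Because the new mbuf was loaded into the spare map first, a
         * load failure above can never leave this descriptor without a
         * mapped buffer.
         */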
        map = rd->hrx_dmamap;
        rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
        sc->sc_rb.rb_spare_dmamap = map;
        bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
        HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
            segs[0].ds_addr);
        rd->hrx_m = m;
        HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
            HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
        return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
        struct hme_ring *hr = &sc->sc_rb;
        struct hme_txdesc *td;
        bus_addr_t dma;
        caddr_t p;
        unsigned int i;
        int error;

        p = hr->rb_membase;
        dma = hr->rb_dmabase;

        /*
         * Allocate transmit descriptors
         */
        hr->rb_txd = p;
        hr->rb_txddma = dma;
        p += HME_NTXDESC * HME_XD_SIZE;
        dma += HME_NTXDESC * HME_XD_SIZE;
        /*
         * We have reserved descriptor space until the next 2048 byte
         * boundary.
         */
        dma = (bus_addr_t)roundup((u_long)dma, 2048);
        p = (caddr_t)roundup((u_long)p, 2048);

        /*
         * Allocate receive descriptors
         */
        hr->rb_rxd = p;
        hr->rb_rxddma = dma;
        p += HME_NRXDESC * HME_XD_SIZE;
        dma += HME_NRXDESC * HME_XD_SIZE;
        /* Again move forward to the next 2048 byte boundary. */
        dma = (bus_addr_t)roundup((u_long)dma, 2048);
        p = (caddr_t)roundup((u_long)p, 2048);

        /*
         * Initialize transmit buffer descriptors
         */
        for (i = 0; i < HME_NTXDESC; i++) {
                HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
                HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
        }

        STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
        STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
        for (i = 0; i < HME_NTXQ; i++) {
                td = &sc->sc_rb.rb_txdesc[i];
                if (td->htx_m != NULL) {
                        bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
                        m_freem(td->htx_m);
                        td->htx_m = NULL;
                }
                STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
        }

        /*
         * Initialize receive buffer descriptors
         */
        for (i = 0; i < HME_NRXDESC; i++) {
                error = hme_add_rxbuf(sc, i, 1);
                if (error != 0)
                        return (error);
        }

        bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        hr->rb_tdhead = hr->rb_tdtail = 0;
        hr->rb_td_nbusy = 0;
        hr->rb_rdtail = 0;
        CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
            hr->rb_txddma);
        CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
            hr->rb_rxddma);
        CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
            *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
        CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
            *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
        return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
        int i = 0;

        val &= ~clr;
        val |= set;
        HME_MAC_WRITE_4(sc, reg, val);
        HME_MAC_BARRIER(sc, reg, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (clr == 0 && set == 0)
                return (1);     /* just write, no bits to wait for */
        do {
                DELAY(100);
                i++;
                val = HME_MAC_READ_4(sc, reg);
                if (i > 40) {
                        /* After 3.5ms, we should have been done. */
                        device_printf(sc->sc_dev, "timeout while writing to "
                            "MAC configuration register\n");
                        return (0);
                }
        } while ((val & clr) != 0 && (val & set) != set);
        return (1);
}
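
/*
 * Usage sketch (illustrative, not a call made at this point in the
 * driver): to clear the RX MAC enable bit and learn whether the change
 * settled within the ~4 ms budget, a caller can do
 *
 *      v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 *      if (hme_mac_bitflip(sc, HME_MACI_RXCFG, v,
 *          HME_MAC_RXCFG_ENABLE, 0) == 0)
 *              (the requested bits did not settle in time)
 */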

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
        struct hme_softc *sc = (struct hme_softc *)xsc;

        HME_LOCK(sc);
        hme_init_locked(sc);
        HME_UNLOCK(sc);
}

static void
hme_init_locked(struct hme_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        u_int8_t *ea;
        u_int32_t n, v;

        HME_LOCK_ASSERT(sc, MA_OWNED);

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return;

        /*
         * Initialization sequence. The numbered steps below correspond
         * to the sequence outlined in section 6.3.5.1 in the Ethernet
         * Channel Engine manual (part of the PCIO manual).
         * See also the STP2002-STQ document from Sun Microsystems.
         */

        /* step 1 & 2. Reset the Ethernet Channel */
        hme_stop(sc);

        /* Re-initialize the MIF */
        hme_mifinit(sc);

#if 0
        /* Mask all MIF interrupts, just in case */
        HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

        /* step 3. Setup data structures in host memory */
        if (hme_meminit(sc) != 0) {
                device_printf(sc->sc_dev, "out of buffers; init aborted\n");
                return;
        }

        /* step 4. TX MAC registers & counters */
        HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
        HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

        /* Load station MAC address */
        ea = IF_LLADDR(ifp);
        HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
        HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
        HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

        /*
         * Init seed for backoff
         * (source suggested by manual: low 10 bits of MAC address)
         */
        v = ((ea[4] << 8) | ea[5]) & 0x3fff;
        HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

        /* Note: accepting the power-on default for other MAC registers here. */

        /* step 5. RX MAC registers & counters */
        hme_setladrf(sc, 0);

        /* step 6 & 7. Program Descriptor Ring Base Addresses */
        HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
        /* Transmit Descriptor ring size: in increments of 16 */
        HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

        HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
        HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

        /* step 8. Global Configuration & Interrupt Mask */
        HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
            ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
                HME_SEB_STAT_HOSTTOTX |
                HME_SEB_STAT_RXTOHOST |
                HME_SEB_STAT_TXALL |
                HME_SEB_STAT_TXPERR |
                HME_SEB_STAT_RCNTEXP |
                HME_SEB_STAT_ALL_ERRORS ));

        switch (sc->sc_burst) {
        default:
                v = 0;
                break;
        case 16:
                v = HME_SEB_CFG_BURST16;
                break;
        case 32:
                v = HME_SEB_CFG_BURST32;
                break;
        case 64:
                v = HME_SEB_CFG_BURST64;
                break;
        }
        /*
         * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?).
         * Allowing 64-bit transfers also breaks TX checksum offload. It is
         * unknown whether this stems from a hardware bug or from the
         * driver's DMA scheme.
         *
         * if (sc->sc_flags & HME_PCI == 0)
         *      v |= HME_SEB_CFG_64BIT;
         */
        HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

        /* step 9. ETX Configuration: use mostly default values */

        /* Enable DMA */
        v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
        v |= HME_ETX_CFG_DMAENABLE;
        HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

        /* step 10. ERX Configuration */
        v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

        /* Encode Receive Descriptor ring size: four possible values */
        v &= ~HME_ERX_CFG_RINGSIZEMSK;
        switch (HME_NRXDESC) {
        case 32:
                v |= HME_ERX_CFG_RINGSIZE32;
                break;
        case 64:
                v |= HME_ERX_CFG_RINGSIZE64;
                break;
        case 128:
                v |= HME_ERX_CFG_RINGSIZE128;
                break;
        case 256:
                v |= HME_ERX_CFG_RINGSIZE256;
                break;
        default:
                printf("hme: invalid Receive Descriptor ring size\n");
                break;
        }

        /* Enable DMA, fix RX first byte offset. */
        v &= ~HME_ERX_CFG_FBO_MASK;
        v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
        /*
         * RX TCP/UDP checksum start offset, in half-words:
         * (14-byte Ethernet header + 20-byte IP header) / 2 = 17.
         */
        n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
        n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
        v |= n;
        CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
        HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

        /* step 11. XIF Configuration */
        v = HME_MAC_READ_4(sc, HME_MACI_XIF);
        v |= HME_MAC_XIF_OE;
        CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
        HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

        /* step 12. RX_MAC Configuration Register */
        v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
        v |= HME_MAC_RXCFG_ENABLE;
        v &= ~(HME_MAC_RXCFG_DCRCS);
        CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
        HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

        /* step 13. TX_MAC Configuration Register */
        v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
        v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
        CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
        HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

        /* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
        /* Debug: double-check. */
        CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
            "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
            HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
            HME_ERX_READ_4(sc, HME_ERXI_RING),
            HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
        CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
            HME_SEB_READ_4(sc, HME_SEBI_IMASK),
            HME_ERX_READ_4(sc, HME_ERXI_CFG),
            HME_ETX_READ_4(sc, HME_ETXI_CFG));
        CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
            HME_MAC_READ_4(sc, HME_MACI_RXCFG),
            HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Set the current media. */
        hme_mediachange_locked(sc);

        /* Start the one second timer. */
        sc->sc_wdog_timer = 0;
        callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, ENOBUFS if there were not enough free descriptors
 * to map the packet, or another errno otherwise.
 *
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
 * boundary on (i.e. potentially before ds_addr) to the first
 * boundary beyond the end.  This is usually a safe assumption to
 * make, but is not documented.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
{
        bus_dma_segment_t segs[HME_NTXSEGS];
        struct hme_txdesc *htx;
        struct ip *ip;
        struct mbuf *m;
        caddr_t txd;
        int error, i, nsegs, pci, ri, si;
        uint32_t cflags, flags;

        if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
                return (ENOBUFS);

        cflags = 0;
        if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
                if (M_WRITABLE(*m0) == 0) {
                        m = m_dup(*m0, M_NOWAIT);
                        m_freem(*m0);
                        *m0 = m;
                        if (m == NULL)
                                return (ENOBUFS);
                }
                i = sizeof(struct ether_header);
                m = m_pullup(*m0, i + sizeof(struct ip));
                if (m == NULL) {
                        *m0 = NULL;
                        return (ENOBUFS);
                }
                ip = (struct ip *)(mtod(m, caddr_t) + i);
                i += (ip->ip_hl << 2);
                cflags = i << HME_XD_TXCKSUM_SSHIFT |
                    ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
                    HME_XD_TXCKSUM;
                *m0 = m;
        }
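        /*
         * Worked example for the descriptor checksum fields: for a TCP
         * segment without IP options, i ends up as 14 + 20 = 34, so the
         * chip is told to start summing at offset 34 and to stuff the
         * result at offset 34 + csum_data (the checksum field within the
         * TCP header).
         */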

        error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
            *m0, segs, &nsegs, 0);
        if (error == EFBIG) {
                m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
                if (m == NULL) {
                        m_freem(*m0);
                        *m0 = NULL;
                        return (ENOMEM);
                }
                *m0 = m;
                error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
                    *m0, segs, &nsegs, 0);
                if (error != 0) {
                        m_freem(*m0);
                        *m0 = NULL;
                        return (error);
                }
        } else if (error != 0)
                return (error);
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs <= HME_NTXSEGS,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        if (nsegs == 0) {
                m_freem(*m0);
                *m0 = NULL;
                return (EIO);
        }
        if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
                bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
                /* Retry with m_collapse(9)? */
                return (ENOBUFS);
        }
        bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);

        si = ri = sc->sc_rb.rb_tdhead;
        txd = sc->sc_rb.rb_txd;
        pci = sc->sc_flags & HME_PCI;
        CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
            HME_XD_GETFLAGS(pci, txd, ri));
        for (i = 0; i < nsegs; i++) {
                /* Fill the ring entry. */
                flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
                if (i == 0)
                        flags |= HME_XD_SOP | cflags;
                else
                        flags |= HME_XD_OWN | cflags;
                CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
                    ri, si, flags);
                HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
                HME_XD_SETFLAGS(pci, txd, ri, flags);
                sc->sc_rb.rb_td_nbusy++;
                htx->htx_lastdesc = ri;
                ri = (ri + 1) % HME_NTXDESC;
        }
        sc->sc_rb.rb_tdhead = ri;

        /* set EOP on the last descriptor */
        ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
        flags = HME_XD_GETFLAGS(pci, txd, ri);
        flags |= HME_XD_EOP;
        CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
            flags);
        HME_XD_SETFLAGS(pci, txd, ri, flags);

        /*
         * Turn ownership of the first descriptor over to the hardware last,
         * so the chip never sees a partially set up chain.
         */
        flags = HME_XD_GETFLAGS(pci, txd, si);
        flags |= HME_XD_OWN;
        CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
            ri, flags);
        HME_XD_SETFLAGS(pci, txd, si, flags);

        STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
        STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
        htx->htx_m = *m0;

        /* start the transmission. */
        HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);

        return (0);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct mbuf *m;

        if (len <= sizeof(struct ether_header) ||
            len > HME_MAX_FRAMESIZE) {
#ifdef HMEDEBUG
                HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
                    len);
#endif
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                hme_discard_rxbuf(sc, ix);
                return;
        }

        m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
        CTR1(KTR_HME, "hme_read: len %d", len);

        if (hme_add_rxbuf(sc, ix, 0) != 0) {
                /*
                 * hme_add_rxbuf will leave the old buffer in the ring until
                 * it is sure that a new buffer can be mapped. If it can not,
                 * drop the packet, but leave the interface up.
                 */
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                hme_discard_rxbuf(sc, ix);
                return;
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
        m_adj(m, HME_RXOFFS);
        /* RX TCP/UDP checksum */
        if (ifp->if_capenable & IFCAP_RXCSUM)
                hme_rxcksum(m, flags);
        /* Pass the packet up. */
        HME_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        HME_LOCK(sc);
}

static void
hme_start(struct ifnet *ifp)
{
        struct hme_softc *sc = ifp->if_softc;

        HME_LOCK(sc);
        hme_start_locked(ifp);
        HME_UNLOCK(sc);
}

static void
hme_start_locked(struct ifnet *ifp)
{
        struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
        struct mbuf *m;
        int error, enq = 0;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
                return;

        for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
            sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

                error = hme_load_txmbuf(sc, &m);
                if (error != 0) {
                        if (m == NULL)
                                break;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m);
                        break;
                }
                enq++;
                BPF_MTAP(ifp, m);
        }

        if (enq > 0) {
                bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                sc->sc_wdog_timer = 5;
        }
}

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
        caddr_t txd;
        struct ifnet *ifp = sc->sc_ifp;
        struct hme_txdesc *htx;
        unsigned int ri, txflags;

        txd = sc->sc_rb.rb_txd;
        htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
        bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
        /* Fetch current position in the transmit ring */
        for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
                if (sc->sc_rb.rb_td_nbusy <= 0) {
                        CTR0(KTR_HME, "hme_tint: not busy!");
                        break;
                }

                txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
                CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

                if ((txflags & HME_XD_OWN) != 0)
                        break;

                CTR0(KTR_HME, "hme_tint: not owned");
                --sc->sc_rb.rb_td_nbusy;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

                /* Complete packet transmitted? */
                if ((txflags & HME_XD_EOP) == 0)
                        continue;

                KASSERT(htx->htx_lastdesc == ri,
                    ("%s: ring indices skewed: %d != %d!",
                    __func__, htx->htx_lastdesc, ri));
                bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                m_freem(htx->htx_m);
                htx->htx_m = NULL;
                STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
                STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
                htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
        }
        sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;

        /* Update ring */
        sc->sc_rb.rb_tdtail = ri;

        hme_start_locked(ifp);
}

/*
 * RX TCP/UDP checksum
 */
static void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
        struct ether_header *eh;
        struct ip *ip;
        struct udphdr *uh;
        int32_t hlen, len, pktlen;
        u_int16_t cksum, *opts;
        u_int32_t temp32;

        pktlen = m->m_pkthdr.len;
        if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
                return;
        eh = mtod(m, struct ether_header *);
        if (eh->ether_type != htons(ETHERTYPE_IP))
                return;
        ip = (struct ip *)(eh + 1);
        if (ip->ip_v != IPVERSION)
                return;

        hlen = ip->ip_hl << 2;
        pktlen -= sizeof(struct ether_header);
        if (hlen < sizeof(struct ip))
                return;
        if (ntohs(ip->ip_len) < hlen)
                return;
        if (ntohs(ip->ip_len) != pktlen)
                return;
        if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
                return; /* can't handle fragmented packet */

        switch (ip->ip_p) {
        case IPPROTO_TCP:
                if (pktlen < (hlen + sizeof(struct tcphdr)))
                        return;
                break;
        case IPPROTO_UDP:
                if (pktlen < (hlen + sizeof(struct udphdr)))
                        return;
                uh = (struct udphdr *)((caddr_t)ip + hlen);
                if (uh->uh_sum == 0)
                        return; /* no checksum */
                break;
        default:
                return;
        }

        cksum = ~(flags & HME_XD_RXCKSUM);
        /*
         * Checksum fixup for IP options: the hardware sum starts at a fixed
         * offset that assumes a 20-byte IP header, so any IP option words
         * are included in it and must be subtracted back out (in
         * ones-complement arithmetic).
         */
        len = hlen - sizeof(struct ip);
        if (len > 0) {
                opts = (u_int16_t *)(ip + 1);
                for (; len > 0; len -= sizeof(u_int16_t), opts++) {
                        temp32 = cksum - *opts;
                        temp32 = (temp32 >> 16) + (temp32 & 65535);
                        cksum = temp32 & 65535;
                }
        }
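        /*
         * Note: only CSUM_DATA_VALID is set (not CSUM_PSEUDO_HDR), so the
         * protocol layer will still fold the pseudo-header into csum_data
         * before declaring the checksum good.
         */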
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
        m->m_pkthdr.csum_data = cksum;
}

/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
        caddr_t xdr = sc->sc_rb.rb_rxd;
        struct ifnet *ifp = sc->sc_ifp;
        unsigned int ri, len;
        int progress = 0;
        u_int32_t flags;

        /*
         * Process all buffers with valid data.
         */
        bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
        for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
                flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
                CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
                if ((flags & HME_XD_OWN) != 0)
                        break;

                progress++;
                if ((flags & HME_XD_OFL) != 0) {
                        device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
                            "flags=0x%x\n", ri, flags);
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        hme_discard_rxbuf(sc, ri);
                } else {
                        len = HME_XD_DECODE_RSIZE(flags);
                        hme_read(sc, ri, len, flags);
                }
        }
        if (progress) {
                bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }
        sc->sc_rb.rb_rdtail = ri;
}

static void
hme_eint(struct hme_softc *sc, u_int status)
{

        if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
                device_printf(sc->sc_dev, "XXXlink status changed: "
                    "cfg=%#x, stat=%#x, sm=%#x\n",
                    HME_MIF_READ_4(sc, HME_MIFI_CFG),
                    HME_MIF_READ_4(sc, HME_MIFI_STAT),
                    HME_MIF_READ_4(sc, HME_MIFI_SM));
                return;
        }

        /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1334         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1335                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1336                 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1337                 hme_init_locked(sc);
 1338         }
 1339 }
 1340 
 1341 void
 1342 hme_intr(void *v)
 1343 {
 1344         struct hme_softc *sc = (struct hme_softc *)v;
 1345         u_int32_t status;
 1346 
 1347         HME_LOCK(sc);
 1348         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1349         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1350 
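              /*
               * Dispatch in a fixed order while holding the driver lock:
               * error conditions first (hme_eint() may reinitialize the
               * chip), then frames received into host memory, and finally
               * transmit completions.
               */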
 1351         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1352                 hme_eint(sc, status);
 1353 
 1354         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1355                 hme_rint(sc);
 1356 
 1357         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1358                 hme_tint(sc);
 1359         HME_UNLOCK(sc);
 1360 }
 1361 
 1362 static int
 1363 hme_watchdog(struct hme_softc *sc)
 1364 {
 1365         struct ifnet *ifp = sc->sc_ifp;
 1366 
 1367         HME_LOCK_ASSERT(sc, MA_OWNED);
 1368 
 1369 #ifdef HMEDEBUG
 1370         CTR1(KTR_HME, "hme_watchdog: status %x",
 1371             (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
 1372 #endif
 1373 
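              /*
               * A sketch of the timer protocol as used here: sc_wdog_timer
               * is presumably armed by the transmit path; zero means no
               * transmission is pending, so the counter is decremented only
               * while non-zero, and a timeout is declared once it reaches
               * zero.  The non-zero EJUSTRETURN result lets the caller know
               * that the controller was reinitialized.
               */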
 1374         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1375                 return (0);
 1376 
 1377         if ((sc->sc_flags & HME_LINK) != 0)
 1378                 device_printf(sc->sc_dev, "device timeout\n");
 1379         else if (bootverbose)
 1380                 device_printf(sc->sc_dev, "device timeout (no link)\n");
 1381         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1382 
 1383         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1384         hme_init_locked(sc);
 1385         hme_start_locked(ifp);
 1386         return (EJUSTRETURN);
 1387 }
 1388 
 1389 /*
 1390  * Initialize the MII Management Interface
 1391  */
 1392 static void
 1393 hme_mifinit(struct hme_softc *sc)
 1394 {
 1395         u_int32_t v;
 1396 
 1397         /*
 1398          * Configure the MIF in frame mode, polling disabled, internal PHY
 1399          * selected.
 1400          */
 1401         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1402 
 1403         /*
 1404          * If the currently selected media uses the external transceiver,
 1405          * enable its MII drivers (which basically isolates the internal
 1406          * one and vice versa). If the current media has not been set
 1407          * yet, we default to the internal transceiver.
 1408          */
 1409         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1410         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1411             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1412             HME_PHYAD_EXTERNAL)
 1413                 v |= HME_MAC_XIF_MIIENABLE;
 1414         else
 1415                 v &= ~HME_MAC_XIF_MIIENABLE;
 1416         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1417 }
 1418 
 1419 /*
 1420  * MII interface
 1421  */
 1422 int
 1423 hme_mii_readreg(device_t dev, int phy, int reg)
 1424 {
 1425         struct hme_softc *sc;
 1426         int n;
 1427         u_int32_t v;
 1428 
 1429         sc = device_get_softc(dev);
 1430         /* Select the desired PHY in the MIF configuration register */
 1431         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1432         if (phy == HME_PHYAD_EXTERNAL)
 1433                 v |= HME_MIF_CFG_PHY;
 1434         else
 1435                 v &= ~HME_MIF_CFG_PHY;
 1436         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1437 
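              /*
               * What follows is a standard IEEE 802.3 clause 22 management
               * frame: start delimiter, read opcode, 5-bit PHY address,
               * 5-bit register address and the turnaround field.  Going by
               * the macro names, HME_MIF_FO_TAMSB is the turnaround MSB in
               * the outgoing frame, and the MIF signals completion by
               * setting HME_MIF_FO_TALSB.
               */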
 1438         /* Construct the frame command */
 1439         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1440             HME_MIF_FO_TAMSB |
 1441             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1442             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1443             (reg << HME_MIF_FO_REGAD_SHIFT);
 1444 
 1445         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1446         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1447             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
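              /*
               * Poll for completion: up to 100 iterations of DELAY(1) give
               * the frame roughly 100us to finish.  Note that a timeout
               * returns 0, which callers cannot distinguish from a register
               * that legitimately reads as all-zero.
               */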
 1448         for (n = 0; n < 100; n++) {
 1449                 DELAY(1);
 1450                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1451                 if (v & HME_MIF_FO_TALSB)
 1452                         return (v & HME_MIF_FO_DATA);
 1453         }
 1454 
 1455         device_printf(sc->sc_dev, "mii_read timeout\n");
 1456         return (0);
 1457 }
 1458 
 1459 int
 1460 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1461 {
 1462         struct hme_softc *sc;
 1463         int n;
 1464         u_int32_t v;
 1465 
 1466         sc = device_get_softc(dev);
 1467         /* Select the desired PHY in the MIF configuration register */
 1468         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1469         if (phy == HME_PHYAD_EXTERNAL)
 1470                 v |= HME_MIF_CFG_PHY;
 1471         else
 1472                 v &= ~HME_MIF_CFG_PHY;
 1473         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1474 
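              /*
               * Same PHY selection and frame layout as in hme_mii_readreg(),
               * except for the write opcode and the value to be written in
               * the low 16 data bits; the return value is 1 on success and
               * 0 on timeout.
               */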
 1475         /* Construct the frame command */
 1476         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1477             HME_MIF_FO_TAMSB                            |
 1478             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1479             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1480             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1481             (val & HME_MIF_FO_DATA);
 1482 
 1483         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1484         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1485             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1486         for (n = 0; n < 100; n++) {
 1487                 DELAY(1);
 1488                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1489                 if (v & HME_MIF_FO_TALSB)
 1490                         return (1);
 1491         }
 1492 
 1493         device_printf(sc->sc_dev, "mii_write timeout\n");
 1494         return (0);
 1495 }
 1496 
 1497 void
 1498 hme_mii_statchg(device_t dev)
 1499 {
 1500         struct hme_softc *sc;
 1501         uint32_t rxcfg, txcfg;
 1502 
 1503         sc = device_get_softc(dev);
 1504 
 1505 #ifdef HMEDEBUG
 1506         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1507                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1508 #endif
 1509 
 1510         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 1511             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 1512                 sc->sc_flags |= HME_LINK;
 1513         else
 1514                 sc->sc_flags &= ~HME_LINK;
 1515 
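              /*
               * The duplex setting apparently may only be changed while the
               * MACs are quiescent, so both are disabled first.  Judging by
               * its uses here, hme_mac_bitflip() clears the bits in its
               * fourth argument, sets those in its fifth and waits for the
               * hardware to acknowledge the change.
               */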
 1516         txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1517         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
 1518             HME_MAC_TXCFG_ENABLE, 0))
 1519                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 1520         rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1521         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
 1522             HME_MAC_RXCFG_ENABLE, 0))
 1523                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1524 
 1525         /* Set the MAC Full Duplex bit appropriately. */
 1526         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1527                 txcfg |= HME_MAC_TXCFG_FULLDPLX;
 1528         else
 1529                 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
 1530         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
 1531 
 1532         if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1533             (sc->sc_flags & HME_LINK) != 0) {
 1534                 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
 1535                     HME_MAC_TXCFG_ENABLE))
 1536                         device_printf(sc->sc_dev, "cannot enable TX MAC\n");
 1537                 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
 1538                     HME_MAC_RXCFG_ENABLE))
 1539                         device_printf(sc->sc_dev, "cannot enable RX MAC\n");
 1540         }
 1541 }
 1542 
 1543 static int
 1544 hme_mediachange(struct ifnet *ifp)
 1545 {
 1546         struct hme_softc *sc = ifp->if_softc;
 1547         int error;
 1548 
 1549         HME_LOCK(sc);
 1550         error = hme_mediachange_locked(sc);
 1551         HME_UNLOCK(sc);
 1552         return (error);
 1553 }
 1554 
 1555 static int
 1556 hme_mediachange_locked(struct hme_softc *sc)
 1557 {
 1558         struct mii_softc *child;
 1559 
 1560         HME_LOCK_ASSERT(sc, MA_OWNED);
 1561 
 1562 #ifdef HMEDEBUG
 1563         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1564                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1565 #endif
 1566 
 1567         hme_mifinit(sc);
 1568 
 1569         /*
 1570          * If both PHYs are present reset them. This is required for
 1571          * unisolating the previously isolated PHY when switching PHYs.
 1572          * As the above hme_mifinit() call will set the MII drivers in
 1573          * the XIF configuration register according to the currently
 1574          * selected media, there should be no window during which the
 1575          * data paths of both transceivers are open at the same time,
 1576          * even if the PHY device drivers use MIIF_NOISOLATE.
 1577          */
 1578         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1579                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1580                         PHY_RESET(child);
 1581         return (mii_mediachg(sc->sc_mii));
 1582 }
 1583 
 1584 static void
 1585 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1586 {
 1587         struct hme_softc *sc = ifp->if_softc;
 1588 
 1589         HME_LOCK(sc);
 1590         if ((ifp->if_flags & IFF_UP) == 0) {
 1591                 HME_UNLOCK(sc);
 1592                 return;
 1593         }
 1594 
 1595         mii_pollstat(sc->sc_mii);
 1596         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1597         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1598         HME_UNLOCK(sc);
 1599 }
 1600 
 1601 /*
 1602  * Process an ioctl request.
 1603  */
 1604 static int
 1605 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1606 {
 1607         struct hme_softc *sc = ifp->if_softc;
 1608         struct ifreq *ifr = (struct ifreq *)data;
 1609         int error = 0;
 1610 
 1611         switch (cmd) {
 1612         case SIOCSIFFLAGS:
 1613                 HME_LOCK(sc);
 1614                 if ((ifp->if_flags & IFF_UP) != 0) {
 1615                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1616                             ((ifp->if_flags ^ sc->sc_ifflags) &
 1617                             (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1618                                 hme_setladrf(sc, 1);
 1619                         else
 1620                                 hme_init_locked(sc);
 1621                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1622                         hme_stop(sc);
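                      /*
                       * IFF_LINK0 doubles as a user-settable knob for
                       * hardware UDP transmit checksums, which are opt-in
                       * here (presumably because of a hardware limitation
                       * with UDP's all-zero checksum convention).
                       */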
 1623                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1624                         sc->sc_csum_features |= CSUM_UDP;
 1625                 else
 1626                         sc->sc_csum_features &= ~CSUM_UDP;
 1627                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1628                         ifp->if_hwassist = sc->sc_csum_features;
 1629                 sc->sc_ifflags = ifp->if_flags;
 1630                 HME_UNLOCK(sc);
 1631                 break;
 1632 
 1633         case SIOCADDMULTI:
 1634         case SIOCDELMULTI:
 1635                 HME_LOCK(sc);
 1636                 hme_setladrf(sc, 1);
 1637                 HME_UNLOCK(sc);
 1638                 error = 0;
 1639                 break;
 1640         case SIOCGIFMEDIA:
 1641         case SIOCSIFMEDIA:
 1642                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1643                 break;
 1644         case SIOCSIFCAP:
 1645                 HME_LOCK(sc);
 1646                 ifp->if_capenable = ifr->ifr_reqcap;
 1647                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1648                         ifp->if_hwassist = sc->sc_csum_features;
 1649                 else
 1650                         ifp->if_hwassist = 0;
 1651                 HME_UNLOCK(sc);
 1652                 break;
 1653         default:
 1654                 error = ether_ioctl(ifp, cmd, data);
 1655                 break;
 1656         }
 1657 
 1658         return (error);
 1659 }
 1660 
 1661 /*
 1662  * Set up the logical address filter.
 1663  */
 1664 static void
 1665 hme_setladrf(struct hme_softc *sc, int reenable)
 1666 {
 1667         struct ifnet *ifp = sc->sc_ifp;
 1668         struct ifmultiaddr *inm;
 1669         u_int32_t crc;
 1670         u_int32_t hash[4];
 1671         u_int32_t macc;
 1672 
 1673         HME_LOCK_ASSERT(sc, MA_OWNED);
 1674         /* Clear the hash table. */
 1675         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1676 
 1677         /* Get the current RX configuration. */
 1678         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1679 
 1680         /*
 1681          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 1682          * and the hash filter.  The appropriate bits are re-enabled
 1683          * below as needed.
 1684          */
 1685         macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 1686 
 1687         /*
 1688          * Disable the receiver while changing its state, as the documentation
 1689          * mandates.
 1690          * We must then wait until the enable bit reads back as clear, which
 1691          * should take at most 3.5ms.
 1692          */
 1693         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1694             HME_MAC_RXCFG_ENABLE, 0))
 1695                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1696         /* Disable the hash filter before writing to the filter registers. */
 1697         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1698             HME_MAC_RXCFG_HENABLE, 0))
 1699                 device_printf(sc->sc_dev, "cannot disable hash filter\n");
 1700 
 1701         /* Make the RX MAC really SIMPLEX. */
 1702         macc |= HME_MAC_RXCFG_ME;
 1703         if (reenable)
 1704                 macc |= HME_MAC_RXCFG_ENABLE;
 1705         else
 1706                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1707 
 1708         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1709                 macc |= HME_MAC_RXCFG_PMISC;
 1710                 goto chipit;
 1711         }
 1712         if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
 1713                 macc |= HME_MAC_RXCFG_PGRP;
 1714                 goto chipit;
 1715         }
 1716 
 1717         macc |= HME_MAC_RXCFG_HENABLE;
 1718 
 1719         /*
 1720          * Set up multicast address filter by passing all multicast addresses
 1721          * through a crc generator, and then using the high order 6 bits as an
 1722          * index into the 64 bit logical address filter.  The two high-order
 1723          * bits select one of the four 16-bit filter words, while the low
 1724          * four bits select the bit within that word.
 1725          */
 1726 
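              /*
               * For example, if the six most significant CRC bits are
               * 101101 (45), then crc >> 4 == 2 and crc & 0xf == 13, so
               * bit 13 of hash[2] is set.
               */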
 1727         if_maddr_rlock(ifp);
 1728         CK_STAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
 1729                 if (inm->ifma_addr->sa_family != AF_LINK)
 1730                         continue;
 1731                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1732                     inm->ifma_addr), ETHER_ADDR_LEN);
 1733 
 1734                 /* Just want the 6 most significant bits. */
 1735                 crc >>= 26;
 1736 
 1737                 /* Set the corresponding bit in the filter. */
 1738                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1739         }
 1740         if_maddr_runlock(ifp);
 1741 
 1742 chipit:
 1743         /* Now load the hash table into the chip */
 1744         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1745         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1746         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1747         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1748         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1749             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1750             HME_MAC_RXCFG_ME)))
 1751                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1752 }
