FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    5  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    6  * All rights reserved.
    7  *
    8  * This code is derived from software contributed to The NetBSD Foundation
    9  * by Paul Kranenburg.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *        This product includes software developed by the NetBSD
   22  *        Foundation, Inc. and its contributors.
   23  * 4. Neither the name of The NetBSD Foundation nor the names of its
   24  *    contributors may be used to endorse or promote products derived
   25  *    from this software without specific prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   37  * POSSIBILITY OF SUCH DAMAGE.
   38  *
   39  *      from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
   40  */
   41 
   42 #include <sys/cdefs.h>
   43 __FBSDID("$FreeBSD: releng/12.0/sys/dev/hme/if_hme.c 333813 2018-05-18 20:13:34Z mmacy $");
   44 
   45 /*
   46  * HME Ethernet module driver.
   47  *
   48  * The HME is e.g. part of the PCIO PCI multi function device.
   49  * It supports TX gathering and TX and RX checksum offloading.
    50  * RX buffers must start at a programmable offset modulo 16. We choose 2
    51  * for this offset: mbuf clusters are usually aligned to 2^11 boundaries,
    52  * and skipping 2 bytes aligns the header following the Ethernet header on
    53  * a natural boundary, which minimizes wastage in the most common case.
   54  *
   55  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   56  * maximum packet size (this is not verified). Buffers starting on odd
   57  * boundaries must be mapped so that the burst can start on a natural boundary.
   58  *
    59  * STP2002QFP-UG states that the Ethernet hardware supports TCP checksum
    60  * offloading. In practice the same technique also works for UDP datagrams,
    61  * but the hardware does not compensate a computed UDP checksum of 0x0000
    62  * (which must be transmitted as 0xffff), so UDP checksum offload is disabled
    63  * by default. It can be reactivated with the special link0 option of ifconfig(8).
   64  */
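/*
 * For example (assuming the 2-byte offset described above): with a receive
 * buffer starting on a 16-byte boundary, the 14-byte Ethernet header occupies
 * offsets 2-15, so the IP header that follows begins at offset 16, a
 * naturally aligned address.
 */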
   65 #define HME_CSUM_FEATURES       (CSUM_TCP)
   66 #if 0
   67 #define HMEDEBUG
   68 #endif
   69 #define KTR_HME         KTR_SPARE2      /* XXX */
   70 
   71 #include <sys/param.h>
   72 #include <sys/systm.h>
   73 #include <sys/bus.h>
   74 #include <sys/endian.h>
   75 #include <sys/kernel.h>
   76 #include <sys/module.h>
   77 #include <sys/ktr.h>
   78 #include <sys/mbuf.h>
   79 #include <sys/malloc.h>
   80 #include <sys/socket.h>
   81 #include <sys/sockio.h>
   82 
   83 #include <net/bpf.h>
   84 #include <net/ethernet.h>
   85 #include <net/if.h>
   86 #include <net/if_var.h>
   87 #include <net/if_arp.h>
   88 #include <net/if_dl.h>
   89 #include <net/if_media.h>
   90 #include <net/if_types.h>
   91 #include <net/if_vlan_var.h>
   92 
   93 #include <netinet/in.h>
   94 #include <netinet/in_systm.h>
   95 #include <netinet/ip.h>
   96 #include <netinet/tcp.h>
   97 #include <netinet/udp.h>
   98 
   99 #include <dev/mii/mii.h>
  100 #include <dev/mii/miivar.h>
  101 
  102 #include <machine/bus.h>
  103 
  104 #include <dev/hme/if_hmereg.h>
  105 #include <dev/hme/if_hmevar.h>
  106 
  107 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
  108 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
  109 
  110 static void     hme_start(struct ifnet *);
  111 static void     hme_start_locked(struct ifnet *);
  112 static void     hme_stop(struct hme_softc *);
  113 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  114 static void     hme_tick(void *);
  115 static int      hme_watchdog(struct hme_softc *);
  116 static void     hme_init(void *);
  117 static void     hme_init_locked(struct hme_softc *);
  118 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  119 static int      hme_meminit(struct hme_softc *);
  120 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  121     u_int32_t, u_int32_t);
  122 static void     hme_mifinit(struct hme_softc *);
  123 static void     hme_setladrf(struct hme_softc *, int);
  124 
  125 static int      hme_mediachange(struct ifnet *);
  126 static int      hme_mediachange_locked(struct hme_softc *);
  127 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  128 
  129 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
  130 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  131 static void     hme_eint(struct hme_softc *, u_int);
  132 static void     hme_rint(struct hme_softc *);
  133 static void     hme_tint(struct hme_softc *);
  134 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  135 
  136 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  137 
  138 devclass_t hme_devclass;
  139 
  140 static int hme_nerr;
  141 
  142 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  143 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  144 
  145 #define HME_SPC_READ_4(spc, sc, offs) \
  146         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  147             (offs))
  148 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  149         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  150             (offs), (v))
  151 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
  152         bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  153             (offs), (l), (f))
  154 
  155 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  156 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  157 #define HME_SEB_BARRIER(sc, offs, l, f) \
  158         HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
  159 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  160 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  161 #define HME_ERX_BARRIER(sc, offs, l, f) \
  162         HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
  163 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  164 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  165 #define HME_ETX_BARRIER(sc, offs, l, f) \
  166         HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
  167 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  168 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  169 #define HME_MAC_BARRIER(sc, offs, l, f) \
  170         HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
  171 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  172 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  173 #define HME_MIF_BARRIER(sc, offs, l, f) \
  174         HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
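/*
 * The HME_<block>_{READ,WRITE,BARRIER}_4() macros above expand to bus_space(9)
 * accesses through the per-block tag and handle fields of the softc
 * (sc_sebt/sc_sebh, sc_erxt/sc_erxh and so on), so each register block may be
 * backed by a separate bus space resource.
 */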
  175 
  176 #define HME_MAXERR      5
  177 #define HME_WHINE(dev, ...) do {                                        \
  178         if (hme_nerr++ < HME_MAXERR)                                    \
  179                 device_printf(dev, __VA_ARGS__);                        \
  180         if (hme_nerr == HME_MAXERR) {                                   \
  181                 device_printf(dev, "too many errors; not reporting "    \
  182                     "any more\n");                                      \
  183         }                                                               \
  184 } while(0)
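/*
 * HME_WHINE() is a rate-limited device_printf(): once hme_nerr reaches
 * HME_MAXERR a final "too many errors" notice is printed and further
 * messages are suppressed.  A typical call (as in hme_eint()) looks like
 *
 *      HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 */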
  185 
  186 /* Support oversized VLAN frames. */
  187 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
  188 
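/*
 * Bus-independent attach: allocate the ifnet, create the DMA tags and maps,
 * set up the descriptor ring memory, attach the PHYs via miibus(4) and
 * register the interface with the network stack.  Called by the bus
 * front-ends once the softc fields listed in the comment below have been
 * initialized.
 */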
  189 int
  190 hme_config(struct hme_softc *sc)
  191 {
  192         struct ifnet *ifp;
  193         struct mii_softc *child;
  194         bus_size_t size;
  195         int error, rdesc, tdesc, i;
  196 
  197         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  198         if (ifp == NULL)
  199                 return (ENOSPC);
  200 
  201         /*
  202          * HME common initialization.
  203          *
  204          * hme_softc fields that must be initialized by the front-end:
  205          *
  206          * the DMA bus tag:
  207          *      sc_dmatag
  208          *
   209          * the bus handles, tags and offsets (split for SBus compatibility):
  210          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  211          *      sc_erx{t,h,o}   (Receiver Unit registers)
  212          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  213          *      sc_mac{t,h,o}   (MAC registers)
  214          *      sc_mif{t,h,o}   (Management Interface registers)
  215          *
  216          * the maximum bus burst size:
  217          *      sc_burst
  218          *
  219          */
  220 
  221         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
  222 
  223         /* Make sure the chip is stopped. */
  224         HME_LOCK(sc);
  225         hme_stop(sc);
  226         HME_UNLOCK(sc);
  227 
  228         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
  229             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  230             BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
  231             NULL, NULL, &sc->sc_pdmatag);
  232         if (error)
  233                 goto fail_ifnet;
  234 
  235         /*
  236          * Create control, RX and TX mbuf DMA tags.
  237          * Buffer descriptors must be aligned on a 2048 byte boundary;
  238          * take this into account when calculating the size. Note that
  239          * the maximum number of descriptors (256) occupies 2048 bytes,
  240          * so we allocate that much regardless of HME_N*DESC.
  241          */
  242         size = 4096;
  243         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  244             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  245             1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
  246         if (error)
  247                 goto fail_ptag;
  248 
  249         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  250             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  251             1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
  252         if (error)
  253                 goto fail_ctag;
  254 
  255         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  256             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  257             MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
  258             NULL, NULL, &sc->sc_tdmatag);
  259         if (error)
  260                 goto fail_rtag;
  261 
  262         /* Allocate the control DMA buffer. */
  263         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  264             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
  265         if (error != 0) {
  266                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  267                 goto fail_ttag;
  268         }
  269 
  270         /* Load the control DMA buffer. */
  271         sc->sc_rb.rb_dmabase = 0;
  272         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  273             sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  274             sc->sc_rb.rb_dmabase == 0) {
  275                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  276                     error);
  277                 goto fail_free;
  278         }
  279         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  280             sc->sc_rb.rb_dmabase);
  281 
  282         /*
   283          * Prepare the RX descriptors. rdesc marks the last initialized
   284          * descriptor so that the error path below can clean up.
  285          */
  286         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  287                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  288                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  289                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  290                 if (error != 0)
  291                         goto fail_rxdesc;
  292         }
  293         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  294             &sc->sc_rb.rb_spare_dmamap);
  295         if (error != 0)
  296                 goto fail_rxdesc;
  297         /* Same for the TX descs. */
  298         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  299                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  300                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  301                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  302                 if (error != 0)
  303                         goto fail_txdesc;
  304         }
  305 
  306         sc->sc_csum_features = HME_CSUM_FEATURES;
  307         /* Initialize ifnet structure. */
  308         ifp->if_softc = sc;
  309         if_initname(ifp, device_get_name(sc->sc_dev),
  310             device_get_unit(sc->sc_dev));
  311         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  312         ifp->if_start = hme_start;
  313         ifp->if_ioctl = hme_ioctl;
  314         ifp->if_init = hme_init;
  315         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  316         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  317         IFQ_SET_READY(&ifp->if_snd);
  318 
  319         hme_mifinit(sc);
  320 
  321         /*
   322          * The DP83840A PHYs used with HME chips don't properly advertise
   323          * their media capabilities, so force writing the ANAR according to
   324          * the BMSR in mii_phy_setmedia().
  325          */
  326         error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  327             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
  328             MII_OFFSET_ANY, MIIF_FORCEANEG);
  329         i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  330             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
  331             MII_OFFSET_ANY, MIIF_FORCEANEG);
  332         if (error != 0 && i != 0) {
  333                 error = ENXIO;
  334                 device_printf(sc->sc_dev, "attaching PHYs failed\n");
  335                 goto fail_rxdesc;
  336         }
  337         sc->sc_mii = device_get_softc(sc->sc_miibus);
  338 
  339         /*
  340          * Walk along the list of attached MII devices and
  341          * establish an `MII instance' to `PHY number'
  342          * mapping. We'll use this mapping to enable the MII
  343          * drivers of the external transceiver according to
  344          * the currently selected media.
  345          */
  346         sc->sc_phys[0] = sc->sc_phys[1] = -1;
  347         LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
  348                 /*
  349                  * Note: we support just two PHYs: the built-in
  350                  * internal device and an external on the MII
  351                  * connector.
  352                  */
  353                 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
  354                     child->mii_phy != HME_PHYAD_INTERNAL) ||
  355                     child->mii_inst > 1) {
  356                         device_printf(sc->sc_dev, "cannot accommodate "
  357                             "MII device %s at phy %d, instance %d\n",
  358                             device_get_name(child->mii_dev),
  359                             child->mii_phy, child->mii_inst);
  360                         continue;
  361                 }
  362 
  363                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  364         }
  365 
  366         /* Attach the interface. */
  367         ether_ifattach(ifp, sc->sc_enaddr);
  368 
  369         /*
  370          * Tell the upper layer(s) we support long frames/checksum offloads.
  371          */
  372         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
  373         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  374         ifp->if_hwassist |= sc->sc_csum_features;
  375         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  376         return (0);
  377 
  378 fail_txdesc:
  379         for (i = 0; i < tdesc; i++) {
  380                 bus_dmamap_destroy(sc->sc_tdmatag,
  381                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  382         }
  383         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  384 fail_rxdesc:
  385         for (i = 0; i < rdesc; i++) {
  386                 bus_dmamap_destroy(sc->sc_rdmatag,
  387                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  388         }
  389         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  390 fail_free:
  391         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  392 fail_ttag:
  393         bus_dma_tag_destroy(sc->sc_tdmatag);
  394 fail_rtag:
  395         bus_dma_tag_destroy(sc->sc_rdmatag);
  396 fail_ctag:
  397         bus_dma_tag_destroy(sc->sc_cdmatag);
  398 fail_ptag:
  399         bus_dma_tag_destroy(sc->sc_pdmatag);
  400 fail_ifnet:
  401         if_free(ifp);
  402         return (error);
  403 }
  404 
  405 void
  406 hme_detach(struct hme_softc *sc)
  407 {
  408         struct ifnet *ifp = sc->sc_ifp;
  409         int i;
  410 
  411         HME_LOCK(sc);
  412         hme_stop(sc);
  413         HME_UNLOCK(sc);
  414         callout_drain(&sc->sc_tick_ch);
  415         ether_ifdetach(ifp);
  416         if_free(ifp);
  417         device_delete_child(sc->sc_dev, sc->sc_miibus);
  418 
  419         for (i = 0; i < HME_NTXQ; i++) {
  420                 bus_dmamap_destroy(sc->sc_tdmatag,
  421                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  422         }
  423         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  424         for (i = 0; i < HME_NRXDESC; i++) {
  425                 bus_dmamap_destroy(sc->sc_rdmatag,
  426                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  427         }
  428         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  429             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  430         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  431         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  432         bus_dma_tag_destroy(sc->sc_tdmatag);
  433         bus_dma_tag_destroy(sc->sc_rdmatag);
  434         bus_dma_tag_destroy(sc->sc_cdmatag);
  435         bus_dma_tag_destroy(sc->sc_pdmatag);
  436 }
  437 
  438 void
  439 hme_suspend(struct hme_softc *sc)
  440 {
  441 
  442         HME_LOCK(sc);
  443         hme_stop(sc);
  444         HME_UNLOCK(sc);
  445 }
  446 
  447 void
  448 hme_resume(struct hme_softc *sc)
  449 {
  450         struct ifnet *ifp = sc->sc_ifp;
  451 
  452         HME_LOCK(sc);
  453         if ((ifp->if_flags & IFF_UP) != 0)
  454                 hme_init_locked(sc);
  455         HME_UNLOCK(sc);
  456 }
  457 
  458 static void
  459 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  460 {
  461         struct hme_softc *sc = (struct hme_softc *)xsc;
  462 
  463         if (error != 0)
  464                 return;
  465         KASSERT(nsegs == 1,
  466             ("%s: too many DMA segments (%d)", __func__, nsegs));
  467         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  468 }
  469 
  470 static void
  471 hme_tick(void *arg)
  472 {
  473         struct hme_softc *sc = arg;
  474         struct ifnet *ifp;
  475 
  476         HME_LOCK_ASSERT(sc, MA_OWNED);
  477 
  478         ifp = sc->sc_ifp;
  479         /*
  480          * Unload collision counters
  481          */
  482         if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
  483                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
  484                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
  485                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
  486                 HME_MAC_READ_4(sc, HME_MACI_LTCNT));
  487 
  488         /*
  489          * then clear the hardware counters.
  490          */
  491         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  492         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  493         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  494         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  495 
  496         mii_tick(sc->sc_mii);
  497 
  498         if (hme_watchdog(sc) == EJUSTRETURN)
  499                 return;
  500 
  501         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  502 }
  503 
  504 static void
  505 hme_stop(struct hme_softc *sc)
  506 {
  507         u_int32_t v;
  508         int n;
  509 
  510         callout_stop(&sc->sc_tick_ch);
  511         sc->sc_wdog_timer = 0;
  512         sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  513         sc->sc_flags &= ~HME_LINK;
  514 
  515         /* Mask all interrupts */
  516         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
  517 
  518         /* Reset transmitter and receiver */
  519         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  520             HME_SEB_RESET_ERX);
  521         HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
  522             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  523         for (n = 0; n < 20; n++) {
  524                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  525                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  526                         return;
  527                 DELAY(20);
  528         }
  529 
  530         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  531 }
  532 
  533 /*
  534  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  535  * ring for subsequent use.
  536  */
  537 static __inline void
  538 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  539 {
  540 
  541         /*
  542          * Dropped a packet, reinitialize the descriptor and turn the
  543          * ownership back to the hardware.
  544          */
  545         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
  546             ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
  547             &sc->sc_rb.rb_rxdesc[ix])));
  548 }
  549 
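/*
 * Attach a receive buffer to ring slot `ri'.  If `keepold' is set and a
 * buffer is already present, just reinitialize the descriptor; otherwise
 * allocate a fresh mbuf cluster, align it, map it via the spare DMA map and
 * swap the maps, so the old buffer remains loaded until the new one is known
 * to map successfully.
 */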
  550 static int
  551 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  552 {
  553         struct hme_rxdesc *rd;
  554         struct mbuf *m;
  555         bus_dma_segment_t segs[1];
  556         bus_dmamap_t map;
  557         uintptr_t b;
  558         int a, unmap, nsegs;
  559 
  560         rd = &sc->sc_rb.rb_rxdesc[ri];
  561         unmap = rd->hrx_m != NULL;
  562         if (unmap && keepold) {
  563                 /*
  564                  * Reinitialize the descriptor flags, as they may have been
  565                  * altered by the hardware.
  566                  */
  567                 hme_discard_rxbuf(sc, ri);
  568                 return (0);
  569         }
  570         if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
  571                 return (ENOBUFS);
  572         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  573         b = mtod(m, uintptr_t);
  574         /*
  575          * Required alignment boundary. At least 16 is needed, but since
  576          * the mapping must be done in a way that a burst can start on a
  577          * natural boundary we might need to extend this.
  578          */
  579         a = imax(HME_MINRXALIGN, sc->sc_burst);
  580         /*
   581          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  582          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  583          * alignment of the header adjacent to the ethernet header, which
  584          * should be sufficient in all cases. Nevertheless, this second-guesses
  585          * ALIGN().
  586          */
  587         m_adj(m, roundup2(b, a) - b);
  588         if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  589             m, segs, &nsegs, 0) != 0) {
  590                 m_freem(m);
  591                 return (ENOBUFS);
  592         }
  593         /* If nsegs is wrong then the stack is corrupt. */
  594         KASSERT(nsegs == 1,
  595             ("%s: too many DMA segments (%d)", __func__, nsegs));
  596         if (unmap) {
  597                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  598                     BUS_DMASYNC_POSTREAD);
  599                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  600         }
  601         map = rd->hrx_dmamap;
  602         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  603         sc->sc_rb.rb_spare_dmamap = map;
  604         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  605         HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  606             segs[0].ds_addr);
  607         rd->hrx_m = m;
  608         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  609             HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  610         return (0);
  611 }
  612 
  613 static int
  614 hme_meminit(struct hme_softc *sc)
  615 {
  616         struct hme_ring *hr = &sc->sc_rb;
  617         struct hme_txdesc *td;
  618         bus_addr_t dma;
  619         caddr_t p;
  620         unsigned int i;
  621         int error;
  622 
  623         p = hr->rb_membase;
  624         dma = hr->rb_dmabase;
  625 
  626         /*
  627          * Allocate transmit descriptors
  628          */
  629         hr->rb_txd = p;
  630         hr->rb_txddma = dma;
  631         p += HME_NTXDESC * HME_XD_SIZE;
  632         dma += HME_NTXDESC * HME_XD_SIZE;
  633         /*
  634          * We have reserved descriptor space until the next 2048 byte
  635          * boundary.
  636          */
  637         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  638         p = (caddr_t)roundup((u_long)p, 2048);
  639 
  640         /*
  641          * Allocate receive descriptors
  642          */
  643         hr->rb_rxd = p;
  644         hr->rb_rxddma = dma;
  645         p += HME_NRXDESC * HME_XD_SIZE;
  646         dma += HME_NRXDESC * HME_XD_SIZE;
  647         /* Again move forward to the next 2048 byte boundary.*/
  648         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  649         p = (caddr_t)roundup((u_long)p, 2048);
  650 
  651         /*
  652          * Initialize transmit buffer descriptors
  653          */
  654         for (i = 0; i < HME_NTXDESC; i++) {
  655                 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  656                 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  657         }
  658 
  659         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  660         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  661         for (i = 0; i < HME_NTXQ; i++) {
  662                 td = &sc->sc_rb.rb_txdesc[i];
  663                 if (td->htx_m != NULL) {
  664                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  665                             BUS_DMASYNC_POSTWRITE);
  666                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  667                         m_freem(td->htx_m);
  668                         td->htx_m = NULL;
  669                 }
  670                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  671         }
  672 
  673         /*
  674          * Initialize receive buffer descriptors
  675          */
  676         for (i = 0; i < HME_NRXDESC; i++) {
  677                 error = hme_add_rxbuf(sc, i, 1);
  678                 if (error != 0)
  679                         return (error);
  680         }
  681 
  682         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  683             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  684 
  685         hr->rb_tdhead = hr->rb_tdtail = 0;
  686         hr->rb_td_nbusy = 0;
  687         hr->rb_rdtail = 0;
  688         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  689             hr->rb_txddma);
  690         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  691             hr->rb_rxddma);
  692         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  693             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  694         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  695             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  696         return (0);
  697 }
  698 
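/*
 * Read-modify-write a MAC register: clear the bits in `clr', set the bits in
 * `set' and busy-wait (up to roughly 4 ms) until the hardware reflects the
 * change.  Returns 1 on success (or when there are no bits to wait for) and
 * 0 on timeout.
 */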
  699 static int
  700 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  701     u_int32_t clr, u_int32_t set)
  702 {
  703         int i = 0;
  704 
  705         val &= ~clr;
  706         val |= set;
  707         HME_MAC_WRITE_4(sc, reg, val);
  708         HME_MAC_BARRIER(sc, reg, 4,
  709             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  710         if (clr == 0 && set == 0)
  711                 return (1);     /* just write, no bits to wait for */
  712         do {
  713                 DELAY(100);
  714                 i++;
  715                 val = HME_MAC_READ_4(sc, reg);
  716                 if (i > 40) {
   717                         /* After about 4 ms, we should have been done. */
  718                         device_printf(sc->sc_dev, "timeout while writing to "
  719                             "MAC configuration register\n");
  720                         return (0);
  721                 }
  722         } while ((val & clr) != 0 && (val & set) != set);
  723         return (1);
  724 }
  725 
  726 /*
  727  * Initialization of interface; set up initialization block
  728  * and transmit/receive descriptor rings.
  729  */
  730 static void
  731 hme_init(void *xsc)
  732 {
  733         struct hme_softc *sc = (struct hme_softc *)xsc;
  734 
  735         HME_LOCK(sc);
  736         hme_init_locked(sc);
  737         HME_UNLOCK(sc);
  738 }
  739 
  740 static void
  741 hme_init_locked(struct hme_softc *sc)
  742 {
  743         struct ifnet *ifp = sc->sc_ifp;
  744         u_int8_t *ea;
  745         u_int32_t n, v;
  746 
  747         HME_LOCK_ASSERT(sc, MA_OWNED);
  748 
  749         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
  750                 return;
  751 
  752         /*
  753          * Initialization sequence. The numbered steps below correspond
  754          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  755          * Channel Engine manual (part of the PCIO manual).
  756          * See also the STP2002-STQ document from Sun Microsystems.
  757          */
  758 
  759         /* step 1 & 2. Reset the Ethernet Channel */
  760         hme_stop(sc);
  761 
  762         /* Re-initialize the MIF */
  763         hme_mifinit(sc);
  764 
  765 #if 0
  766         /* Mask all MIF interrupts, just in case */
  767         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  768 #endif
  769 
  770         /* step 3. Setup data structures in host memory */
  771         if (hme_meminit(sc) != 0) {
   772                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  773                 return;
  774         }
  775 
  776         /* step 4. TX MAC registers & counters */
  777         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  778         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  779         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  780         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  781         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  782 
  783         /* Load station MAC address */
  784         ea = IF_LLADDR(ifp);
  785         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  786         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  787         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  788 
  789         /*
  790          * Init seed for backoff
  791          * (source suggested by manual: low 10 bits of MAC address)
  792          */
  793         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  794         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  795 
   796         /* Note: Accepting power-on defaults for other MAC registers here. */
  797 
  798         /* step 5. RX MAC registers & counters */
  799         hme_setladrf(sc, 0);
  800 
  801         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  802         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  803         /* Transmit Descriptor ring size: in increments of 16 */
  804         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  805 
  806         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  807         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  808 
  809         /* step 8. Global Configuration & Interrupt Mask */
  810         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  811             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  812                 HME_SEB_STAT_HOSTTOTX |
  813                 HME_SEB_STAT_RXTOHOST |
  814                 HME_SEB_STAT_TXALL |
  815                 HME_SEB_STAT_TXPERR |
  816                 HME_SEB_STAT_RCNTEXP |
  817                 HME_SEB_STAT_ALL_ERRORS ));
  818 
  819         switch (sc->sc_burst) {
  820         default:
  821                 v = 0;
  822                 break;
  823         case 16:
  824                 v = HME_SEB_CFG_BURST16;
  825                 break;
  826         case 32:
  827                 v = HME_SEB_CFG_BURST32;
  828                 break;
  829         case 64:
  830                 v = HME_SEB_CFG_BURST64;
  831                 break;
  832         }
  833         /*
   834          * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?).
   835          * Allowing 64-bit transfers also breaks TX checksum offload; it is
   836          * not known whether this stems from a hardware bug or from the
   837          * driver's DMA scheme.
  838          *
  839          * if (sc->sc_flags & HME_PCI == 0)
  840          *      v |= HME_SEB_CFG_64BIT;
  841          */
  842         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  843 
  844         /* step 9. ETX Configuration: use mostly default values */
  845 
  846         /* Enable DMA */
  847         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  848         v |= HME_ETX_CFG_DMAENABLE;
  849         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  850 
  851         /* step 10. ERX Configuration */
  852         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  853 
  854         /* Encode Receive Descriptor ring size: four possible values */
  855         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  856         switch (HME_NRXDESC) {
  857         case 32:
  858                 v |= HME_ERX_CFG_RINGSIZE32;
  859                 break;
  860         case 64:
  861                 v |= HME_ERX_CFG_RINGSIZE64;
  862                 break;
  863         case 128:
  864                 v |= HME_ERX_CFG_RINGSIZE128;
  865                 break;
  866         case 256:
  867                 v |= HME_ERX_CFG_RINGSIZE256;
  868                 break;
  869         default:
  870                 printf("hme: invalid Receive Descriptor ring size\n");
  871                 break;
  872         }
  873 
  874         /* Enable DMA, fix RX first byte offset. */
  875         v &= ~HME_ERX_CFG_FBO_MASK;
  876         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  877         /* RX TCP/UDP checksum offset */
  878         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  879         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  880         v |= n;
  881         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  882         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  883 
  884         /* step 11. XIF Configuration */
  885         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  886         v |= HME_MAC_XIF_OE;
  887         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  888         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  889 
  890         /* step 12. RX_MAC Configuration Register */
  891         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  892         v |= HME_MAC_RXCFG_ENABLE;
  893         v &= ~(HME_MAC_RXCFG_DCRCS);
  894         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  895         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  896 
  897         /* step 13. TX_MAC Configuration Register */
  898         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  899         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  900         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  901         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  902 
  903         /* step 14. Issue Transmit Pending command */
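        /*
         * (The actual TP_DMAWAKEUP write is deferred to hme_load_txmbuf(),
         * which issues it once descriptors have been handed to the chip.)
         */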
  904 
  905 #ifdef HMEDEBUG
  906         /* Debug: double-check. */
  907         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  908             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  909             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  910             HME_ERX_READ_4(sc, HME_ERXI_RING),
  911             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  912         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  913             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  914             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  915             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  916         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  917             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  918             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  919 #endif
  920 
  921         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  922         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  923 
  924         /* Set the current media. */
  925         hme_mediachange_locked(sc);
  926 
  927         /* Start the one second timer. */
  928         sc->sc_wdog_timer = 0;
  929         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  930 }
  931 
  932 /*
  933  * Routine to DMA map an mbuf chain, set up the descriptor rings
  934  * accordingly and start the transmission.
   935  * Returns 0 on success, or an errno otherwise (ENOBUFS if there were not
   936  * enough free descriptors to map the packet).
  937  *
  938  * XXX: this relies on the fact that segments returned by
  939  * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
  940  * boundary on (i.e. potentially before ds_addr) to the first
  941  * boundary beyond the end.  This is usually a safe assumption to
  942  * make, but is not documented.
  943  */
  944 static int
  945 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
  946 {
  947         bus_dma_segment_t segs[HME_NTXSEGS];
  948         struct hme_txdesc *htx;
  949         struct ip *ip;
  950         struct mbuf *m;
  951         caddr_t txd;
  952         int error, i, nsegs, pci, ri, si;
  953         uint32_t cflags, flags;
  954 
  955         if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  956                 return (ENOBUFS);
  957 
  958         cflags = 0;
  959         if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
  960                 if (M_WRITABLE(*m0) == 0) {
  961                         m = m_dup(*m0, M_NOWAIT);
  962                         m_freem(*m0);
  963                         *m0 = m;
  964                         if (m == NULL)
  965                                 return (ENOBUFS);
  966                 }
  967                 i = sizeof(struct ether_header);
  968                 m = m_pullup(*m0, i + sizeof(struct ip));
  969                 if (m == NULL) {
  970                         *m0 = NULL;
  971                         return (ENOBUFS);
  972                 }
  973                 ip = (struct ip *)(mtod(m, caddr_t) + i);
  974                 i += (ip->ip_hl << 2);
  975                 cflags = i << HME_XD_TXCKSUM_SSHIFT |
  976                     ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
  977                     HME_XD_TXCKSUM;
  978                 *m0 = m;
  979         }
  980 
  981         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  982             *m0, segs, &nsegs, 0);
  983         if (error == EFBIG) {
  984                 m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
  985                 if (m == NULL) {
  986                         m_freem(*m0);
  987                         *m0 = NULL;
  988                         return (ENOMEM);
  989                 }
  990                 *m0 = m;
  991                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  992                     *m0, segs, &nsegs, 0);
  993                 if (error != 0) {
  994                         m_freem(*m0);
  995                         *m0 = NULL;
  996                         return (error);
  997                 }
  998         } else if (error != 0)
  999                 return (error);
 1000         /* If nsegs is wrong then the stack is corrupt. */
 1001         KASSERT(nsegs <= HME_NTXSEGS,
 1002             ("%s: too many DMA segments (%d)", __func__, nsegs));
 1003         if (nsegs == 0) {
 1004                 m_freem(*m0);
 1005                 *m0 = NULL;
 1006                 return (EIO);
 1007         }
 1008         if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
 1009                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1010                 /* Retry with m_collapse(9)? */
 1011                 return (ENOBUFS);
 1012         }
 1013         bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
 1014 
 1015         si = ri = sc->sc_rb.rb_tdhead;
 1016         txd = sc->sc_rb.rb_txd;
 1017         pci = sc->sc_flags & HME_PCI;
 1018         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
 1019             HME_XD_GETFLAGS(pci, txd, ri));
 1020         for (i = 0; i < nsegs; i++) {
 1021                 /* Fill the ring entry. */
 1022                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
 1023                 if (i == 0)
 1024                         flags |= HME_XD_SOP | cflags;
 1025                 else
 1026                         flags |= HME_XD_OWN | cflags;
 1027                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1028                     ri, si, flags);
 1029                 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
 1030                 HME_XD_SETFLAGS(pci, txd, ri, flags);
 1031                 sc->sc_rb.rb_td_nbusy++;
 1032                 htx->htx_lastdesc = ri;
 1033                 ri = (ri + 1) % HME_NTXDESC;
 1034         }
 1035         sc->sc_rb.rb_tdhead = ri;
 1036 
 1037         /* set EOP on the last descriptor */
 1038         ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1039         flags = HME_XD_GETFLAGS(pci, txd, ri);
 1040         flags |= HME_XD_EOP;
 1041         CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
 1042             flags);
 1043         HME_XD_SETFLAGS(pci, txd, ri, flags);
 1044 
 1045         /* Turn the first descriptor ownership to the hme */
 1046         flags = HME_XD_GETFLAGS(pci, txd, si);
 1047         flags |= HME_XD_OWN;
 1048         CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
 1049             ri, flags);
 1050         HME_XD_SETFLAGS(pci, txd, si, flags);
 1051 
 1052         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1053         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
 1054         htx->htx_m = *m0;
 1055 
 1056         /* start the transmission. */
 1057         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1058 
 1059         return (0);
 1060 }
 1061 
 1062 /*
 1063  * Pass a packet to the higher levels.
 1064  */
 1065 static void
 1066 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1067 {
 1068         struct ifnet *ifp = sc->sc_ifp;
 1069         struct mbuf *m;
 1070 
 1071         if (len <= sizeof(struct ether_header) ||
 1072             len > HME_MAX_FRAMESIZE) {
 1073 #ifdef HMEDEBUG
 1074                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1075                     len);
 1076 #endif
 1077                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1078                 hme_discard_rxbuf(sc, ix);
 1079                 return;
 1080         }
 1081 
 1082         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1083         CTR1(KTR_HME, "hme_read: len %d", len);
 1084 
 1085         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1086                 /*
 1087                  * hme_add_rxbuf will leave the old buffer in the ring until
 1088                  * it is sure that a new buffer can be mapped. If it can not,
 1089                  * drop the packet, but leave the interface up.
 1090                  */
 1091                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 1092                 hme_discard_rxbuf(sc, ix);
 1093                 return;
 1094         }
 1095 
 1096         if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 1097 
 1098         m->m_pkthdr.rcvif = ifp;
 1099         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1100         m_adj(m, HME_RXOFFS);
 1101         /* RX TCP/UDP checksum */
 1102         if (ifp->if_capenable & IFCAP_RXCSUM)
 1103                 hme_rxcksum(m, flags);
 1104         /* Pass the packet up. */
 1105         HME_UNLOCK(sc);
 1106         (*ifp->if_input)(ifp, m);
 1107         HME_LOCK(sc);
 1108 }
 1109 
 1110 static void
 1111 hme_start(struct ifnet *ifp)
 1112 {
 1113         struct hme_softc *sc = ifp->if_softc;
 1114 
 1115         HME_LOCK(sc);
 1116         hme_start_locked(ifp);
 1117         HME_UNLOCK(sc);
 1118 }
 1119 
 1120 static void
 1121 hme_start_locked(struct ifnet *ifp)
 1122 {
 1123         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1124         struct mbuf *m;
 1125         int error, enq = 0;
 1126 
 1127         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1128             IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
 1129                 return;
 1130 
 1131         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1132             sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
 1133                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1134                 if (m == NULL)
 1135                         break;
 1136 
 1137                 error = hme_load_txmbuf(sc, &m);
 1138                 if (error != 0) {
 1139                         if (m == NULL)
 1140                                 break;
 1141                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1142                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1143                         break;
 1144                 }
 1145                 enq++;
 1146                 BPF_MTAP(ifp, m);
 1147         }
 1148 
 1149         if (enq > 0) {
 1150                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1151                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1152                 sc->sc_wdog_timer = 5;
 1153         }
 1154 }
 1155 
 1156 /*
 1157  * Transmit interrupt.
 1158  */
 1159 static void
 1160 hme_tint(struct hme_softc *sc)
 1161 {
 1162         caddr_t txd;
 1163         struct ifnet *ifp = sc->sc_ifp;
 1164         struct hme_txdesc *htx;
 1165         unsigned int ri, txflags;
 1166 
 1167         txd = sc->sc_rb.rb_txd;
 1168         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1169         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1170         /* Fetch current position in the transmit ring */
 1171         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1172                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1173                         CTR0(KTR_HME, "hme_tint: not busy!");
 1174                         break;
 1175                 }
 1176 
 1177                 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
 1178                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1179 
 1180                 if ((txflags & HME_XD_OWN) != 0)
 1181                         break;
 1182 
 1183                 CTR0(KTR_HME, "hme_tint: not owned");
 1184                 --sc->sc_rb.rb_td_nbusy;
 1185                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1186 
 1187                 /* Complete packet transmitted? */
 1188                 if ((txflags & HME_XD_EOP) == 0)
 1189                         continue;
 1190 
 1191                 KASSERT(htx->htx_lastdesc == ri,
 1192                     ("%s: ring indices skewed: %d != %d!",
 1193                     __func__, htx->htx_lastdesc, ri));
 1194                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1195                     BUS_DMASYNC_POSTWRITE);
 1196                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1197 
 1198                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 1199                 m_freem(htx->htx_m);
 1200                 htx->htx_m = NULL;
 1201                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1202                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1203                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1204         }
 1205         sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
 1206 
 1207         /* Update ring */
 1208         sc->sc_rb.rb_tdtail = ri;
 1209 
 1210         hme_start_locked(ifp);
 1211 }
 1212 
 1213 /*
 1214  * RX TCP/UDP checksum
 1215  */
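/*
 * The controller delivers a raw 16-bit one's complement sum computed from a
 * fixed offset (programmed from ETHER_HDR_LEN + sizeof(struct ip) in
 * hme_init_locked(), i.e. just past a standard 20-byte IP header).  When the
 * IP header carries options, the option words were wrongly included in that
 * sum, so they are subtracted again below before the result is passed to the
 * stack as CSUM_DATA_VALID.
 */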
 1216 static void
 1217 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1218 {
 1219         struct ether_header *eh;
 1220         struct ip *ip;
 1221         struct udphdr *uh;
 1222         int32_t hlen, len, pktlen;
 1223         u_int16_t cksum, *opts;
 1224         u_int32_t temp32;
 1225 
 1226         pktlen = m->m_pkthdr.len;
 1227         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1228                 return;
 1229         eh = mtod(m, struct ether_header *);
 1230         if (eh->ether_type != htons(ETHERTYPE_IP))
 1231                 return;
 1232         ip = (struct ip *)(eh + 1);
 1233         if (ip->ip_v != IPVERSION)
 1234                 return;
 1235 
 1236         hlen = ip->ip_hl << 2;
 1237         pktlen -= sizeof(struct ether_header);
 1238         if (hlen < sizeof(struct ip))
 1239                 return;
 1240         if (ntohs(ip->ip_len) < hlen)
 1241                 return;
 1242         if (ntohs(ip->ip_len) != pktlen)
 1243                 return;
 1244         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1245                 return; /* can't handle fragmented packet */
 1246 
 1247         switch (ip->ip_p) {
 1248         case IPPROTO_TCP:
 1249                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1250                         return;
 1251                 break;
 1252         case IPPROTO_UDP:
 1253                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1254                         return;
 1255                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1256                 if (uh->uh_sum == 0)
 1257                         return; /* no checksum */
 1258                 break;
 1259         default:
 1260                 return;
 1261         }
 1262 
 1263         cksum = ~(flags & HME_XD_RXCKSUM);
 1264         /* checksum fixup for IP options */
 1265         len = hlen - sizeof(struct ip);
 1266         if (len > 0) {
 1267                 opts = (u_int16_t *)(ip + 1);
 1268                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1269                         temp32 = cksum - *opts;
 1270                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1271                         cksum = temp32 & 65535;
 1272                 }
 1273         }
 1274         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1275         m->m_pkthdr.csum_data = cksum;
 1276 }
 1277 
 1278 /*
 1279  * Receive interrupt.
 1280  */
 1281 static void
 1282 hme_rint(struct hme_softc *sc)
 1283 {
 1284         caddr_t xdr = sc->sc_rb.rb_rxd;
 1285         struct ifnet *ifp = sc->sc_ifp;
 1286         unsigned int ri, len;
 1287         int progress = 0;
 1288         u_int32_t flags;
 1289 
 1290         /*
 1291          * Process all buffers with valid data.
 1292          */
 1293         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1294         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1295                 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
 1296                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1297                 if ((flags & HME_XD_OWN) != 0)
 1298                         break;
 1299 
 1300                 progress++;
 1301                 if ((flags & HME_XD_OFL) != 0) {
 1302                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1303                             "flags=0x%x\n", ri, flags);
 1304                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1305                         hme_discard_rxbuf(sc, ri);
 1306                 } else {
 1307                         len = HME_XD_DECODE_RSIZE(flags);
 1308                         hme_read(sc, ri, len, flags);
 1309                 }
 1310         }
 1311         if (progress) {
 1312                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1313                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1314         }
 1315         sc->sc_rb.rb_rdtail = ri;
 1316 }
 1317 
 1318 static void
 1319 hme_eint(struct hme_softc *sc, u_int status)
 1320 {
 1321 
 1322         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1323                 device_printf(sc->sc_dev, "XXXlink status changed: "
 1324                     "cfg=%#x, stat=%#x, sm=%#x\n",
 1325                     HME_MIF_READ_4(sc, HME_MIFI_CFG),
 1326                     HME_MIF_READ_4(sc, HME_MIFI_STAT),
 1327                     HME_MIF_READ_4(sc, HME_MIFI_SM));
 1328                 return;
 1329         }
 1330 
  1331         /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1332         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1333                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1334                 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1335                 hme_init_locked(sc);
 1336         }
 1337 }
 1338 
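/*
 * Interrupt handler: read the SEB status register and dispatch to the error,
 * receive and transmit service routines as indicated.
 */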
 1339 void
 1340 hme_intr(void *v)
 1341 {
 1342         struct hme_softc *sc = (struct hme_softc *)v;
 1343         u_int32_t status;
 1344 
 1345         HME_LOCK(sc);
 1346         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1347         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1348 
 1349         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1350                 hme_eint(sc, status);
 1351 
 1352         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1353                 hme_rint(sc);
 1354 
 1355         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1356                 hme_tint(sc);
 1357         HME_UNLOCK(sc);
 1358 }
 1359 
 1360 static int
 1361 hme_watchdog(struct hme_softc *sc)
 1362 {
 1363         struct ifnet *ifp = sc->sc_ifp;
 1364 
 1365         HME_LOCK_ASSERT(sc, MA_OWNED);
 1366 
 1367 #ifdef HMEDEBUG
 1368         CTR1(KTR_HME, "hme_watchdog: status %x",
 1369             (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
 1370 #endif
 1371 
 1372         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1373                 return (0);
 1374 
 1375         if ((sc->sc_flags & HME_LINK) != 0)
 1376                 device_printf(sc->sc_dev, "device timeout\n");
 1377         else if (bootverbose)
 1378                 device_printf(sc->sc_dev, "device timeout (no link)\n");
 1379         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1380 
 1381         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1382         hme_init_locked(sc);
 1383         hme_start_locked(ifp);
 1384         return (EJUSTRETURN);
 1385 }
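
hme_watchdog() follows the usual software-watchdog contract: sc_wdog_timer is a countdown in seconds, zero means disarmed, and only the call that decrements it to zero treats the interface as hung and reinitializes it. The arming and ticking sites live elsewhere in this driver and are not reproduced here; a sketch of the convention, under that assumption:

            sc->sc_wdog_timer = 5;          /* armed when frames are handed to the chip */
            ...
            /* called once per second from the driver's tick callout */
            if (hme_watchdog(sc) == EJUSTRETURN)
                    return;                 /* timed out; the interface was reinitialized */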
 1386 
 1387 /*
 1388  * Initialize the MII Management Interface
 1389  */
 1390 static void
 1391 hme_mifinit(struct hme_softc *sc)
 1392 {
 1393         u_int32_t v;
 1394 
 1395         /*
 1396          * Configure the MIF in frame mode, polling disabled, internal PHY
 1397          * selected.
 1398          */
 1399         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1400 
 1401         /*
 1402          * If the currently selected media uses the external transceiver,
 1403          * enable its MII drivers (which basically isolates the internal
 1404          * one and vice versa). In case the current media hasn't been set
 1405          * yet, we default to the internal transceiver.
 1406          */
 1407         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1408         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1409             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1410             HME_PHYAD_EXTERNAL)
 1411                 v |= HME_MAC_XIF_MIIENABLE;
 1412         else
 1413                 v &= ~HME_MAC_XIF_MIIENABLE;
 1414         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1415 }
 1416 
 1417 /*
 1418  * MII interface
 1419  */
 1420 int
 1421 hme_mii_readreg(device_t dev, int phy, int reg)
 1422 {
 1423         struct hme_softc *sc;
 1424         int n;
 1425         u_int32_t v;
 1426 
 1427         sc = device_get_softc(dev);
 1428         /* Select the desired PHY in the MIF configuration register */
 1429         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1430         if (phy == HME_PHYAD_EXTERNAL)
 1431                 v |= HME_MIF_CFG_PHY;
 1432         else
 1433                 v &= ~HME_MIF_CFG_PHY;
 1434         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1435 
 1436         /* Construct the frame command */
 1437         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1438             HME_MIF_FO_TAMSB |
 1439             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1440             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1441             (reg << HME_MIF_FO_REGAD_SHIFT);
 1442 
 1443         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1444         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1445             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1446         for (n = 0; n < 100; n++) {
 1447                 DELAY(1);
 1448                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1449                 if (v & HME_MIF_FO_TALSB)
 1450                         return (v & HME_MIF_FO_DATA);
 1451         }
 1452 
 1453         device_printf(sc->sc_dev, "mii_read timeout\n");
 1454         return (0);
 1455 }
 1456 
 1457 int
 1458 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1459 {
 1460         struct hme_softc *sc;
 1461         int n;
 1462         u_int32_t v;
 1463 
 1464         sc = device_get_softc(dev);
 1465         /* Select the desired PHY in the MIF configuration register */
 1466         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1467         if (phy == HME_PHYAD_EXTERNAL)
 1468                 v |= HME_MIF_CFG_PHY;
 1469         else
 1470                 v &= ~HME_MIF_CFG_PHY;
 1471         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1472 
 1473         /* Construct the frame command */
 1474         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1475             HME_MIF_FO_TAMSB                            |
 1476             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1477             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1478             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1479             (val & HME_MIF_FO_DATA);
 1480 
 1481         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1482         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1483             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1484         for (n = 0; n < 100; n++) {
 1485                 DELAY(1);
 1486                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1487                 if (v & HME_MIF_FO_TALSB)
 1488                         return (1);
 1489         }
 1490 
 1491         device_printf(sc->sc_dev, "mii_write timeout\n");
 1492         return (0);
 1493 }
 1494 
 1495 void
 1496 hme_mii_statchg(device_t dev)
 1497 {
 1498         struct hme_softc *sc;
 1499         uint32_t rxcfg, txcfg;
 1500 
 1501         sc = device_get_softc(dev);
 1502 
 1503 #ifdef HMEDEBUG
 1504         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1505                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1506 #endif
 1507 
 1508         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 1509             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 1510                 sc->sc_flags |= HME_LINK;
 1511         else
 1512                 sc->sc_flags &= ~HME_LINK;
 1513 
 1514         txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1515         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
 1516             HME_MAC_TXCFG_ENABLE, 0))
 1517                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 1518         rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1519         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
 1520             HME_MAC_RXCFG_ENABLE, 0))
 1521                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1522 
 1523         /* Set the MAC Full Duplex bit appropriately. */
 1524         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1525                 txcfg |= HME_MAC_TXCFG_FULLDPLX;
 1526         else
 1527                 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
 1528         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
 1529 
 1530         if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1531             (sc->sc_flags & HME_LINK) != 0) {
 1532                 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
 1533                     HME_MAC_TXCFG_ENABLE))
 1534                         device_printf(sc->sc_dev, "cannot enable TX MAC\n");
 1535                 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
 1536                     HME_MAC_RXCFG_ENABLE))
 1537                         device_printf(sc->sc_dev, "cannot enable RX MAC\n");
 1538         }
 1539 }
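
The three MII handlers above are not called from within this file; the bus front-ends (hme_pci.c and hme_sbus.c) export them to the miibus(4) framework through their device method tables, roughly as sketched below (abbreviated). The frame command they write to HME_MIFI_FO is the standard IEEE 802.3 Clause 22 management frame: start bits, opcode, PHY address, register address, turnaround and 16 data bits.

            /* MII interface glue in the bus attachment (sketch, abbreviated) */
            DEVMETHOD(miibus_readreg, hme_mii_readreg),
            DEVMETHOD(miibus_writereg, hme_mii_writereg),
            DEVMETHOD(miibus_statchg, hme_mii_statchg),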
 1540 
 1541 static int
 1542 hme_mediachange(struct ifnet *ifp)
 1543 {
 1544         struct hme_softc *sc = ifp->if_softc;
 1545         int error;
 1546 
 1547         HME_LOCK(sc);
 1548         error = hme_mediachange_locked(sc);
 1549         HME_UNLOCK(sc);
 1550         return (error);
 1551 }
 1552 
 1553 static int
 1554 hme_mediachange_locked(struct hme_softc *sc)
 1555 {
 1556         struct mii_softc *child;
 1557 
 1558         HME_LOCK_ASSERT(sc, MA_OWNED);
 1559 
 1560 #ifdef HMEDEBUG
 1561         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1562                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1563 #endif
 1564 
 1565         hme_mifinit(sc);
 1566 
 1567         /*
 1568          * If both PHYs are present reset them. This is required for
 1569          * unisolating the previously isolated PHY when switching PHYs.
 1570          * As the above hme_mifinit() call will set the MII drivers in
 1571          * the XIF configuration register according to the currently
 1572          * selected media, there should be no window during which the
 1573          * data paths of both transceivers are open at the same time,
 1574          * even if the PHY device drivers use MIIF_NOISOLATE.
 1575          */
 1576         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1577                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1578                         PHY_RESET(child);
 1579         return (mii_mediachg(sc->sc_mii));
 1580 }
 1581 
 1582 static void
 1583 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1584 {
 1585         struct hme_softc *sc = ifp->if_softc;
 1586 
 1587         HME_LOCK(sc);
 1588         if ((ifp->if_flags & IFF_UP) == 0) {
 1589                 HME_UNLOCK(sc);
 1590                 return;
 1591         }
 1592 
 1593         mii_pollstat(sc->sc_mii);
 1594         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1595         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1596         HME_UNLOCK(sc);
 1597 }
 1598 
 1599 /*
 1600  * Process an ioctl request.
 1601  */
 1602 static int
 1603 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1604 {
 1605         struct hme_softc *sc = ifp->if_softc;
 1606         struct ifreq *ifr = (struct ifreq *)data;
 1607         int error = 0;
 1608 
 1609         switch (cmd) {
 1610         case SIOCSIFFLAGS:
 1611                 HME_LOCK(sc);
 1612                 if ((ifp->if_flags & IFF_UP) != 0) {
 1613                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1614                             ((ifp->if_flags ^ sc->sc_ifflags) &
 1615                             (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1616                                 hme_setladrf(sc, 1);
 1617                         else
 1618                                 hme_init_locked(sc);
 1619                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1620                         hme_stop(sc);
 1621                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1622                         sc->sc_csum_features |= CSUM_UDP;
 1623                 else
 1624                         sc->sc_csum_features &= ~CSUM_UDP;
 1625                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1626                         ifp->if_hwassist = sc->sc_csum_features;
 1627                 sc->sc_ifflags = ifp->if_flags;
 1628                 HME_UNLOCK(sc);
 1629                 break;
 1630 
 1631         case SIOCADDMULTI:
 1632         case SIOCDELMULTI:
 1633                 HME_LOCK(sc);
 1634                 hme_setladrf(sc, 1);
 1635                 HME_UNLOCK(sc);
 1636                 error = 0;
 1637                 break;
 1638         case SIOCGIFMEDIA:
 1639         case SIOCSIFMEDIA:
 1640                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1641                 break;
 1642         case SIOCSIFCAP:
 1643                 HME_LOCK(sc);
 1644                 ifp->if_capenable = ifr->ifr_reqcap;
 1645                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1646                         ifp->if_hwassist = sc->sc_csum_features;
 1647                 else
 1648                         ifp->if_hwassist = 0;
 1649                 HME_UNLOCK(sc);
 1650                 break;
 1651         default:
 1652                 error = ether_ioctl(ifp, cmd, data);
 1653                 break;
 1654         }
 1655 
 1656         return (error);
 1657 }
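
Two details of the SIOCSIFFLAGS and SIOCSIFCAP cases are worth illustrating. First, CSUM_UDP is tied to IFF_LINK0, so setting the link0 flag on the interface (e.g. with ifconfig) is what opts it into UDP transmit checksum offload. Second, IFCAP_TXCSUM is toggled through the standard capability ioctls; a minimal userland sketch (the interface name and helper are hypothetical) that flips it the way ifconfig's txcsum/-txcsum options do:

    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/sockio.h>
    #include <net/if.h>
    #include <string.h>
    #include <unistd.h>

    static int
    set_txcsum(const char *ifname, int enable)
    {
            struct ifreq ifr;
            int error, s;

            memset(&ifr, 0, sizeof(ifr));
            strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
            if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
                    return (-1);
            /* SIOCGIFCAP reports the currently enabled capabilities in ifr_curcap. */
            if (ioctl(s, SIOCGIFCAP, &ifr) == -1) {
                    close(s);
                    return (-1);
            }
            ifr.ifr_reqcap = enable ? (ifr.ifr_curcap | IFCAP_TXCSUM) :
                (ifr.ifr_curcap & ~IFCAP_TXCSUM);
            error = ioctl(s, SIOCSIFCAP, &ifr);
            close(s);
            return (error);
    }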
 1658 
 1659 /*
 1660  * Set up the logical address filter.
 1661  */
 1662 static void
 1663 hme_setladrf(struct hme_softc *sc, int reenable)
 1664 {
 1665         struct ifnet *ifp = sc->sc_ifp;
 1666         struct ifmultiaddr *inm;
 1667         u_int32_t crc;
 1668         u_int32_t hash[4];
 1669         u_int32_t macc;
 1670 
 1671         HME_LOCK_ASSERT(sc, MA_OWNED);
 1672         /* Clear the hash table. */
 1673         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1674 
 1675         /* Get the current RX configuration. */
 1676         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1677 
 1678         /*
 1679          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 1680          * and hash filter.  Depending on the case, the right bit will be
 1681          * enabled.
 1682          */
 1683         macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 1684 
 1685         /*
 1686          * Disable the receiver while changing its state, as the documentation
 1687          * mandates.
 1688          * We then must wait until the enable bit clears in the register; this
 1689          * should take at most 3.5ms.
 1690          */
 1691         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1692             HME_MAC_RXCFG_ENABLE, 0))
 1693                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1694         /* Disable the hash filter before writing to the filter registers. */
 1695         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1696             HME_MAC_RXCFG_HENABLE, 0))
 1697                 device_printf(sc->sc_dev, "cannot disable hash filter\n");
 1698 
 1699         /* Make the RX MAC really SIMPLEX. */
 1700         macc |= HME_MAC_RXCFG_ME;
 1701         if (reenable)
 1702                 macc |= HME_MAC_RXCFG_ENABLE;
 1703         else
 1704                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1705 
 1706         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1707                 macc |= HME_MAC_RXCFG_PMISC;
 1708                 goto chipit;
 1709         }
 1710         if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
 1711                 macc |= HME_MAC_RXCFG_PGRP;
 1712                 goto chipit;
 1713         }
 1714 
 1715         macc |= HME_MAC_RXCFG_HENABLE;
 1716 
 1717         /*
 1718          * Set up multicast address filter by passing all multicast addresses
 1719          * through a crc generator, and then using the high order 6 bits as an
 1720          * index into the 64 bit logical address filter.  The two high order
 1721          * bits select the word, while the remaining four bits select the bit
 1722          * within the word.
 1723          */
 1724 
 1725         if_maddr_rlock(ifp);
 1726         CK_STAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
 1727                 if (inm->ifma_addr->sa_family != AF_LINK)
 1728                         continue;
 1729                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1730                     inm->ifma_addr), ETHER_ADDR_LEN);
 1731 
 1732                 /* Just want the 6 most significant bits. */
 1733                 crc >>= 26;
 1734 
 1735                 /* Set the corresponding bit in the filter. */
 1736                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1737         }
 1738         if_maddr_runlock(ifp);
 1739 
 1740 chipit:
 1741         /* Now load the hash table into the chip */
 1742         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1743         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1744         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1745         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1746         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1747             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1748             HME_MAC_RXCFG_ME)))
 1749                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1750 }
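
As a worked illustration of the mapping used above: the little-endian Ethernet CRC of each multicast address is shifted right by 26 to keep its top 6 bits; the upper two of those bits select one of the four hash words and the lower four select the bit within it. A hypothetical helper showing just that mapping (ether_crc32_le() and ETHER_ADDR_LEN come from <net/ethernet.h>):

    static void
    hme_hash_position(const uint8_t addr[ETHER_ADDR_LEN], int *word, int *bit)
    {
            uint32_t crc;

            crc = ether_crc32_le(addr, ETHER_ADDR_LEN) >> 26;  /* top 6 CRC bits */
            *word = crc >> 4;       /* two high-order bits: index into hash[0..3] */
            *bit = crc & 0xf;       /* low four bits: bit within that word */
    }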
