FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/10.2/sys/dev/hme/if_hme.c 253134 2013-07-10 06:46:46Z yongari $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
    46  * The HME is found e.g. as part of the PCIO PCI multi-function device.
    47  * It supports TX gathering and TX and RX checksum offloading.
    48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters are usually aligned on 2^11 byte boundaries,
    50  * and 2 bytes are skipped so that the header following the ethernet header is
    51  * aligned on a natural boundary, which minimizes wastage in the common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
    57  * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
    58  * offloading; in reality the same technique works for UDP datagrams too.
    59  * However, the hardware doesn't compensate a UDP checksum computing to 0x0,
    60  * which UDP reserves to mean "no checksum". As a safeguard, UDP checksum
    61  * offload is off by default; the link0 option of ifconfig(8) reactivates it.
   62  */
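
/*
 * Illustration of the offset choice above (a hedged sketch, not driver
 * code; the EX_* names are hypothetical): with the frame starting 2
 * bytes into a 16 byte aligned buffer, the 14 byte Ethernet header ends
 * at offset 16, so the IP header that follows lands on a natural 4 byte
 * boundary.
 */
#if 0
#define EX_RXOFFS		2	/* RX offset programmed into the chip */
#define EX_ETHER_HDR_LEN	14	/* sizeof(struct ether_header) */
static int
example_rx_alignment(void)
{
	/* The cluster start is 16 byte aligned; data begins EX_RXOFFS in. */
	int ip_start = EX_RXOFFS + EX_ETHER_HDR_LEN;	/* == 16 */

	return (ip_start % 4 == 0);	/* IP header is naturally aligned */
}
#endif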
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #if 0
   65 #define HMEDEBUG
   66 #endif
   67 #define KTR_HME         KTR_SPARE2      /* XXX */
   68 
   69 #include <sys/param.h>
   70 #include <sys/systm.h>
   71 #include <sys/bus.h>
   72 #include <sys/endian.h>
   73 #include <sys/kernel.h>
   74 #include <sys/module.h>
   75 #include <sys/ktr.h>
   76 #include <sys/mbuf.h>
   77 #include <sys/malloc.h>
   78 #include <sys/socket.h>
   79 #include <sys/sockio.h>
   80 
   81 #include <net/bpf.h>
   82 #include <net/ethernet.h>
   83 #include <net/if.h>
   84 #include <net/if_arp.h>
   85 #include <net/if_dl.h>
   86 #include <net/if_media.h>
   87 #include <net/if_types.h>
   88 #include <net/if_vlan_var.h>
   89 
   90 #include <netinet/in.h>
   91 #include <netinet/in_systm.h>
   92 #include <netinet/ip.h>
   93 #include <netinet/tcp.h>
   94 #include <netinet/udp.h>
   95 
   96 #include <dev/mii/mii.h>
   97 #include <dev/mii/miivar.h>
   98 
   99 #include <machine/bus.h>
  100 
  101 #include <dev/hme/if_hmereg.h>
  102 #include <dev/hme/if_hmevar.h>
  103 
  104 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
  105 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
  106 
  107 static void     hme_start(struct ifnet *);
  108 static void     hme_start_locked(struct ifnet *);
  109 static void     hme_stop(struct hme_softc *);
  110 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  111 static void     hme_tick(void *);
  112 static int      hme_watchdog(struct hme_softc *);
  113 static void     hme_init(void *);
  114 static void     hme_init_locked(struct hme_softc *);
  115 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  116 static int      hme_meminit(struct hme_softc *);
  117 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  118     u_int32_t, u_int32_t);
  119 static void     hme_mifinit(struct hme_softc *);
  120 static void     hme_setladrf(struct hme_softc *, int);
  121 
  122 static int      hme_mediachange(struct ifnet *);
  123 static int      hme_mediachange_locked(struct hme_softc *);
  124 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  125 
  126 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
  127 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  128 static void     hme_eint(struct hme_softc *, u_int);
  129 static void     hme_rint(struct hme_softc *);
  130 static void     hme_tint(struct hme_softc *);
  131 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  132 
  133 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  134 
  135 devclass_t hme_devclass;
  136 
  137 static int hme_nerr;
  138 
  139 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  140 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  141 
  142 #define HME_SPC_READ_4(spc, sc, offs) \
  143         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  144             (offs))
  145 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  146         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  147             (offs), (v))
  148 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
  149         bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  150             (offs), (l), (f))
  151 
  152 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  153 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  154 #define HME_SEB_BARRIER(sc, offs, l, f) \
  155         HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
  156 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  157 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  158 #define HME_ERX_BARRIER(sc, offs, l, f) \
  159         HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
  160 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  161 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  162 #define HME_ETX_BARRIER(sc, offs, l, f) \
  163         HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
  164 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  165 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  166 #define HME_MAC_BARRIER(sc, offs, l, f) \
  167         HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
  168 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  169 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  170 #define HME_MIF_BARRIER(sc, offs, l, f) \
  171         HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
  172 
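/*
 * For reference, how the token-pasting wrappers above expand:
 *
 *	HME_SEB_READ_4(sc, HME_SEBI_STAT)
 * becomes
 *	bus_space_read_4(sc->sc_sebt, sc->sc_sebh, HME_SEBI_STAT)
 *
 * Each register group (seb, erx, etx, mac, mif) thus carries its own bus
 * space tag and handle, which is what lets the SBus and PCI front-ends
 * map the groups independently.
 */
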
  173 #define HME_MAXERR      5
  174 #define HME_WHINE(dev, ...) do {                                        \
  175         if (hme_nerr++ < HME_MAXERR)                                    \
  176                 device_printf(dev, __VA_ARGS__);                        \
  177         if (hme_nerr == HME_MAXERR) {                                   \
  178                 device_printf(dev, "too many errors; not reporting "    \
  179                     "any more\n");                                      \
  180         }                                                               \
  181 } while(0)
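
/*
 * Example use of the rate-limited reporting macro above (this is how
 * hme_eint() calls it further down):
 *
 *	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 *
 * Once hme_nerr reaches HME_MAXERR, a single "too many errors" notice is
 * printed and all further reports are suppressed.
 */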
  182 
  183 /* Support oversized VLAN frames. */
  184 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
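/*
 * With the standard values ETHER_MAX_LEN = 1518 and ETHER_VLAN_ENCAP_LEN = 4,
 * HME_MAX_FRAMESIZE works out to 1522 bytes, the usual maximum for a single
 * VLAN-tagged frame including the FCS.
 */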
  185 
  186 int
  187 hme_config(struct hme_softc *sc)
  188 {
  189         struct ifnet *ifp;
  190         struct mii_softc *child;
  191         bus_size_t size;
  192         int error, rdesc, tdesc, i;
  193 
  194         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  195         if (ifp == NULL)
  196                 return (ENOSPC);
  197 
  198         /*
  199          * HME common initialization.
  200          *
  201          * hme_softc fields that must be initialized by the front-end:
  202          *
  203          * the DMA bus tag:
  204          *      sc_dmatag
  205          *
   206          * the bus handles, tags and offsets (split up for SBus compatibility):
  207          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  208          *      sc_erx{t,h,o}   (Receiver Unit registers)
  209          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  210          *      sc_mac{t,h,o}   (MAC registers)
  211          *      sc_mif{t,h,o}   (Management Interface registers)
  212          *
  213          * the maximum bus burst size:
  214          *      sc_burst
   215          * (A hypothetical sketch of this front-end contract follows hme_config().)
  216          */
  217 
  218         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
  219 
  220         /* Make sure the chip is stopped. */
  221         HME_LOCK(sc);
  222         hme_stop(sc);
  223         HME_UNLOCK(sc);
  224 
  225         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
  226             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  227             BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
  228             NULL, NULL, &sc->sc_pdmatag);
  229         if (error)
  230                 goto fail_ifnet;
  231 
  232         /*
  233          * Create control, RX and TX mbuf DMA tags.
  234          * Buffer descriptors must be aligned on a 2048 byte boundary;
  235          * take this into account when calculating the size. Note that
  236          * the maximum number of descriptors (256) occupies 2048 bytes,
  237          * so we allocate that much regardless of HME_N*DESC.
  238          */
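        /*
         * Worked out: at 8 bytes per descriptor (HME_XD_SIZE), 256
         * descriptors fill exactly 2048 bytes; one such region per ring
         * (TX and RX) gives the 4096 bytes requested below.
         */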
  239         size = 4096;
  240         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  241             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  242             1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
  243         if (error)
  244                 goto fail_ptag;
  245 
  246         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  247             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  248             1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
  249         if (error)
  250                 goto fail_ctag;
  251 
  252         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  253             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  254             MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
  255             NULL, NULL, &sc->sc_tdmatag);
  256         if (error)
  257                 goto fail_rtag;
  258 
  259         /* Allocate the control DMA buffer. */
  260         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  261             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
  262         if (error != 0) {
  263                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  264                 goto fail_ttag;
  265         }
  266 
  267         /* Load the control DMA buffer. */
  268         sc->sc_rb.rb_dmabase = 0;
  269         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  270             sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  271             sc->sc_rb.rb_dmabase == 0) {
  272                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  273                     error);
  274                 goto fail_free;
  275         }
  276         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  277             sc->sc_rb.rb_dmabase);
  278 
  279         /*
   280          * Prepare the RX descriptors. rdesc serves as a marker for the last
  281          * processed descriptor and may be used later on.
  282          */
  283         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  284                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  285                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  286                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  287                 if (error != 0)
  288                         goto fail_rxdesc;
  289         }
  290         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  291             &sc->sc_rb.rb_spare_dmamap);
  292         if (error != 0)
  293                 goto fail_rxdesc;
  294         /* Same for the TX descs. */
  295         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  296                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  297                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  298                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  299                 if (error != 0)
  300                         goto fail_txdesc;
  301         }
  302 
  303         sc->sc_csum_features = HME_CSUM_FEATURES;
  304         /* Initialize ifnet structure. */
  305         ifp->if_softc = sc;
  306         if_initname(ifp, device_get_name(sc->sc_dev),
  307             device_get_unit(sc->sc_dev));
  308         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  309         ifp->if_start = hme_start;
  310         ifp->if_ioctl = hme_ioctl;
  311         ifp->if_init = hme_init;
  312         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  313         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  314         IFQ_SET_READY(&ifp->if_snd);
  315 
  316         hme_mifinit(sc);
  317 
  318         /*
   319          * The DP83840A PHYs used with HME chips don't properly advertise
   320          * their media capabilities themselves, so force writing the ANAR
   321          * according to the BMSR in mii_phy_setmedia().
  322          */
  323         error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  324             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
  325             MII_OFFSET_ANY, MIIF_FORCEANEG);
  326         i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  327             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
  328             MII_OFFSET_ANY, MIIF_FORCEANEG);
  329         if (error != 0 && i != 0) {
  330                 error = ENXIO;
  331                 device_printf(sc->sc_dev, "attaching PHYs failed\n");
  332                 goto fail_rxdesc;
  333         }
  334         sc->sc_mii = device_get_softc(sc->sc_miibus);
  335 
  336         /*
  337          * Walk along the list of attached MII devices and
  338          * establish an `MII instance' to `PHY number'
  339          * mapping. We'll use this mapping to enable the MII
  340          * drivers of the external transceiver according to
  341          * the currently selected media.
  342          */
  343         sc->sc_phys[0] = sc->sc_phys[1] = -1;
  344         LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
  345                 /*
  346                  * Note: we support just two PHYs: the built-in
  347                  * internal device and an external on the MII
  348                  * connector.
  349                  */
  350                 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
  351                     child->mii_phy != HME_PHYAD_INTERNAL) ||
  352                     child->mii_inst > 1) {
  353                         device_printf(sc->sc_dev, "cannot accommodate "
  354                             "MII device %s at phy %d, instance %d\n",
  355                             device_get_name(child->mii_dev),
  356                             child->mii_phy, child->mii_inst);
  357                         continue;
  358                 }
  359 
  360                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  361         }
  362 
  363         /* Attach the interface. */
  364         ether_ifattach(ifp, sc->sc_enaddr);
  365 
  366         /*
  367          * Tell the upper layer(s) we support long frames/checksum offloads.
  368          */
  369         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  370         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  371         ifp->if_hwassist |= sc->sc_csum_features;
  372         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  373         return (0);
  374 
  375 fail_txdesc:
  376         for (i = 0; i < tdesc; i++) {
  377                 bus_dmamap_destroy(sc->sc_tdmatag,
  378                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  379         }
  380         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  381 fail_rxdesc:
  382         for (i = 0; i < rdesc; i++) {
  383                 bus_dmamap_destroy(sc->sc_rdmatag,
  384                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  385         }
  386         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  387 fail_free:
  388         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  389 fail_ttag:
  390         bus_dma_tag_destroy(sc->sc_tdmatag);
  391 fail_rtag:
  392         bus_dma_tag_destroy(sc->sc_rdmatag);
  393 fail_ctag:
  394         bus_dma_tag_destroy(sc->sc_cdmatag);
  395 fail_ptag:
  396         bus_dma_tag_destroy(sc->sc_pdmatag);
  397 fail_ifnet:
  398         if_free(ifp);
  399         return (error);
  400 }
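
/*
 * A minimal sketch of the front-end contract documented at the top of
 * hme_config() (hypothetical, compiled-out code; real front-ends also
 * handle interrupt setup, resource cleanup and error paths):
 */
#if 0
static int
example_attach(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);

	sc->sc_dev = dev;
	/*
	 * Map the device registers, then derive one bus space tag/handle
	 * pair per register group, e.g. with bus_space_subregion(9) on PCI
	 * parts, where all groups live in a single BAR:
	 *
	 *	sc_sebt/sc_sebh    Shared Ethernet Block
	 *	sc_erxt/sc_erxh    Receiver Unit
	 *	sc_etxt/sc_etxh    Transmitter Unit
	 *	sc_mact/sc_mach    MAC
	 *	sc_mift/sc_mifh    Management Interface
	 */
	sc->sc_burst = 64;	/* maximum supported bus burst size */
	/* Read the station address into sc->sc_enaddr here. */
	return (hme_config(sc));
}
#endif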
  401 
  402 void
  403 hme_detach(struct hme_softc *sc)
  404 {
  405         struct ifnet *ifp = sc->sc_ifp;
  406         int i;
  407 
  408         HME_LOCK(sc);
  409         hme_stop(sc);
  410         HME_UNLOCK(sc);
  411         callout_drain(&sc->sc_tick_ch);
  412         ether_ifdetach(ifp);
  413         if_free(ifp);
  414         device_delete_child(sc->sc_dev, sc->sc_miibus);
  415 
  416         for (i = 0; i < HME_NTXQ; i++) {
  417                 bus_dmamap_destroy(sc->sc_tdmatag,
  418                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  419         }
  420         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  421         for (i = 0; i < HME_NRXDESC; i++) {
  422                 bus_dmamap_destroy(sc->sc_rdmatag,
  423                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  424         }
  425         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  426             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  427         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  428         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  429         bus_dma_tag_destroy(sc->sc_tdmatag);
  430         bus_dma_tag_destroy(sc->sc_rdmatag);
  431         bus_dma_tag_destroy(sc->sc_cdmatag);
  432         bus_dma_tag_destroy(sc->sc_pdmatag);
  433 }
  434 
  435 void
  436 hme_suspend(struct hme_softc *sc)
  437 {
  438 
  439         HME_LOCK(sc);
  440         hme_stop(sc);
  441         HME_UNLOCK(sc);
  442 }
  443 
  444 void
  445 hme_resume(struct hme_softc *sc)
  446 {
  447         struct ifnet *ifp = sc->sc_ifp;
  448 
  449         HME_LOCK(sc);
  450         if ((ifp->if_flags & IFF_UP) != 0)
  451                 hme_init_locked(sc);
  452         HME_UNLOCK(sc);
  453 }
  454 
  455 static void
  456 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  457 {
  458         struct hme_softc *sc = (struct hme_softc *)xsc;
  459 
  460         if (error != 0)
  461                 return;
  462         KASSERT(nsegs == 1,
  463             ("%s: too many DMA segments (%d)", __func__, nsegs));
  464         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  465 }
  466 
  467 static void
  468 hme_tick(void *arg)
  469 {
  470         struct hme_softc *sc = arg;
  471         struct ifnet *ifp;
  472 
  473         HME_LOCK_ASSERT(sc, MA_OWNED);
  474 
  475         ifp = sc->sc_ifp;
  476         /*
  477          * Unload collision counters
  478          */
  479         ifp->if_collisions +=
  480                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
  481                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
  482                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
  483                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
  484 
  485         /*
  486          * then clear the hardware counters.
  487          */
  488         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  489         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  490         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  491         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  492 
  493         mii_tick(sc->sc_mii);
  494 
  495         if (hme_watchdog(sc) == EJUSTRETURN)
  496                 return;
  497 
  498         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  499 }
  500 
  501 static void
  502 hme_stop(struct hme_softc *sc)
  503 {
  504         u_int32_t v;
  505         int n;
  506 
  507         callout_stop(&sc->sc_tick_ch);
  508         sc->sc_wdog_timer = 0;
  509         sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  510         sc->sc_flags &= ~HME_LINK;
  511 
  512         /* Mask all interrupts */
  513         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
  514 
  515         /* Reset transmitter and receiver */
  516         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  517             HME_SEB_RESET_ERX);
  518         HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
  519             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  520         for (n = 0; n < 20; n++) {
  521                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  522                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  523                         return;
  524                 DELAY(20);
  525         }
  526 
  527         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  528 }
  529 
  530 /*
  531  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  532  * ring for subsequent use.
  533  */
  534 static __inline void
  535 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  536 {
  537 
  538         /*
   539          * A packet was dropped; reinitialize the descriptor and turn
   540          * ownership back over to the hardware.
  541          */
  542         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
  543             ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
  544             &sc->sc_rb.rb_rxdesc[ix])));
  545 }
  546 
  547 static int
  548 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  549 {
  550         struct hme_rxdesc *rd;
  551         struct mbuf *m;
  552         bus_dma_segment_t segs[1];
  553         bus_dmamap_t map;
  554         uintptr_t b;
  555         int a, unmap, nsegs;
  556 
  557         rd = &sc->sc_rb.rb_rxdesc[ri];
  558         unmap = rd->hrx_m != NULL;
  559         if (unmap && keepold) {
  560                 /*
  561                  * Reinitialize the descriptor flags, as they may have been
  562                  * altered by the hardware.
  563                  */
  564                 hme_discard_rxbuf(sc, ri);
  565                 return (0);
  566         }
  567         if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
  568                 return (ENOBUFS);
  569         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  570         b = mtod(m, uintptr_t);
  571         /*
  572          * Required alignment boundary. At least 16 is needed, but since
  573          * the mapping must be done in a way that a burst can start on a
  574          * natural boundary we might need to extend this.
  575          */
  576         a = imax(HME_MINRXALIGN, sc->sc_burst);
  577         /*
   578          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  579          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  580          * alignment of the header adjacent to the ethernet header, which
  581          * should be sufficient in all cases. Nevertheless, this second-guesses
  582          * ALIGN().
  583          */
  584         m_adj(m, roundup2(b, a) - b);
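        /*
         * roundup2(b, a) rounds the buffer start up to the next multiple
         * of a (a power of 2), so the m_adj() above trims between 0 and
         * a - 1 leading bytes; e.g. b ending in 0x802 with a = 16 trims
         * 14 bytes.
         */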
  585         if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  586             m, segs, &nsegs, 0) != 0) {
  587                 m_freem(m);
  588                 return (ENOBUFS);
  589         }
  590         /* If nsegs is wrong then the stack is corrupt. */
  591         KASSERT(nsegs == 1,
  592             ("%s: too many DMA segments (%d)", __func__, nsegs));
  593         if (unmap) {
  594                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  595                     BUS_DMASYNC_POSTREAD);
  596                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  597         }
  598         map = rd->hrx_dmamap;
  599         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  600         sc->sc_rb.rb_spare_dmamap = map;
  601         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  602         HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  603             segs[0].ds_addr);
  604         rd->hrx_m = m;
  605         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  606             HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  607         return (0);
  608 }
  609 
  610 static int
  611 hme_meminit(struct hme_softc *sc)
  612 {
  613         struct hme_ring *hr = &sc->sc_rb;
  614         struct hme_txdesc *td;
  615         bus_addr_t dma;
  616         caddr_t p;
  617         unsigned int i;
  618         int error;
  619 
  620         p = hr->rb_membase;
  621         dma = hr->rb_dmabase;
  622 
  623         /*
  624          * Allocate transmit descriptors
  625          */
  626         hr->rb_txd = p;
  627         hr->rb_txddma = dma;
  628         p += HME_NTXDESC * HME_XD_SIZE;
  629         dma += HME_NTXDESC * HME_XD_SIZE;
  630         /*
  631          * We have reserved descriptor space until the next 2048 byte
  632          * boundary.
  633          */
  634         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  635         p = (caddr_t)roundup((u_long)p, 2048);
  636 
  637         /*
  638          * Allocate receive descriptors
  639          */
  640         hr->rb_rxd = p;
  641         hr->rb_rxddma = dma;
  642         p += HME_NRXDESC * HME_XD_SIZE;
  643         dma += HME_NRXDESC * HME_XD_SIZE;
   644         /* Again move forward to the next 2048 byte boundary. */
  645         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  646         p = (caddr_t)roundup((u_long)p, 2048);
  647 
  648         /*
  649          * Initialize transmit buffer descriptors
  650          */
  651         for (i = 0; i < HME_NTXDESC; i++) {
  652                 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  653                 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  654         }
  655 
  656         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  657         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  658         for (i = 0; i < HME_NTXQ; i++) {
  659                 td = &sc->sc_rb.rb_txdesc[i];
  660                 if (td->htx_m != NULL) {
  661                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  662                             BUS_DMASYNC_POSTWRITE);
  663                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  664                         m_freem(td->htx_m);
  665                         td->htx_m = NULL;
  666                 }
  667                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  668         }
  669 
  670         /*
  671          * Initialize receive buffer descriptors
  672          */
  673         for (i = 0; i < HME_NRXDESC; i++) {
  674                 error = hme_add_rxbuf(sc, i, 1);
  675                 if (error != 0)
  676                         return (error);
  677         }
  678 
  679         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  680             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  681 
  682         hr->rb_tdhead = hr->rb_tdtail = 0;
  683         hr->rb_td_nbusy = 0;
  684         hr->rb_rdtail = 0;
  685         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  686             hr->rb_txddma);
  687         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  688             hr->rb_rxddma);
  689         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  690             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  691         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  692             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  693         return (0);
  694 }
  695 
  696 static int
  697 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  698     u_int32_t clr, u_int32_t set)
  699 {
  700         int i = 0;
  701 
  702         val &= ~clr;
  703         val |= set;
  704         HME_MAC_WRITE_4(sc, reg, val);
  705         HME_MAC_BARRIER(sc, reg, 4,
  706             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  707         if (clr == 0 && set == 0)
  708                 return (1);     /* just write, no bits to wait for */
  709         do {
  710                 DELAY(100);
  711                 i++;
  712                 val = HME_MAC_READ_4(sc, reg);
  713                 if (i > 40) {
  714                         /* After 3.5ms, we should have been done. */
  715                         device_printf(sc->sc_dev, "timeout while writing to "
  716                             "MAC configuration register\n");
  717                         return (0);
  718                 }
  719         } while ((val & clr) != 0 && (val & set) != set);
  720         return (1);
  721 }
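
/*
 * Sketch of a typical caller of hme_mac_bitflip() (a hedged example; the
 * real one is hme_setladrf(), which falls outside this excerpt): disable
 * the RX MAC and wait for the hardware to acknowledge before rewriting
 * the hash filter.
 *
 *	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 *	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, v, HME_MAC_RXCFG_ENABLE, 0))
 *		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 */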
  722 
  723 /*
  724  * Initialization of interface; set up initialization block
  725  * and transmit/receive descriptor rings.
  726  */
  727 static void
  728 hme_init(void *xsc)
  729 {
  730         struct hme_softc *sc = (struct hme_softc *)xsc;
  731 
  732         HME_LOCK(sc);
  733         hme_init_locked(sc);
  734         HME_UNLOCK(sc);
  735 }
  736 
  737 static void
  738 hme_init_locked(struct hme_softc *sc)
  739 {
  740         struct ifnet *ifp = sc->sc_ifp;
  741         u_int8_t *ea;
  742         u_int32_t n, v;
  743 
  744         HME_LOCK_ASSERT(sc, MA_OWNED);
  745 
  746         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
  747                 return;
  748 
  749         /*
  750          * Initialization sequence. The numbered steps below correspond
  751          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  752          * Channel Engine manual (part of the PCIO manual).
  753          * See also the STP2002-STQ document from Sun Microsystems.
  754          */
  755 
  756         /* step 1 & 2. Reset the Ethernet Channel */
  757         hme_stop(sc);
  758 
  759         /* Re-initialize the MIF */
  760         hme_mifinit(sc);
  761 
  762 #if 0
  763         /* Mask all MIF interrupts, just in case */
  764         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  765 #endif
  766 
  767         /* step 3. Setup data structures in host memory */
  768         if (hme_meminit(sc) != 0) {
   769                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  770                 return;
  771         }
  772 
  773         /* step 4. TX MAC registers & counters */
  774         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  775         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  776         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  777         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  778         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  779 
  780         /* Load station MAC address */
  781         ea = IF_LLADDR(ifp);
  782         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  783         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  784         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  785 
  786         /*
  787          * Init seed for backoff
  788          * (source suggested by manual: low 10 bits of MAC address)
  789          */
  790         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  791         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  792 
   793         /* Note: Accepting power-on defaults for other MAC registers here. */
  794 
  795         /* step 5. RX MAC registers & counters */
  796         hme_setladrf(sc, 0);
  797 
  798         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  799         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  800         /* Transmit Descriptor ring size: in increments of 16 */
  801         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  802 
  803         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  804         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  805 
  806         /* step 8. Global Configuration & Interrupt Mask */
  807         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  808             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  809                 HME_SEB_STAT_HOSTTOTX |
  810                 HME_SEB_STAT_RXTOHOST |
  811                 HME_SEB_STAT_TXALL |
  812                 HME_SEB_STAT_TXPERR |
  813                 HME_SEB_STAT_RCNTEXP |
  814                 HME_SEB_STAT_ALL_ERRORS ));
  815 
  816         switch (sc->sc_burst) {
  817         default:
  818                 v = 0;
  819                 break;
  820         case 16:
  821                 v = HME_SEB_CFG_BURST16;
  822                 break;
  823         case 32:
  824                 v = HME_SEB_CFG_BURST32;
  825                 break;
  826         case 64:
  827                 v = HME_SEB_CFG_BURST64;
  828                 break;
  829         }
  830         /*
   831          * Blindly setting 64 bit transfers may hang PCI cards (Cheerio?).
   832          * Allowing 64 bit transfers breaks TX checksum offload as well.
   833          * It is unknown whether this comes from a hardware bug or from the
   834          * driver's DMA scheme.
   835          *
   836          * if ((sc->sc_flags & HME_PCI) == 0)
  837          *      v |= HME_SEB_CFG_64BIT;
  838          */
  839         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  840 
  841         /* step 9. ETX Configuration: use mostly default values */
  842 
  843         /* Enable DMA */
  844         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  845         v |= HME_ETX_CFG_DMAENABLE;
  846         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  847 
  848         /* step 10. ERX Configuration */
  849         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  850 
  851         /* Encode Receive Descriptor ring size: four possible values */
  852         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  853         switch (HME_NRXDESC) {
  854         case 32:
  855                 v |= HME_ERX_CFG_RINGSIZE32;
  856                 break;
  857         case 64:
  858                 v |= HME_ERX_CFG_RINGSIZE64;
  859                 break;
  860         case 128:
  861                 v |= HME_ERX_CFG_RINGSIZE128;
  862                 break;
  863         case 256:
  864                 v |= HME_ERX_CFG_RINGSIZE256;
  865                 break;
  866         default:
  867                 printf("hme: invalid Receive Descriptor ring size\n");
  868                 break;
  869         }
  870 
  871         /* Enable DMA, fix RX first byte offset. */
  872         v &= ~HME_ERX_CFG_FBO_MASK;
  873         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  874         /* RX TCP/UDP checksum offset */
  875         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  876         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  877         v |= n;
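        /*
         * Worked out: ETHER_HDR_LEN (14) plus a minimal 20 byte IP header
         * is 34 bytes, so n encodes a start offset of 17; the chip counts
         * the checksum start in 16 bit half-words.
         */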
  878         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  879         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  880 
  881         /* step 11. XIF Configuration */
  882         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  883         v |= HME_MAC_XIF_OE;
  884         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  885         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  886 
  887         /* step 12. RX_MAC Configuration Register */
  888         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  889         v |= HME_MAC_RXCFG_ENABLE;
  890         v &= ~(HME_MAC_RXCFG_DCRCS);
  891         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  892         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  893 
  894         /* step 13. TX_MAC Configuration Register */
  895         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  896         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  897         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  898         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  899 
  900         /* step 14. Issue Transmit Pending command */
  901 
  902 #ifdef HMEDEBUG
  903         /* Debug: double-check. */
  904         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  905             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  906             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  907             HME_ERX_READ_4(sc, HME_ERXI_RING),
  908             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  909         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  910             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  911             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  912             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  913         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  914             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  915             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  916 #endif
  917 
  918         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  919         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  920 
  921         /* Set the current media. */
  922         hme_mediachange_locked(sc);
  923 
  924         /* Start the one second timer. */
  925         sc->sc_wdog_timer = 0;
  926         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  927 }
  928 
  929 /*
  930  * Routine to DMA map an mbuf chain, set up the descriptor rings
  931  * accordingly and start the transmission.
   932  * Returns 0 on success, or an errno (e.g. ENOBUFS when there are not
   933  * enough free descriptors to map the packet).
  934  *
  935  * XXX: this relies on the fact that segments returned by
  936  * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
   937  * boundary onward (i.e. potentially before ds_addr) to the first
  938  * boundary beyond the end.  This is usually a safe assumption to
  939  * make, but is not documented.
  940  */
  941 static int
  942 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
  943 {
  944         bus_dma_segment_t segs[HME_NTXSEGS];
  945         struct hme_txdesc *htx;
  946         struct ip *ip;
  947         struct mbuf *m;
  948         caddr_t txd;
  949         int error, i, nsegs, pci, ri, si;
  950         uint32_t cflags, flags;
  951 
  952         if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  953                 return (ENOBUFS);
  954 
  955         cflags = 0;
  956         if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
  957                 if (M_WRITABLE(*m0) == 0) {
  958                         m = m_dup(*m0, M_NOWAIT);
  959                         m_freem(*m0);
  960                         *m0 = m;
  961                         if (m == NULL)
  962                                 return (ENOBUFS);
  963                 }
  964                 i = sizeof(struct ether_header);
  965                 m = m_pullup(*m0, i + sizeof(struct ip));
  966                 if (m == NULL) {
  967                         *m0 = NULL;
  968                         return (ENOBUFS);
  969                 }
  970                 ip = (struct ip *)(mtod(m, caddr_t) + i);
  971                 i += (ip->ip_hl << 2);
  972                 cflags = i << HME_XD_TXCKSUM_SSHIFT |
  973                     ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
  974                     HME_XD_TXCKSUM;
  975                 *m0 = m;
  976         }
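        /*
         * Example of the encoding above (an illustration only): for a TCP
         * segment over an option-less IP header, i = 14 + 20 = 34, and
         * with csum_data = offsetof(struct tcphdr, th_sum) = 16 the
         * checksum gets stuffed at byte offset 50.
         */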
  977 
  978         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  979             *m0, segs, &nsegs, 0);
  980         if (error == EFBIG) {
  981                 m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
  982                 if (m == NULL) {
  983                         m_freem(*m0);
  984                         *m0 = NULL;
  985                         return (ENOMEM);
  986                 }
  987                 *m0 = m;
  988                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  989                     *m0, segs, &nsegs, 0);
  990                 if (error != 0) {
  991                         m_freem(*m0);
  992                         *m0 = NULL;
  993                         return (error);
  994                 }
  995         } else if (error != 0)
  996                 return (error);
  997         /* If nsegs is wrong then the stack is corrupt. */
  998         KASSERT(nsegs <= HME_NTXSEGS,
  999             ("%s: too many DMA segments (%d)", __func__, nsegs));
 1000         if (nsegs == 0) {
 1001                 m_freem(*m0);
 1002                 *m0 = NULL;
 1003                 return (EIO);
 1004         }
 1005         if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
 1006                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1007                 /* Retry with m_collapse(9)? */
 1008                 return (ENOBUFS);
 1009         }
 1010         bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
 1011 
 1012         si = ri = sc->sc_rb.rb_tdhead;
 1013         txd = sc->sc_rb.rb_txd;
 1014         pci = sc->sc_flags & HME_PCI;
 1015         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
 1016             HME_XD_GETFLAGS(pci, txd, ri));
 1017         for (i = 0; i < nsegs; i++) {
 1018                 /* Fill the ring entry. */
 1019                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
 1020                 if (i == 0)
 1021                         flags |= HME_XD_SOP | cflags;
 1022                 else
 1023                         flags |= HME_XD_OWN | cflags;
 1024                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1025                     ri, si, flags);
 1026                 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
 1027                 HME_XD_SETFLAGS(pci, txd, ri, flags);
 1028                 sc->sc_rb.rb_td_nbusy++;
 1029                 htx->htx_lastdesc = ri;
 1030                 ri = (ri + 1) % HME_NTXDESC;
 1031         }
 1032         sc->sc_rb.rb_tdhead = ri;
 1033 
 1034         /* set EOP on the last descriptor */
 1035         ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1036         flags = HME_XD_GETFLAGS(pci, txd, ri);
 1037         flags |= HME_XD_EOP;
 1038         CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
 1039             flags);
 1040         HME_XD_SETFLAGS(pci, txd, ri, flags);
 1041 
  1042         /* Turn ownership of the first descriptor over to the hme. */
 1043         flags = HME_XD_GETFLAGS(pci, txd, si);
 1044         flags |= HME_XD_OWN;
 1045         CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
 1046             ri, flags);
 1047         HME_XD_SETFLAGS(pci, txd, si, flags);
 1048 
 1049         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1050         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
 1051         htx->htx_m = *m0;
 1052 
 1053         /* start the transmission. */
 1054         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1055 
 1056         return (0);
 1057 }
 1058 
 1059 /*
 1060  * Pass a packet to the higher levels.
 1061  */
 1062 static void
 1063 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1064 {
 1065         struct ifnet *ifp = sc->sc_ifp;
 1066         struct mbuf *m;
 1067 
 1068         if (len <= sizeof(struct ether_header) ||
 1069             len > HME_MAX_FRAMESIZE) {
 1070 #ifdef HMEDEBUG
 1071                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1072                     len);
 1073 #endif
 1074                 ifp->if_ierrors++;
 1075                 hme_discard_rxbuf(sc, ix);
 1076                 return;
 1077         }
 1078 
 1079         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1080         CTR1(KTR_HME, "hme_read: len %d", len);
 1081 
 1082         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1083                 /*
 1084                  * hme_add_rxbuf will leave the old buffer in the ring until
 1085                  * it is sure that a new buffer can be mapped. If it can not,
 1086                  * drop the packet, but leave the interface up.
 1087                  */
 1088                 ifp->if_iqdrops++;
 1089                 hme_discard_rxbuf(sc, ix);
 1090                 return;
 1091         }
 1092 
 1093         ifp->if_ipackets++;
 1094 
 1095         m->m_pkthdr.rcvif = ifp;
 1096         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1097         m_adj(m, HME_RXOFFS);
 1098         /* RX TCP/UDP checksum */
 1099         if (ifp->if_capenable & IFCAP_RXCSUM)
 1100                 hme_rxcksum(m, flags);
 1101         /* Pass the packet up. */
 1102         HME_UNLOCK(sc);
 1103         (*ifp->if_input)(ifp, m);
 1104         HME_LOCK(sc);
 1105 }
 1106 
 1107 static void
 1108 hme_start(struct ifnet *ifp)
 1109 {
 1110         struct hme_softc *sc = ifp->if_softc;
 1111 
 1112         HME_LOCK(sc);
 1113         hme_start_locked(ifp);
 1114         HME_UNLOCK(sc);
 1115 }
 1116 
 1117 static void
 1118 hme_start_locked(struct ifnet *ifp)
 1119 {
 1120         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1121         struct mbuf *m;
 1122         int error, enq = 0;
 1123 
 1124         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1125             IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
 1126                 return;
 1127 
 1128         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1129             sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
 1130                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1131                 if (m == NULL)
 1132                         break;
 1133 
 1134                 error = hme_load_txmbuf(sc, &m);
 1135                 if (error != 0) {
 1136                         if (m == NULL)
 1137                                 break;
 1138                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1139                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1140                         break;
 1141                 }
 1142                 enq++;
 1143                 BPF_MTAP(ifp, m);
 1144         }
 1145 
 1146         if (enq > 0) {
 1147                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1148                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1149                 sc->sc_wdog_timer = 5;
 1150         }
 1151 }
 1152 
 1153 /*
 1154  * Transmit interrupt.
 1155  */
 1156 static void
 1157 hme_tint(struct hme_softc *sc)
 1158 {
 1159         caddr_t txd;
 1160         struct ifnet *ifp = sc->sc_ifp;
 1161         struct hme_txdesc *htx;
 1162         unsigned int ri, txflags;
 1163 
 1164         txd = sc->sc_rb.rb_txd;
 1165         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1166         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1167         /* Fetch current position in the transmit ring */
 1168         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1169                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1170                         CTR0(KTR_HME, "hme_tint: not busy!");
 1171                         break;
 1172                 }
 1173 
 1174                 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
 1175                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1176 
 1177                 if ((txflags & HME_XD_OWN) != 0)
 1178                         break;
 1179 
 1180                 CTR0(KTR_HME, "hme_tint: not owned");
 1181                 --sc->sc_rb.rb_td_nbusy;
 1182                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1183 
 1184                 /* Complete packet transmitted? */
 1185                 if ((txflags & HME_XD_EOP) == 0)
 1186                         continue;
 1187 
 1188                 KASSERT(htx->htx_lastdesc == ri,
 1189                     ("%s: ring indices skewed: %d != %d!",
 1190                     __func__, htx->htx_lastdesc, ri));
 1191                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1192                     BUS_DMASYNC_POSTWRITE);
 1193                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1194 
 1195                 ifp->if_opackets++;
 1196                 m_freem(htx->htx_m);
 1197                 htx->htx_m = NULL;
 1198                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1199                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1200                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1201         }
 1202         sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
 1203 
 1204         /* Update ring */
 1205         sc->sc_rb.rb_tdtail = ri;
 1206 
 1207         hme_start_locked(ifp);
 1208 }
 1209 
 1210 /*
 1211  * RX TCP/UDP checksum
 1212  */
 1213 static void
 1214 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1215 {
 1216         struct ether_header *eh;
 1217         struct ip *ip;
 1218         struct udphdr *uh;
 1219         int32_t hlen, len, pktlen;
 1220         u_int16_t cksum, *opts;
 1221         u_int32_t temp32;
 1222 
 1223         pktlen = m->m_pkthdr.len;
 1224         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1225                 return;
 1226         eh = mtod(m, struct ether_header *);
 1227         if (eh->ether_type != htons(ETHERTYPE_IP))
 1228                 return;
 1229         ip = (struct ip *)(eh + 1);
 1230         if (ip->ip_v != IPVERSION)
 1231                 return;
 1232 
 1233         hlen = ip->ip_hl << 2;
 1234         pktlen -= sizeof(struct ether_header);
 1235         if (hlen < sizeof(struct ip))
 1236                 return;
 1237         if (ntohs(ip->ip_len) < hlen)
 1238                 return;
 1239         if (ntohs(ip->ip_len) != pktlen)
 1240                 return;
 1241         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1242                 return; /* can't handle fragmented packet */
 1243 
 1244         switch (ip->ip_p) {
 1245         case IPPROTO_TCP:
 1246                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1247                         return;
 1248                 break;
 1249         case IPPROTO_UDP:
 1250                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1251                         return;
 1252                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1253                 if (uh->uh_sum == 0)
 1254                         return; /* no checksum */
 1255                 break;
 1256         default:
 1257                 return;
 1258         }
 1259 
 1260         cksum = ~(flags & HME_XD_RXCKSUM);
 1261         /* checksum fixup for IP options */
 1262         len = hlen - sizeof(struct ip);
 1263         if (len > 0) {
 1264                 opts = (u_int16_t *)(ip + 1);
 1265                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1266                         temp32 = cksum - *opts;
 1267                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1268                         cksum = temp32 & 65535;
 1269                 }
 1270         }
 1271         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1272         m->m_pkthdr.csum_data = cksum;
 1273 }
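
/*
 * Note on the IP options fixup above (an illustration, not driver code):
 * the chip starts summing right after a minimal 20 byte IP header (see
 * the CSUMSTART programming in hme_init_locked()), so any option words
 * it wrongly included are subtracted back out in one's complement
 * arithmetic.  For example, cksum = 0x0001 and *opts = 0x0003 give
 * temp32 = 0xfffffffe, which folds to 0xfffd, the 16 bit one's
 * complement representation of -2.
 */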
 1274 
 1275 /*
 1276  * Receive interrupt.
 1277  */
 1278 static void
 1279 hme_rint(struct hme_softc *sc)
 1280 {
 1281         caddr_t xdr = sc->sc_rb.rb_rxd;
 1282         struct ifnet *ifp = sc->sc_ifp;
 1283         unsigned int ri, len;
 1284         int progress = 0;
 1285         u_int32_t flags;
 1286 
 1287         /*
 1288          * Process all buffers with valid data.
 1289          */
 1290         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1291         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1292                 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
 1293                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1294                 if ((flags & HME_XD_OWN) != 0)
 1295                         break;
 1296 
 1297                 progress++;
 1298                 if ((flags & HME_XD_OFL) != 0) {
 1299                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1300                             "flags=0x%x\n", ri, flags);
 1301                         ifp->if_ierrors++;
 1302                         hme_discard_rxbuf(sc, ri);
 1303                 } else {
 1304                         len = HME_XD_DECODE_RSIZE(flags);
 1305                         hme_read(sc, ri, len, flags);
 1306                 }
 1307         }
 1308         if (progress) {
 1309                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1310                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1311         }
 1312         sc->sc_rb.rb_rdtail = ri;
 1313 }
 1314 
 1315 static void
 1316 hme_eint(struct hme_softc *sc, u_int status)
 1317 {
 1318 
 1319         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1320                 device_printf(sc->sc_dev, "XXXlink status changed: "
 1321                     "cfg=%#x, stat=%#x, sm=%#x\n",
 1322                     HME_MIF_READ_4(sc, HME_MIFI_CFG),
 1323                     HME_MIF_READ_4(sc, HME_MIFI_STAT),
 1324                     HME_MIF_READ_4(sc, HME_MIFI_SM));
 1325                 return;
 1326         }
 1327 
  1328         /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1329         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1330                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1331                 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1332                 hme_init_locked(sc);
 1333         }
 1334 }
 1335 
 1336 void
 1337 hme_intr(void *v)
 1338 {
 1339         struct hme_softc *sc = (struct hme_softc *)v;
 1340         u_int32_t status;
 1341 
 1342         HME_LOCK(sc);
 1343         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1344         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1345 
 1346         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1347                 hme_eint(sc, status);
 1348 
 1349         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1350                 hme_rint(sc);
 1351 
 1352         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1353                 hme_tint(sc);
 1354         HME_UNLOCK(sc);
 1355 }
 1356 
 1357 static int
 1358 hme_watchdog(struct hme_softc *sc)
 1359 {
 1360         struct ifnet *ifp = sc->sc_ifp;
 1361 
 1362         HME_LOCK_ASSERT(sc, MA_OWNED);
 1363 
 1364 #ifdef HMEDEBUG
 1365         CTR1(KTR_HME, "hme_watchdog: status %x",
 1366             (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
 1367 #endif
 1368 
 1369         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1370                 return (0);
 1371 
 1372         if ((sc->sc_flags & HME_LINK) != 0)
 1373                 device_printf(sc->sc_dev, "device timeout\n");
 1374         else if (bootverbose)
 1375                 device_printf(sc->sc_dev, "device timeout (no link)\n");
 1376         ++ifp->if_oerrors;
 1377 
 1378         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1379         hme_init_locked(sc);
 1380         hme_start_locked(ifp);
 1381         return (EJUSTRETURN);
 1382 }
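
/*
 * The test above packs the whole countdown contract into one expression.
 * A hedged, standalone restatement (the helper name is illustrative):
 * a timer value of 0 means the watchdog is disarmed; otherwise each tick
 * decrements it, and the timeout fires only on the tick that reaches 0.
 */
static int
watchdog_expired(int *timer)
{

        if (*timer == 0 || --*timer != 0)
                return (0);             /* disarmed, or still counting */
        return (1);                     /* expired on this tick */
}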
 1383 
 1384 /*
 1385  * Initialize the MII Management Interface
 1386  */
 1387 static void
 1388 hme_mifinit(struct hme_softc *sc)
 1389 {
 1390         u_int32_t v;
 1391 
 1392         /*
 1393          * Configure the MIF in frame mode, polling disabled, internal PHY
 1394          * selected.
 1395          */
 1396         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1397 
 1398         /*
 1399          * If the currently selected media uses the external transceiver,
 1400          * enable its MII drivers (which effectively isolates the internal
 1401          * one and vice versa).  If the current media hasn't been set yet,
 1402          * we default to the internal transceiver.
 1403          */
 1404         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1405         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1406             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1407             HME_PHYAD_EXTERNAL)
 1408                 v |= HME_MAC_XIF_MIIENABLE;
 1409         else
 1410                 v &= ~HME_MAC_XIF_MIIENABLE;
 1411         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1412 }
 1413 
 1414 /*
 1415  * MII interface
 1416  */
 1417 int
 1418 hme_mii_readreg(device_t dev, int phy, int reg)
 1419 {
 1420         struct hme_softc *sc;
 1421         int n;
 1422         u_int32_t v;
 1423 
 1424         sc = device_get_softc(dev);
 1425         /* Select the desired PHY in the MIF configuration register */
 1426         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1427         if (phy == HME_PHYAD_EXTERNAL)
 1428                 v |= HME_MIF_CFG_PHY;
 1429         else
 1430                 v &= ~HME_MIF_CFG_PHY;
 1431         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1432 
 1433         /* Construct the frame command */
 1434         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1435             HME_MIF_FO_TAMSB |
 1436             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1437             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1438             (reg << HME_MIF_FO_REGAD_SHIFT);
 1439 
 1440         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1441         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1442             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1443         for (n = 0; n < 100; n++) {
 1444                 DELAY(1);
 1445                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1446                 if (v & HME_MIF_FO_TALSB)
 1447                         return (v & HME_MIF_FO_DATA);
 1448         }
 1449 
 1450         device_printf(sc->sc_dev, "mii_read timeout\n");
 1451         return (0);
 1452 }
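
/*
 * The frame command built above follows the IEEE 802.3 clause 22 MII
 * management frame layout: start bits, opcode, PHY address, register
 * address, turnaround, and 16 data bits.  A standalone sketch of the
 * packing; the shift values below are illustrative (the real layout is
 * given by the HME_MIF_FO_* definitions), and completion is then polled
 * via the turnaround LSB as in the loop above.
 */
#include <stdint.h>

#define ST_SHIFT        30              /* start of frame: 01 */
#define OP_SHIFT        28              /* opcode: 10 = read, 01 = write */
#define PHY_SHIFT       23              /* 5-bit PHY address */
#define REG_SHIFT       18              /* 5-bit register address */
#define TA_SHIFT        16              /* 2-bit turnaround */

static uint32_t
mii_frame(uint32_t op, uint32_t phy, uint32_t reg, uint32_t data)
{

        return ((1u << ST_SHIFT) | (op << OP_SHIFT) |
            ((phy & 0x1f) << PHY_SHIFT) | ((reg & 0x1f) << REG_SHIFT) |
            (2u << TA_SHIFT) | (data & 0xffff));
}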
 1453 
 1454 int
 1455 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1456 {
 1457         struct hme_softc *sc;
 1458         int n;
 1459         u_int32_t v;
 1460 
 1461         sc = device_get_softc(dev);
 1462         /* Select the desired PHY in the MIF configuration register */
 1463         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1464         if (phy == HME_PHYAD_EXTERNAL)
 1465                 v |= HME_MIF_CFG_PHY;
 1466         else
 1467                 v &= ~HME_MIF_CFG_PHY;
 1468         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1469 
 1470         /* Construct the frame command */
 1471         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1472             HME_MIF_FO_TAMSB                            |
 1473             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1474             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1475             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1476             (val & HME_MIF_FO_DATA);
 1477 
 1478         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1479         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1480             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1481         for (n = 0; n < 100; n++) {
 1482                 DELAY(1);
 1483                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1484                 if (v & HME_MIF_FO_TALSB)
 1485                         return (1);
 1486         }
 1487 
 1488         device_printf(sc->sc_dev, "mii_write timeout\n");
 1489         return (0);
 1490 }
 1491 
 1492 void
 1493 hme_mii_statchg(device_t dev)
 1494 {
 1495         struct hme_softc *sc;
 1496         uint32_t rxcfg, txcfg;
 1497 
 1498         sc = device_get_softc(dev);
 1499 
 1500 #ifdef HMEDEBUG
 1501         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1502                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1503 #endif
 1504 
 1505         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 1506             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 1507                 sc->sc_flags |= HME_LINK;
 1508         else
 1509                 sc->sc_flags &= ~HME_LINK;
 1510 
 1511         txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1512         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
 1513             HME_MAC_TXCFG_ENABLE, 0))
 1514                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 1515         rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1516         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
 1517             HME_MAC_RXCFG_ENABLE, 0))
 1518                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1519 
 1520         /* Set the MAC Full Duplex bit appropriately. */
 1521         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1522                 txcfg |= HME_MAC_TXCFG_FULLDPLX;
 1523         else
 1524                 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
 1525         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
 1526 
 1527         if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1528             (sc->sc_flags & HME_LINK) != 0) {
 1529                 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
 1530                     HME_MAC_TXCFG_ENABLE))
 1531                         device_printf(sc->sc_dev, "cannot enable TX MAC\n");
 1532                 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
 1533                     HME_MAC_RXCFG_ENABLE))
 1534                         device_printf(sc->sc_dev, "cannot enable RX MAC\n");
 1535         }
 1536 }
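
/*
 * hme_mac_bitflip(), defined earlier in this file, is used throughout
 * this driver as a read-modify-write that polls until the hardware
 * reflects the change (the RX enable bit may take up to 3.5ms to clear;
 * see hme_setladrf() below).  A device-independent guess at that
 * pattern, inferred only from the call sites here; all names are
 * illustrative.
 */
#include <stdint.h>

static int
reg_bitflip(volatile uint32_t *reg, uint32_t clr, uint32_t set)
{
        int n;

        *reg = (*reg & ~clr) | set;
        for (n = 0; n < 1000; n++) {
                /* a real driver would DELAY() between polls */
                if ((*reg & clr) == 0 && (*reg & set) == set)
                        return (1);     /* change took effect */
        }
        return (0);                     /* timed out; callers print a warning */
}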
 1537 
 1538 static int
 1539 hme_mediachange(struct ifnet *ifp)
 1540 {
 1541         struct hme_softc *sc = ifp->if_softc;
 1542         int error;
 1543 
 1544         HME_LOCK(sc);
 1545         error = hme_mediachange_locked(sc);
 1546         HME_UNLOCK(sc);
 1547         return (error);
 1548 }
 1549 
 1550 static int
 1551 hme_mediachange_locked(struct hme_softc *sc)
 1552 {
 1553         struct mii_softc *child;
 1554 
 1555         HME_LOCK_ASSERT(sc, MA_OWNED);
 1556 
 1557 #ifdef HMEDEBUG
 1558         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1559                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1560 #endif
 1561 
 1562         hme_mifinit(sc);
 1563 
 1564         /*
 1565          * If both PHYs are present, reset them. This is required for
 1566          * unisolating the previously isolated PHY when switching PHYs.
 1567          * As the above hme_mifinit() call will set the MII drivers in
 1568          * the XIF configuration register according to the currently
 1569          * selected media, there should be no window during which the
 1570          * data paths of both transceivers are open at the same time,
 1571          * even if the PHY device drivers use MIIF_NOISOLATE.
 1572          */
 1573         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1574                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1575                         PHY_RESET(child);
 1576         return (mii_mediachg(sc->sc_mii));
 1577 }
 1578 
 1579 static void
 1580 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1581 {
 1582         struct hme_softc *sc = ifp->if_softc;
 1583 
 1584         HME_LOCK(sc);
 1585         if ((ifp->if_flags & IFF_UP) == 0) {
 1586                 HME_UNLOCK(sc);
 1587                 return;
 1588         }
 1589 
 1590         mii_pollstat(sc->sc_mii);
 1591         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1592         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1593         HME_UNLOCK(sc);
 1594 }
 1595 
 1596 /*
 1597  * Process an ioctl request.
 1598  */
 1599 static int
 1600 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1601 {
 1602         struct hme_softc *sc = ifp->if_softc;
 1603         struct ifreq *ifr = (struct ifreq *)data;
 1604         int error = 0;
 1605 
 1606         switch (cmd) {
 1607         case SIOCSIFFLAGS:
 1608                 HME_LOCK(sc);
 1609                 if ((ifp->if_flags & IFF_UP) != 0) {
 1610                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1611                             ((ifp->if_flags ^ sc->sc_ifflags) &
 1612                             (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1613                                 hme_setladrf(sc, 1);
 1614                         else
 1615                                 hme_init_locked(sc);
 1616                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1617                         hme_stop(sc);
 1618                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1619                         sc->sc_csum_features |= CSUM_UDP;
 1620                 else
 1621                         sc->sc_csum_features &= ~CSUM_UDP;
 1622                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1623                         ifp->if_hwassist = sc->sc_csum_features;
 1624                 sc->sc_ifflags = ifp->if_flags;
 1625                 HME_UNLOCK(sc);
 1626                 break;
 1627 
 1628         case SIOCADDMULTI:
 1629         case SIOCDELMULTI:
 1630                 HME_LOCK(sc);
 1631                 hme_setladrf(sc, 1);
 1632                 HME_UNLOCK(sc);
 1633                 error = 0;
 1634                 break;
 1635         case SIOCGIFMEDIA:
 1636         case SIOCSIFMEDIA:
 1637                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1638                 break;
 1639         case SIOCSIFCAP:
 1640                 HME_LOCK(sc);
 1641                 ifp->if_capenable = ifr->ifr_reqcap;
 1642                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1643                         ifp->if_hwassist = sc->sc_csum_features;
 1644                 else
 1645                         ifp->if_hwassist = 0;
 1646                 HME_UNLOCK(sc);
 1647                 break;
 1648         default:
 1649                 error = ether_ioctl(ifp, cmd, data);
 1650                 break;
 1651         }
 1652 
 1653         return (error);
 1654 }
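
/*
 * The SIOCSIFCAP case above is driven from userland through a socket
 * ioctl.  A hedged sketch of such a caller on FreeBSD; the helper name
 * is illustrative and error handling is trimmed.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
toggle_txcsum(const char *ifname, int enable)
{
        struct ifreq ifr;
        int s;

        if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) < 0)
                return (-1);
        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
        if (ioctl(s, SIOCGIFCAP, &ifr) == 0) {
                ifr.ifr_reqcap = enable ?
                    (ifr.ifr_curcap | IFCAP_TXCSUM) :
                    (ifr.ifr_curcap & ~IFCAP_TXCSUM);
                (void)ioctl(s, SIOCSIFCAP, &ifr);
        }
        close(s);
        return (0);
}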
 1655 
 1656 /*
 1657  * Set up the logical address filter.
 1658  */
 1659 static void
 1660 hme_setladrf(struct hme_softc *sc, int reenable)
 1661 {
 1662         struct ifnet *ifp = sc->sc_ifp;
 1663         struct ifmultiaddr *inm;
 1664         u_int32_t crc;
 1665         u_int32_t hash[4];
 1666         u_int32_t macc;
 1667 
 1668         HME_LOCK_ASSERT(sc, MA_OWNED);
 1669         /* Clear the hash table. */
 1670         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1671 
 1672         /* Get the current RX configuration. */
 1673         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1674 
 1675         /*
 1676          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 1677          * and the hash filter.  The appropriate bit will be re-enabled
 1678          * below, depending on the selected mode.
 1679          */
 1680         macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 1681 
 1682         /*
 1683          * Disable the receiver while changing its state, as the
 1684          * documentation mandates.
 1685          * We must then wait until the bit clears in the register.  This
 1686          * should take at most 3.5ms.
 1687          */
 1688         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1689             HME_MAC_RXCFG_ENABLE, 0))
 1690                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1691         /* Disable the hash filter before writing to the filter registers. */
 1692         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1693             HME_MAC_RXCFG_HENABLE, 0))
 1694                 device_printf(sc->sc_dev, "cannot disable hash filter\n");
 1695 
 1696         /* Make the RX MAC really SIMPLEX. */
 1697         macc |= HME_MAC_RXCFG_ME;
 1698         if (reenable)
 1699                 macc |= HME_MAC_RXCFG_ENABLE;
 1700         else
 1701                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1702 
 1703         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1704                 macc |= HME_MAC_RXCFG_PMISC;
 1705                 goto chipit;
 1706         }
 1707         if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
 1708                 macc |= HME_MAC_RXCFG_PGRP;
 1709                 goto chipit;
 1710         }
 1711 
 1712         macc |= HME_MAC_RXCFG_HENABLE;
 1713 
 1714         /*
 1715          * Set up multicast address filter by passing all multicast addresses
 1716          * through a crc generator, and then using the high order 6 bits as an
 1717          * index into the 64 bit logical address filter.  The two high order
 1718          * bits select the word, while the remaining four bits select the
 1719          * bit within the word.
 1720          */
 1721 
 1722         if_maddr_rlock(ifp);
 1723         TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
 1724                 if (inm->ifma_addr->sa_family != AF_LINK)
 1725                         continue;
 1726                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1727                     inm->ifma_addr), ETHER_ADDR_LEN);
 1728 
 1729                 /* Just want the 6 most significant bits. */
 1730                 crc >>= 26;
 1731 
 1732                 /* Set the corresponding bit in the filter. */
 1733                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1734         }
 1735         if_maddr_runlock(ifp);
 1736 
 1737 chipit:
 1738         /* Now load the hash table into the chip */
 1739         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1740         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1741         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1742         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1743         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1744             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1745             HME_MAC_RXCFG_ME)))
 1746                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1747 }
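
/*
 * A self-contained sketch of the filter indexing above: the six most
 * significant bits of the little-endian CRC-32 of the address select
 * one of 64 filter bits; the two high-order bits pick one of four
 * 16-bit words and the four low-order bits pick the bit within that
 * word.  The bit-at-a-time CRC below stands in for ether_crc32_le()
 * (same initial value and reflected Ethernet polynomial, no final
 * inversion); all names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xffffffff;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return (crc);
}

static void
hash_set(uint16_t hash[4], const uint8_t addr[6])
{
        uint32_t crc;

        crc = crc32_le(addr, 6) >> 26;          /* keep the top 6 bits */
        hash[crc >> 4] |= 1 << (crc & 0xf);     /* word, then bit in word */
}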
