FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.35 2003/02/27 14:58:22 pk Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/6.3/sys/dev/hme/if_hme.c 172181 2007-09-15 10:45:34Z marius $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
    46  * The HME is found, e.g., as part of the PCIO PCI multi-function device.
    47  * It supports TX gathering and TX and RX checksum offloading.
    48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters usually start on roughly 2^11 byte boundaries,
    50  * and skipping 2 bytes makes the header following the ethernet header land on
    51  * a natural boundary. This ensures minimal wastage in the most common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
    57  * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum offloading.
    58  * In reality, the same technique can be used for UDP datagrams too. However, the
    59  * hardware doesn't compensate a UDP checksum that computes to 0x0000 (which would
    60  * mean "no checksum"). As a safeguard, UDP checksum offload is disabled by default.
    61  * It can be reactivated by setting the special link option link0 with ifconfig(8).
   62  */
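       /*
        * A minimal sketch of the resulting layout in the common case, assuming
        * the 2 byte offset (HME_RXOFFS) and a freshly allocated, cluster backed
        * RX mbuf:
        *
        *      cluster start (about 2^11 aligned)      +0
        *      2 byte pad (HME_RXOFFS)                 +0  .. +1
        *      14 byte ethernet header                 +2  .. +15
        *      IP (or other) header                    +16 ..      (aligned)
        *
        * The 2 byte pad is stripped again with m_adj() in hme_read() before the
        * mbuf is handed to the upper layers.
        */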
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #define HMEDEBUG
   65 #define KTR_HME         KTR_CT2         /* XXX */
   66 
   67 #include <sys/param.h>
   68 #include <sys/systm.h>
   69 #include <sys/bus.h>
   70 #include <sys/endian.h>
   71 #include <sys/kernel.h>
   72 #include <sys/module.h>
   73 #include <sys/ktr.h>
   74 #include <sys/mbuf.h>
   75 #include <sys/malloc.h>
   76 #include <sys/socket.h>
   77 #include <sys/sockio.h>
   78 
   79 #include <net/bpf.h>
   80 #include <net/ethernet.h>
   81 #include <net/if.h>
   82 #include <net/if_arp.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 #include <net/if_types.h>
   86 #include <net/if_vlan_var.h>
   87 
   88 #include <netinet/in.h>
   89 #include <netinet/in_systm.h>
   90 #include <netinet/ip.h>
   91 #include <netinet/tcp.h>
   92 #include <netinet/udp.h>
   93 
   94 #include <dev/mii/mii.h>
   95 #include <dev/mii/miivar.h>
   96 
   97 #include <machine/bus.h>
   98 
   99 #include <dev/hme/if_hmereg.h>
  100 #include <dev/hme/if_hmevar.h>
  101 
  102 static void     hme_start(struct ifnet *);
  103 static void     hme_start_locked(struct ifnet *);
  104 static void     hme_stop(struct hme_softc *);
  105 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  106 static void     hme_tick(void *);
  107 static int      hme_watchdog(struct hme_softc *);
  108 static void     hme_init(void *);
  109 static void     hme_init_locked(struct hme_softc *);
  110 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  111 static int      hme_meminit(struct hme_softc *);
  112 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  113     u_int32_t, u_int32_t);
  114 static void     hme_mifinit(struct hme_softc *);
  115 static void     hme_setladrf(struct hme_softc *, int);
  116 
  117 static int      hme_mediachange(struct ifnet *);
  118 static int      hme_mediachange_locked(struct hme_softc *);
  119 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  120 
  121 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
  122 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  123 static void     hme_eint(struct hme_softc *, u_int);
  124 static void     hme_rint(struct hme_softc *);
  125 static void     hme_tint(struct hme_softc *);
  126 static void     hme_txcksum(struct mbuf *, u_int32_t *);
  127 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  128 
  129 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  130 
  131 devclass_t hme_devclass;
  132 
  133 static int hme_nerr;
  134 
  135 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  136 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  137 
  138 #define HME_SPC_READ_4(spc, sc, offs) \
  139         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  140             (offs))
  141 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  142         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  143             (offs), (v))
  144 
  145 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  146 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  147 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  148 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  149 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  150 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  151 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  152 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  153 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  154 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
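       /*
        * As a reading aid, HME_SEB_READ_4(sc, offs), for example, expands to
        *
        *      bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, (offs))
        *
        * i.e. the first macro argument merely selects which of the per-block
        * tag/handle pairs set up by the front-end is used.
        */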
  155 
  156 #define HME_MAXERR      5
  157 #define HME_WHINE(dev, ...) do {                                        \
  158         if (hme_nerr++ < HME_MAXERR)                                    \
  159                 device_printf(dev, __VA_ARGS__);                        \
  160         if (hme_nerr == HME_MAXERR) {                                   \
  161                 device_printf(dev, "too many errors; not reporting "    \
  162                     "any more\n");                                      \
  163         }                                                               \
  164 } while(0)
  165 
  166 /* Support oversized VLAN frames. */
  167 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
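       /* With ETHER_MAX_LEN at 1518 and ETHER_VLAN_ENCAP_LEN at 4 this is 1522. */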
  168 
  169 int
  170 hme_config(struct hme_softc *sc)
  171 {
  172         struct ifnet *ifp;
  173         struct mii_softc *child;
  174         bus_size_t size;
  175         int error, rdesc, tdesc, i;
  176 
  177         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  178         if (ifp == NULL)
  179                 return (ENOSPC);
  180 
  181         /*
  182          * HME common initialization.
  183          *
  184          * hme_softc fields that must be initialized by the front-end:
  185          *
  186          * the DMA bus tag:
  187          *      sc_dmatag
  188          *
   189          * the bus handles, tags and offsets (split for SBus compatibility):
  190          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  191          *      sc_erx{t,h,o}   (Receiver Unit registers)
  192          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  193          *      sc_mac{t,h,o}   (MAC registers)
  194          *      sc_mif{t,h,o}   (Management Interface registers)
  195          *
  196          * the maximum bus burst size:
  197          *      sc_burst
  198          *
  199          */
  200 
  201         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
  202 
  203         /* Make sure the chip is stopped. */
  204         HME_LOCK(sc);
  205         hme_stop(sc);
  206         HME_UNLOCK(sc);
  207 
  208         /*
  209          * Allocate DMA capable memory
  210          * Buffer descriptors must be aligned on a 2048 byte boundary;
  211          * take this into account when calculating the size. Note that
  212          * the maximum number of descriptors (256) occupies 2048 bytes,
  213          * so we allocate that much regardless of HME_N*DESC.
  214          */
  215         size =  4096;
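               /*
                * As a cross-check, using the 8 bytes per descriptor implied by
                * the comment above: 256 descriptors * 8 bytes = 2048 bytes per
                * ring, and one TX plus one RX ring, each starting on a 2048
                * byte boundary, add up to the 4096 bytes allocated here.
                */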
  216 
  217         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
  218             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  219             HME_NTXDESC + HME_NRXDESC + 1, BUS_SPACE_MAXSIZE_32BIT, 0,
  220             NULL, NULL, &sc->sc_pdmatag);
  221         if (error)
  222                 goto fail_ifnet;
  223 
  224         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  225             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  226             1, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex,
  227             &sc->sc_lock, &sc->sc_cdmatag);
  228         if (error)
  229                 goto fail_ptag;
  230 
  231         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  232             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  233             HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  234             NULL, NULL, &sc->sc_rdmatag);
  235         if (error)
  236                 goto fail_ctag;
  237 
  238         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  239             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  240             HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  241             NULL, NULL, &sc->sc_tdmatag);
  242         if (error)
  243                 goto fail_rtag;
  244 
  245         /* Allocate control/TX DMA buffer */
  246         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  247             0, &sc->sc_cdmamap);
  248         if (error != 0) {
  249                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  250                 goto fail_ttag;
  251         }
  252 
  253         /* Load the buffer */
  254         sc->sc_rb.rb_dmabase = 0;
  255         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  256              sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  257             sc->sc_rb.rb_dmabase == 0) {
  258                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  259                     error);
  260                 goto fail_free;
  261         }
  262         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  263             sc->sc_rb.rb_dmabase);
  264 
  265         /*
  266          * Prepare the RX descriptors. rdesc serves as marker for the last
  267          * processed descriptor and may be used later on.
  268          */
  269         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  270                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  271                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  272                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  273                 if (error != 0)
  274                         goto fail_rxdesc;
  275         }
  276         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  277             &sc->sc_rb.rb_spare_dmamap);
  278         if (error != 0)
  279                 goto fail_rxdesc;
  280         /* Same for the TX descs. */
  281         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  282                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  283                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  284                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  285                 if (error != 0)
  286                         goto fail_txdesc;
  287         }
  288 
  289         sc->sc_csum_features = HME_CSUM_FEATURES;
  290         /* Initialize ifnet structure. */
  291         ifp->if_softc = sc;
  292         if_initname(ifp, device_get_name(sc->sc_dev),
  293             device_get_unit(sc->sc_dev));
  294         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  295         ifp->if_start = hme_start;
  296         ifp->if_ioctl = hme_ioctl;
  297         ifp->if_init = hme_init;
  298         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  299         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  300         IFQ_SET_READY(&ifp->if_snd);
  301 
  302         hme_mifinit(sc);
  303 
  304         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  305             hme_mediastatus)) != 0) {
  306                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  307                 goto fail_rxdesc;
  308         }
  309         sc->sc_mii = device_get_softc(sc->sc_miibus);
  310 
  311         /*
  312          * Walk along the list of attached MII devices and
  313          * establish an `MII instance' to `PHY number'
  314          * mapping. We'll use this mapping to enable the MII
  315          * drivers of the external transceiver according to
  316          * the currently selected media.
  317          */
  318         sc->sc_phys[0] = sc->sc_phys[1] = -1;
  319         LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
  320                 /*
  321                  * Note: we support just two PHYs: the built-in
  322                  * internal device and an external on the MII
  323                  * connector.
  324                  */
  325                 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
  326                     child->mii_phy != HME_PHYAD_INTERNAL) ||
  327                     child->mii_inst > 1) {
  328                         device_printf(sc->sc_dev, "cannot accommodate "
  329                             "MII device %s at phy %d, instance %d\n",
  330                             device_get_name(child->mii_dev),
  331                             child->mii_phy, child->mii_inst);
  332                         continue;
  333                 }
  334 
  335                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  336         }
  337 
  338         /* Attach the interface. */
  339         ether_ifattach(ifp, sc->sc_enaddr);
  340 
  341         /*
  342          * Tell the upper layer(s) we support long frames/checksum offloads.
  343          */
  344         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  345         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  346         ifp->if_hwassist |= sc->sc_csum_features;
  347         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  348         return (0);
  349 
  350 fail_txdesc:
  351         for (i = 0; i < tdesc; i++) {
  352                 bus_dmamap_destroy(sc->sc_tdmatag,
  353                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  354         }
  355         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  356 fail_rxdesc:
  357         for (i = 0; i < rdesc; i++) {
  358                 bus_dmamap_destroy(sc->sc_rdmatag,
  359                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  360         }
  361         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  362 fail_free:
  363         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  364 fail_ttag:
  365         bus_dma_tag_destroy(sc->sc_tdmatag);
  366 fail_rtag:
  367         bus_dma_tag_destroy(sc->sc_rdmatag);
  368 fail_ctag:
  369         bus_dma_tag_destroy(sc->sc_cdmatag);
  370 fail_ptag:
  371         bus_dma_tag_destroy(sc->sc_pdmatag);
  372 fail_ifnet:
  373         if_free(ifp);
  374         return (error);
  375 }
  376 
  377 void
  378 hme_detach(struct hme_softc *sc)
  379 {
  380         struct ifnet *ifp = sc->sc_ifp;
  381         int i;
  382 
  383         HME_LOCK(sc);
  384         hme_stop(sc);
  385         HME_UNLOCK(sc);
  386         callout_drain(&sc->sc_tick_ch);
  387         ether_ifdetach(ifp);
  388         if_free(ifp);
  389         device_delete_child(sc->sc_dev, sc->sc_miibus);
  390 
  391         for (i = 0; i < HME_NTXQ; i++) {
  392                 bus_dmamap_destroy(sc->sc_tdmatag,
  393                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  394         }
  395         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  396         for (i = 0; i < HME_NRXDESC; i++) {
  397                 bus_dmamap_destroy(sc->sc_rdmatag,
  398                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  399         }
  400         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
  401         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
  402         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  403         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  404         bus_dma_tag_destroy(sc->sc_tdmatag);
  405         bus_dma_tag_destroy(sc->sc_rdmatag);
  406         bus_dma_tag_destroy(sc->sc_cdmatag);
  407         bus_dma_tag_destroy(sc->sc_pdmatag);
  408 }
  409 
  410 void
  411 hme_suspend(struct hme_softc *sc)
  412 {
  413 
  414         HME_LOCK(sc);
  415         hme_stop(sc);
  416         HME_UNLOCK(sc);
  417 }
  418 
  419 void
  420 hme_resume(struct hme_softc *sc)
  421 {
  422         struct ifnet *ifp = sc->sc_ifp;
  423 
  424         HME_LOCK(sc);
  425         if ((ifp->if_flags & IFF_UP) != 0)
  426                 hme_init_locked(sc);
  427         HME_UNLOCK(sc);
  428 }
  429 
  430 static void
  431 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  432 {
  433         struct hme_softc *sc = (struct hme_softc *)xsc;
  434 
  435         if (error != 0)
  436                 return;
  437         KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
  438         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  439 }
  440 
  441 static void
  442 hme_tick(void *arg)
  443 {
  444         struct hme_softc *sc = arg;
  445         struct ifnet *ifp;
  446 
  447         HME_LOCK_ASSERT(sc, MA_OWNED);
  448 
  449         ifp = sc->sc_ifp;
  450         /*
  451          * Unload collision counters
  452          */
  453         ifp->if_collisions +=
  454                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
  455                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
  456                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
  457                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
  458 
  459         /*
  460          * then clear the hardware counters.
  461          */
  462         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  463         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  464         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  465         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  466 
  467         mii_tick(sc->sc_mii);
  468 
  469         if (hme_watchdog(sc) == EJUSTRETURN)
  470                 return;
  471 
  472         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  473 }
  474 
  475 static void
  476 hme_stop(struct hme_softc *sc)
  477 {
  478         u_int32_t v;
  479         int n;
  480 
  481         callout_stop(&sc->sc_tick_ch);
  482         sc->sc_wdog_timer = 0;
  483         sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  484 
  485         /* Mask all interrupts */
  486         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
  487 
  488         /* Reset transmitter and receiver */
  489         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  490             HME_SEB_RESET_ERX);
  491 
  492         for (n = 0; n < 20; n++) {
  493                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  494                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  495                         return;
  496                 DELAY(20);
  497         }
  498 
  499         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  500 }
  501 
  502 /*
  503  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  504  * ring for subsequent use.
  505  */
  506 static __inline void
  507 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  508 {
  509 
  510         /*
  511          * Dropped a packet, reinitialize the descriptor and turn the
  512          * ownership back to the hardware.
  513          */
  514         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
  515             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
  516 }
  517 
  518 static int
  519 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  520 {
  521         struct hme_rxdesc *rd;
  522         struct mbuf *m;
  523         bus_dma_segment_t segs[1];
  524         bus_dmamap_t map;
  525         uintptr_t b;
  526         int a, unmap, nsegs;
  527 
  528         rd = &sc->sc_rb.rb_rxdesc[ri];
  529         unmap = rd->hrx_m != NULL;
  530         if (unmap && keepold) {
  531                 /*
  532                  * Reinitialize the descriptor flags, as they may have been
  533                  * altered by the hardware.
  534                  */
  535                 hme_discard_rxbuf(sc, ri);
  536                 return (0);
  537         }
  538         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  539                 return (ENOBUFS);
  540         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  541         b = mtod(m, uintptr_t);
  542         /*
  543          * Required alignment boundary. At least 16 is needed, but since
  544          * the mapping must be done in a way that a burst can start on a
  545          * natural boundary we might need to extend this.
  546          */
  547         a = imax(HME_MINRXALIGN, sc->sc_burst);
  548         /*
   549          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  550          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  551          * alignment of the header adjacent to the ethernet header, which
  552          * should be sufficient in all cases. Nevertheless, this second-guesses
  553          * ALIGN().
  554          */
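               /*
                * For instance (a sketch only), with a 64 byte burst size a is
                * 64 here; a buffer starting at an address ending in 0x802 would
                * be advanced by roundup2(0x802, 64) - 0x802 == 62 bytes, while
                * a buffer already starting on a burst boundary is left alone.
                */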
  555         m_adj(m, roundup2(b, a) - b);
  556         if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  557             m, segs, &nsegs, 0) != 0) {
  558                 m_freem(m);
  559                 return (ENOBUFS);
  560         }
  561         /* If nsegs is wrong then the stack is corrupt */
  562         KASSERT(nsegs == 1, ("Too many segments returned!"));
  563         if (unmap) {
  564                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  565                     BUS_DMASYNC_POSTREAD);
  566                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  567         }
  568         map = rd->hrx_dmamap;
  569         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  570         sc->sc_rb.rb_spare_dmamap = map;
  571         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  572         HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr);
  573         rd->hrx_m = m;
  574         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
  575             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  576         return (0);
  577 }
  578 
  579 static int
  580 hme_meminit(struct hme_softc *sc)
  581 {
  582         struct hme_ring *hr = &sc->sc_rb;
  583         struct hme_txdesc *td;
  584         bus_addr_t dma;
  585         caddr_t p;
  586         unsigned int i;
  587         int error;
  588 
  589         p = hr->rb_membase;
  590         dma = hr->rb_dmabase;
  591 
  592         /*
  593          * Allocate transmit descriptors
  594          */
  595         hr->rb_txd = p;
  596         hr->rb_txddma = dma;
  597         p += HME_NTXDESC * HME_XD_SIZE;
  598         dma += HME_NTXDESC * HME_XD_SIZE;
   599         /* We have reserved descriptor space until the next 2048 byte boundary. */
  600         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  601         p = (caddr_t)roundup((u_long)p, 2048);
  602 
  603         /*
  604          * Allocate receive descriptors
  605          */
  606         hr->rb_rxd = p;
  607         hr->rb_rxddma = dma;
  608         p += HME_NRXDESC * HME_XD_SIZE;
  609         dma += HME_NRXDESC * HME_XD_SIZE;
  610         /* Again move forward to the next 2048 byte boundary.*/
  611         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  612         p = (caddr_t)roundup((u_long)p, 2048);
  613 
  614         /*
  615          * Initialize transmit buffer descriptors
  616          */
  617         for (i = 0; i < HME_NTXDESC; i++) {
  618                 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
  619                 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
  620         }
  621 
  622         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  623         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  624         for (i = 0; i < HME_NTXQ; i++) {
  625                 td = &sc->sc_rb.rb_txdesc[i];
  626                 if (td->htx_m != NULL) {
  627                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  628                             BUS_DMASYNC_POSTWRITE);
  629                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  630                         m_freem(td->htx_m);
  631                         td->htx_m = NULL;
  632                 }
  633                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  634         }
  635 
  636         /*
  637          * Initialize receive buffer descriptors
  638          */
  639         for (i = 0; i < HME_NRXDESC; i++) {
  640                 error = hme_add_rxbuf(sc, i, 1);
  641                 if (error != 0)
  642                         return (error);
  643         }
  644 
  645         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
  646         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
  647 
  648         hr->rb_tdhead = hr->rb_tdtail = 0;
  649         hr->rb_td_nbusy = 0;
  650         hr->rb_rdtail = 0;
  651         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  652             hr->rb_txddma);
  653         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  654             hr->rb_rxddma);
  655         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  656             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  657         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  658             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  659         return (0);
  660 }
  661 
  662 static int
  663 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  664     u_int32_t clr, u_int32_t set)
  665 {
  666         int i = 0;
  667 
  668         val &= ~clr;
  669         val |= set;
  670         HME_MAC_WRITE_4(sc, reg, val);
  671         if (clr == 0 && set == 0)
  672                 return (1);     /* just write, no bits to wait for */
  673         do {
  674                 DELAY(100);
  675                 i++;
  676                 val = HME_MAC_READ_4(sc, reg);
  677                 if (i > 40) {
  678                         /* After 3.5ms, we should have been done. */
  679                         device_printf(sc->sc_dev, "timeout while writing to "
  680                             "MAC configuration register\n");
  681                         return (0);
  682                 }
  683         } while ((val & clr) != 0 && (val & set) != set);
  684         return (1);
  685 }
  686 
  687 /*
  688  * Initialization of interface; set up initialization block
  689  * and transmit/receive descriptor rings.
  690  */
  691 static void
  692 hme_init(void *xsc)
  693 {
  694         struct hme_softc *sc = (struct hme_softc *)xsc;
  695 
  696         HME_LOCK(sc);
  697         hme_init_locked(sc);
  698         HME_UNLOCK(sc);
  699 }
  700 
  701 static void
  702 hme_init_locked(struct hme_softc *sc)
  703 {
  704         struct ifnet *ifp = sc->sc_ifp;
  705         u_int8_t *ea;
  706         u_int32_t n, v;
  707 
  708         HME_LOCK_ASSERT(sc, MA_OWNED);
  709         /*
  710          * Initialization sequence. The numbered steps below correspond
  711          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  712          * Channel Engine manual (part of the PCIO manual).
  713          * See also the STP2002-STQ document from Sun Microsystems.
  714          */
  715 
  716         /* step 1 & 2. Reset the Ethernet Channel */
  717         hme_stop(sc);
  718 
  719         /* Re-initialize the MIF */
  720         hme_mifinit(sc);
  721 
  722 #if 0
  723         /* Mask all MIF interrupts, just in case */
  724         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  725 #endif
  726 
  727         /* step 3. Setup data structures in host memory */
  728         if (hme_meminit(sc) != 0) {
   729                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  730                 return;
  731         }
  732 
  733         /* step 4. TX MAC registers & counters */
  734         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  735         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  736         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  737         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  738         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  739 
  740         /* Load station MAC address */
  741         ea = IFP2ENADDR(sc->sc_ifp);
  742         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  743         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  744         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  745 
  746         /*
  747          * Init seed for backoff
  748          * (source suggested by manual: low 10 bits of MAC address)
  749          */
  750         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  751         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  752 
   753         /* Note: Accepting power-on defaults for other MAC registers here. */
  754 
  755         /* step 5. RX MAC registers & counters */
  756         hme_setladrf(sc, 0);
  757 
  758         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  759         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  760         /* Transmit Descriptor ring size: in increments of 16 */
  761         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  762 
  763         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  764         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  765 
  766         /* step 8. Global Configuration & Interrupt Mask */
  767         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  768             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  769                 HME_SEB_STAT_HOSTTOTX |
  770                 HME_SEB_STAT_RXTOHOST |
  771                 HME_SEB_STAT_TXALL |
  772                 HME_SEB_STAT_TXPERR |
  773                 HME_SEB_STAT_RCNTEXP |
  774                 HME_SEB_STAT_ALL_ERRORS ));
  775 
  776         switch (sc->sc_burst) {
  777         default:
  778                 v = 0;
  779                 break;
  780         case 16:
  781                 v = HME_SEB_CFG_BURST16;
  782                 break;
  783         case 32:
  784                 v = HME_SEB_CFG_BURST32;
  785                 break;
  786         case 64:
  787                 v = HME_SEB_CFG_BURST64;
  788                 break;
  789         }
  790         /*
   791          * Blindly setting 64-bit transfers may hang PCI cards (Cheerio?).
   792          * Allowing 64-bit transfers breaks TX checksum offload as well.
   793          * It is not known whether this is due to a hardware bug or to the
   794          * driver's DMA scheme.
  795          *
  796          * if (sc->sc_pci == 0)
  797          *      v |= HME_SEB_CFG_64BIT;
  798          */
  799         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  800 
  801         /* step 9. ETX Configuration: use mostly default values */
  802 
  803         /* Enable DMA */
  804         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  805         v |= HME_ETX_CFG_DMAENABLE;
  806         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  807 
  808         /* step 10. ERX Configuration */
  809         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  810 
  811         /* Encode Receive Descriptor ring size: four possible values */
  812         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  813         switch (HME_NRXDESC) {
  814         case 32:
  815                 v |= HME_ERX_CFG_RINGSIZE32;
  816                 break;
  817         case 64:
  818                 v |= HME_ERX_CFG_RINGSIZE64;
  819                 break;
  820         case 128:
  821                 v |= HME_ERX_CFG_RINGSIZE128;
  822                 break;
  823         case 256:
  824                 v |= HME_ERX_CFG_RINGSIZE256;
  825                 break;
  826         default:
  827                 printf("hme: invalid Receive Descriptor ring size\n");
  828                 break;
  829         }
  830 
  831         /* Enable DMA, fix RX first byte offset. */
  832         v &= ~HME_ERX_CFG_FBO_MASK;
  833         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  834         /* RX TCP/UDP checksum offset */
  835         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  836         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  837         v |= n;
  838         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  839         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  840 
  841         /* step 11. XIF Configuration */
  842         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  843         v |= HME_MAC_XIF_OE;
  844         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  845         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  846 
  847         /* step 12. RX_MAC Configuration Register */
  848         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  849         v |= HME_MAC_RXCFG_ENABLE;
  850         v &= ~(HME_MAC_RXCFG_DCRCS);
  851         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  852         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  853 
  854         /* step 13. TX_MAC Configuration Register */
  855         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  856         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  857         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  858         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  859 
  860         /* step 14. Issue Transmit Pending command */
  861 
  862 #ifdef HMEDEBUG
  863         /* Debug: double-check. */
  864         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  865             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  866             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  867             HME_ERX_READ_4(sc, HME_ERXI_RING),
  868             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  869         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  870             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  871             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  872             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  873         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  874             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  875             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  876 #endif
  877 
  878         /* Set the current media. */
  879         hme_mediachange_locked(sc);
  880 
  881         /* Start the one second timer. */
  882         sc->sc_wdog_timer = 0;
  883         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  884 
  885         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  886         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  887         hme_start_locked(ifp);
  888 }
  889 
  890 /* TX TCP/UDP checksum */
  891 static void
  892 hme_txcksum(struct mbuf *m, u_int32_t *cflags)
  893 {
  894         struct ip *ip;
  895         u_int32_t offset, offset2;
  896         caddr_t p;
  897 
  898         for(; m && m->m_len == 0; m = m->m_next)
  899                 ;
  900         if (m == NULL || m->m_len < ETHER_HDR_LEN) {
  901                 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
  902                 return; /* checksum will be corrupted */
  903         }
  904         if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
  905                 if (m->m_len != ETHER_HDR_LEN) {
  906                         printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
  907                         return; /* checksum will be corrupted */
  908                 }
  909                 /* XXX */
  910                 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
  911                         ;
  912                 if (m == NULL)
  913                         return; /* checksum will be corrupted */
  914                 ip = mtod(m, struct ip *);
  915         } else {
  916                 p = mtod(m, caddr_t);
  917                 p += ETHER_HDR_LEN;
  918                 ip = (struct ip *)p;
  919         }
  920         offset2 = m->m_pkthdr.csum_data;
  921         offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
  922         *cflags = offset << HME_XD_TXCKSUM_SSHIFT;
  923         *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 
  924         *cflags |= HME_XD_TXCKSUM;
  925 }
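
       /*
        * A worked example of the flags computed above, assuming a plain TCP
        * segment without IP options: the checksum start offset is
        * ETHER_HDR_LEN + 20 == 34 (the beginning of the TCP header), and with
        * csum_data == offsetof(struct tcphdr, th_sum) == 16 the stuff offset
        * becomes 34 + 16 == 50, i.e. the hardware writes the computed checksum
        * straight into the th_sum field of the outgoing frame.
        */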
  926 
  927 /*
  928  * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
  929  * start the transmission.
   930  * Returns 0 on success, a negative value (-1 or -2) if there were not enough
   931  * free TX queue entries or descriptors to map the packet, or an errno otherwise.
  932  *
  933  * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
  934  * are readable from the nearest burst boundary on (i.e. potentially before
  935  * ds_addr) to the first boundary beyond the end. This is usually a safe
  936  * assumption to make, but is not documented.
  937  */
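       /*
        * Note on the ordering below: while the chain is built, HME_XD_OWN is
        * set on every descriptor except the first (SOP) one; only after the EOP
        * flag has been written is ownership of the first descriptor turned over
        * to the chip, so the hardware can never start on a partially
        * constructed chain.
        */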
  938 static int
  939 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
  940 {
  941         struct hme_txdesc *htx;
  942         caddr_t txd;
  943         int i, pci, si, ri, nseg;
  944         u_int32_t flags, cflags = 0;
  945         int error = 0;
  946 
  947         if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  948                 return (-1);
  949         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  950             *m0, sc->sc_rb.rb_txsegs, &nseg, 0);
  951         if (error == EFBIG) {
  952                 struct mbuf *m;
  953 
  954                 m = m_defrag(*m0, M_DONTWAIT);
  955                 if (m == NULL) {
  956                         m_freem(*m0);
  957                         *m0 = NULL;
  958                         return (ENOMEM);
  959                 }
  960                 *m0 = m;
  961                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  962                     *m0, sc->sc_rb.rb_txsegs, &nseg, 0);
  963                 if (error != 0) {
  964                         m_freem(*m0);
  965                         *m0 = NULL;
  966                         return (error);
  967                 }
  968         } else if (error != 0)
  969                 return (error);
  970         if (nseg == 0) {
  971                 m_freem(*m0);
  972                 *m0 = NULL;
  973                 return (EIO);
  974         }
  975         if (sc->sc_rb.rb_td_nbusy + nseg >= HME_NTXDESC) {
  976                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
  977                 /* retry with m_defrag(9)? */
  978                 return (-2);
  979         }
  980         if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
  981                 hme_txcksum(*m0, &cflags);
  982         bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
  983 
  984         si = ri = sc->sc_rb.rb_tdhead;
  985         txd = sc->sc_rb.rb_txd;
  986         pci = sc->sc_pci;
  987         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
  988             HME_XD_GETFLAGS(pci, txd, ri));
  989         for (i = 0; i < nseg; i++) {
  990                 /* Fill the ring entry. */
  991                 flags = HME_XD_ENCODE_TSIZE(sc->sc_rb.rb_txsegs[i].ds_len);
  992                 if (i == 0)
  993                         flags |= HME_XD_SOP | cflags;
  994                 else
  995                         flags |= HME_XD_OWN | cflags;
  996                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
  997                     ri, si, flags);
  998                 HME_XD_SETADDR(pci, txd, ri, sc->sc_rb.rb_txsegs[i].ds_addr);
  999                 HME_XD_SETFLAGS(pci, txd, ri, flags);
 1000                 sc->sc_rb.rb_td_nbusy++;
 1001                 htx->htx_lastdesc = ri;
 1002                 ri = (ri + 1) % HME_NTXDESC;
 1003         }
 1004         sc->sc_rb.rb_tdhead = ri;
 1005 
 1006         /* set EOP on the last descriptor */
 1007         ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1008         flags = HME_XD_GETFLAGS(pci, txd, ri);
 1009         flags |= HME_XD_EOP;
 1010         CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
 1011             flags);
 1012         HME_XD_SETFLAGS(pci, txd, ri, flags);
 1013 
 1014         /* Turn the first descriptor ownership to the hme */
 1015         flags = HME_XD_GETFLAGS(pci, txd, si);
 1016         flags |= HME_XD_OWN;
 1017         CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
 1018             ri, flags);
 1019         HME_XD_SETFLAGS(pci, txd, si, flags);
 1020 
 1021         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1022         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
 1023         htx->htx_m = *m0;
 1024 
 1025         /* start the transmission. */
 1026         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1027 
 1028         return (0);
 1029 }
 1030 
 1031 /*
 1032  * Pass a packet to the higher levels.
 1033  */
 1034 static void
 1035 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1036 {
 1037         struct ifnet *ifp = sc->sc_ifp;
 1038         struct mbuf *m;
 1039 
 1040         if (len <= sizeof(struct ether_header) ||
 1041             len > HME_MAX_FRAMESIZE) {
 1042 #ifdef HMEDEBUG
 1043                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1044                     len);
 1045 #endif
 1046                 ifp->if_ierrors++;
 1047                 hme_discard_rxbuf(sc, ix);
 1048                 return;
 1049         }
 1050 
 1051         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1052         CTR1(KTR_HME, "hme_read: len %d", len);
 1053 
 1054         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1055                 /*
 1056                  * hme_add_rxbuf will leave the old buffer in the ring until
 1057                  * it is sure that a new buffer can be mapped. If it can not,
 1058                  * drop the packet, but leave the interface up.
 1059                  */
 1060                 ifp->if_iqdrops++;
 1061                 hme_discard_rxbuf(sc, ix);
 1062                 return;
 1063         }
 1064 
 1065         ifp->if_ipackets++;
 1066 
 1067         m->m_pkthdr.rcvif = ifp;
 1068         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1069         m_adj(m, HME_RXOFFS);
 1070         /* RX TCP/UDP checksum */
 1071         if (ifp->if_capenable & IFCAP_RXCSUM)
 1072                 hme_rxcksum(m, flags);
 1073         /* Pass the packet up. */
 1074         HME_UNLOCK(sc);
 1075         (*ifp->if_input)(ifp, m);
 1076         HME_LOCK(sc);
 1077 }
 1078 
 1079 static void
 1080 hme_start(struct ifnet *ifp)
 1081 {
 1082         struct hme_softc *sc = ifp->if_softc;
 1083 
 1084         HME_LOCK(sc);
 1085         hme_start_locked(ifp);
 1086         HME_UNLOCK(sc);
 1087 }
 1088 
 1089 static void
 1090 hme_start_locked(struct ifnet *ifp)
 1091 {
 1092         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1093         struct mbuf *m;
 1094         int error, enq = 0;
 1095 
 1096         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1097             IFF_DRV_RUNNING)
 1098                 return;
 1099 
 1100         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1101             sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
 1102                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1103                 if (m == NULL)
 1104                         break;
 1105 
 1106                 error = hme_load_txmbuf(sc, &m);
 1107                 if (error != 0) {
 1108                         if (m == NULL)
 1109                                 break;
 1110                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1111                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1112                         break;
 1113                 }
 1114                 enq++;
 1115                 BPF_MTAP(ifp, m);
 1116         }
 1117 
 1118         if (enq > 0) {
 1119                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1120                     BUS_DMASYNC_PREWRITE);
 1121                 sc->sc_wdog_timer = 5;
 1122         }
 1123 }
 1124 
 1125 /*
 1126  * Transmit interrupt.
 1127  */
 1128 static void
 1129 hme_tint(struct hme_softc *sc)
 1130 {
 1131         caddr_t txd;
 1132         struct ifnet *ifp = sc->sc_ifp;
 1133         struct hme_txdesc *htx;
 1134         unsigned int ri, txflags;
 1135 
 1136         txd = sc->sc_rb.rb_txd;
 1137         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1138         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1139         /* Fetch current position in the transmit ring */
 1140         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1141                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1142                         CTR0(KTR_HME, "hme_tint: not busy!");
 1143                         break;
 1144                 }
 1145 
 1146                 txflags = HME_XD_GETFLAGS(sc->sc_pci, txd, ri);
 1147                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1148 
 1149                 if ((txflags & HME_XD_OWN) != 0)
 1150                         break;
 1151 
 1152                 CTR0(KTR_HME, "hme_tint: not owned");
 1153                 --sc->sc_rb.rb_td_nbusy;
 1154                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1155 
 1156                 /* Complete packet transmitted? */
 1157                 if ((txflags & HME_XD_EOP) == 0)
 1158                         continue;
 1159 
 1160                 KASSERT(htx->htx_lastdesc == ri,
 1161                     ("hme_tint: ring indices skewed: %d != %d!",
 1162                      htx->htx_lastdesc, ri));
 1163                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1164                     BUS_DMASYNC_POSTWRITE);
 1165                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1166 
 1167                 ifp->if_opackets++;
 1168                 m_freem(htx->htx_m);
 1169                 htx->htx_m = NULL;
 1170                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1171                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1172                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1173         }
 1174         sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
 1175 
 1176         /* Update ring */
 1177         sc->sc_rb.rb_tdtail = ri;
 1178 
 1179         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 1180             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1181                 hme_start_locked(ifp);
 1182 }
 1183 
 1184 /*
 1185  * RX TCP/UDP checksum 
 1186  */
 1187 static void
 1188 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1189 {
 1190         struct ether_header *eh;
 1191         struct ip *ip;
 1192         struct udphdr *uh;
 1193         int32_t hlen, len, pktlen;
 1194         u_int16_t cksum, *opts;
 1195         u_int32_t temp32;
 1196 
 1197         pktlen = m->m_pkthdr.len;
 1198         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1199                 return;
 1200         eh = mtod(m, struct ether_header *);
 1201         if (eh->ether_type != htons(ETHERTYPE_IP))
 1202                 return;
 1203         ip = (struct ip *)(eh + 1);
 1204         if (ip->ip_v != IPVERSION)
 1205                 return;
 1206 
 1207         hlen = ip->ip_hl << 2;
 1208         pktlen -= sizeof(struct ether_header);
 1209         if (hlen < sizeof(struct ip))
 1210                 return;
 1211         if (ntohs(ip->ip_len) < hlen)
 1212                 return;
 1213         if (ntohs(ip->ip_len) != pktlen)
 1214                 return;
 1215         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1216                 return; /* can't handle fragmented packet */
 1217 
 1218         switch (ip->ip_p) {
 1219         case IPPROTO_TCP:
 1220                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1221                         return;
 1222                 break;
 1223         case IPPROTO_UDP:
 1224                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1225                         return;
 1226                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1227                 if (uh->uh_sum == 0)
 1228                         return; /* no checksum */
 1229                 break;
 1230         default:
 1231                 return;
 1232         }
 1233 
 1234         cksum = ~(flags & HME_XD_RXCKSUM);
 1235         /* checksum fixup for IP options */
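               /*
                * The loop below removes each option word using ones-complement
                * subtraction with the borrow folded back in; e.g. (a sketch)
                * cksum 0x1234 minus an option word 0x5678: 0x1234 - 0x5678 ==
                * 0xffffbbbc, fold 0xffff + 0xbbbc == 0x1bbbb, low 16 bits
                * 0xbbbb, which matches 0x1234 + ~0x5678 == 0x1234 + 0xa987.
                */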
 1236         len = hlen - sizeof(struct ip);
 1237         if (len > 0) {
 1238                 opts = (u_int16_t *)(ip + 1);
 1239                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1240                         temp32 = cksum - *opts;
 1241                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1242                         cksum = temp32 & 65535;
 1243                 }
 1244         }
 1245         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1246         m->m_pkthdr.csum_data = cksum;
 1247 }
 1248 
 1249 /*
 1250  * Receive interrupt.
 1251  */
 1252 static void
 1253 hme_rint(struct hme_softc *sc)
 1254 {
 1255         caddr_t xdr = sc->sc_rb.rb_rxd;
 1256         struct ifnet *ifp = sc->sc_ifp;
 1257         unsigned int ri, len;
 1258         int progress = 0;
 1259         u_int32_t flags;
 1260 
 1261         /*
 1262          * Process all buffers with valid data.
 1263          */
 1264         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1265         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1266                 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 1267                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1268                 if ((flags & HME_XD_OWN) != 0)
 1269                         break;
 1270 
 1271                 progress++;
 1272                 if ((flags & HME_XD_OFL) != 0) {
 1273                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1274                             "flags=0x%x\n", ri, flags);
 1275                         ifp->if_ierrors++;
 1276                         hme_discard_rxbuf(sc, ri);
 1277                 } else {
 1278                         len = HME_XD_DECODE_RSIZE(flags);
 1279                         hme_read(sc, ri, len, flags);
 1280                 }
 1281         }
 1282         if (progress) {
 1283                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1284                     BUS_DMASYNC_PREWRITE);
 1285         }
 1286         sc->sc_rb.rb_rdtail = ri;
 1287 }
 1288 
 1289 static void
 1290 hme_eint(struct hme_softc *sc, u_int status)
 1291 {
 1292 
 1293         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1294                 device_printf(sc->sc_dev, "XXXlink status changed: "
 1295                     "cfg=%#x, stat=%#x, sm=%#x\n",
 1296                     HME_MIF_READ_4(sc, HME_MIFI_CFG),
 1297                     HME_MIF_READ_4(sc, HME_MIFI_STAT),
 1298                     HME_MIF_READ_4(sc, HME_MIFI_SM));
 1299                 return;
 1300         }
 1301 
  1302         /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1303         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1304                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1305                 hme_init_locked(sc);
 1306         }
 1307 }
 1308 
 1309 void
 1310 hme_intr(void *v)
 1311 {
 1312         struct hme_softc *sc = (struct hme_softc *)v;
 1313         u_int32_t status;
 1314 
 1315         HME_LOCK(sc);
 1316         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1317         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1318 
 1319         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1320                 hme_eint(sc, status);
 1321 
 1322         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1323                 hme_tint(sc);
 1324 
 1325         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1326                 hme_rint(sc);
 1327         HME_UNLOCK(sc);
 1328 }
 1329 
 1330 static int
 1331 hme_watchdog(struct hme_softc *sc)
 1332 {
 1333 #ifdef HMEDEBUG
 1334         u_int32_t status;
 1335 #endif
 1336 
 1337         HME_LOCK_ASSERT(sc, MA_OWNED);
 1338 #ifdef HMEDEBUG
 1339         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1340         CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 1341 #endif
 1342 
 1343         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1344                 return (0);
 1345 
 1346         device_printf(sc->sc_dev, "device timeout\n");
 1347         ++sc->sc_ifp->if_oerrors;
 1348 
 1349         hme_init_locked(sc);
 1350         return (EJUSTRETURN);
 1351 }
 1352 
 1353 /*
 1354  * Initialize the MII Management Interface
 1355  */
 1356 static void
 1357 hme_mifinit(struct hme_softc *sc)
 1358 {
 1359         u_int32_t v;
 1360 
 1361         /*
 1362          * Configure the MIF in frame mode, polling disabled, internal PHY
 1363          * selected.
 1364          */
 1365         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1366 
 1367         /*
 1368          * If the currently selected media uses the external transceiver,
 1369          * enable its MII drivers (which basically isolates the internal
 1370          * one and vice versa). If the current media has not been set yet,
 1371          * we default to the internal transceiver.
 1372          */
 1373         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1374         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1375             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1376             HME_PHYAD_EXTERNAL)
 1377                 v |= HME_MAC_XIF_MIIENABLE;
 1378         else
 1379                 v &= ~HME_MAC_XIF_MIIENABLE;
 1380         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1381 }
 1382 
 1383 /*
 1384  * MII interface
 1385  */
 1386 int
 1387 hme_mii_readreg(device_t dev, int phy, int reg)
 1388 {
 1389         struct hme_softc *sc;
 1390         int n;
 1391         u_int32_t v;
 1392 
 1393         /* We can have at most two PHYs. */
 1394         if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
 1395                 return (0);
 1396 
 1397         sc = device_get_softc(dev);
 1398         /* Select the desired PHY in the MIF configuration register */
 1399         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1400         if (phy == HME_PHYAD_EXTERNAL)
 1401                 v |= HME_MIF_CFG_PHY;
 1402         else
 1403                 v &= ~HME_MIF_CFG_PHY;
 1404         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1405 
 1406         /* Construct the frame command */
 1407         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1408             HME_MIF_FO_TAMSB |
 1409             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1410             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1411             (reg << HME_MIF_FO_REGAD_SHIFT);
 1412 
 1413         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1414         for (n = 0; n < 100; n++) {
 1415                 DELAY(1);
 1416                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1417                 if (v & HME_MIF_FO_TALSB)
 1418                         return (v & HME_MIF_FO_DATA);
 1419         }
 1420 
 1421         device_printf(sc->sc_dev, "mii_read timeout\n");
 1422         return (0);
 1423 }
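
/*
 * Editor's note (illustrative, not part of the original source): the frame
 * word built above appears to be a standard IEEE 802.3 clause-22 MII
 * management frame, with the MIF frame/output register presumably mirroring
 * the on-wire layout (the HME_MIF_FO_* shifts are defined in the register
 * header, which is not part of this excerpt):
 *
 *      ST[31:30] = 01 (MII_COMMAND_START)
 *      OP[29:28] = 10 for read, 01 for write
 *      PHYAD[27:23], REGAD[22:18]
 *      TA[17:16] turnaround (TAMSB set by the host; TALSB indicates completion)
 *      DATA[15:0]
 *
 * The poll loop waits up to roughly 100 microseconds (100 x DELAY(1)) for
 * HME_MIF_FO_TALSB, after which the low 16 data bits of the register hold
 * the value read from the PHY.
 */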
 1424 
 1425 int
 1426 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1427 {
 1428         struct hme_softc *sc;
 1429         int n;
 1430         u_int32_t v;
 1431 
 1432         /* We can have at most two PHYs. */
 1433         if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
 1434                 return (0);
 1435 
 1436         sc = device_get_softc(dev);
 1437         /* Select the desired PHY in the MIF configuration register */
 1438         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1439         if (phy == HME_PHYAD_EXTERNAL)
 1440                 v |= HME_MIF_CFG_PHY;
 1441         else
 1442                 v &= ~HME_MIF_CFG_PHY;
 1443         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1444 
 1445         /* Construct the frame command */
 1446         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1447             HME_MIF_FO_TAMSB                            |
 1448             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1449             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1450             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1451             (val & HME_MIF_FO_DATA);
 1452 
 1453         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1454         for (n = 0; n < 100; n++) {
 1455                 DELAY(1);
 1456                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1457                 if (v & HME_MIF_FO_TALSB)
 1458                         return (1);
 1459         }
 1460 
 1461         device_printf(sc->sc_dev, "mii_write timeout\n");
 1462         return (0);
 1463 }
 1464 
 1465 void
 1466 hme_mii_statchg(device_t dev)
 1467 {
 1468         struct hme_softc *sc;
 1469         u_int32_t v;
 1470 
 1471         sc = device_get_softc(dev);
 1472 
 1473 #ifdef HMEDEBUG
 1474         if (sc->sc_debug)
 1475                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1476 #endif
 1477 
 1478         /* Set the MAC Full Duplex bit appropriately */
 1479         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1480         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
 1481                 return;
 1482         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1483                 v |= HME_MAC_TXCFG_FULLDPLX;
 1484         else
 1485                 v &= ~HME_MAC_TXCFG_FULLDPLX;
 1486         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 1487         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
 1488                 return;
 1489 }
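
/*
 * Editor's note: hme_mac_bitflip() is defined earlier in this file and is not
 * part of this excerpt.  Judging from its uses here and in hme_setladrf()
 * below, it clears and/or sets bits in a MAC register and, when bits are being
 * cleared, waits for the hardware to acknowledge (e.g. for the TX or RX engine
 * to actually stop) before returning nonzero on success.  That is why the TX
 * MAC is disabled above before HME_MAC_TXCFG_FULLDPLX is toggled and only
 * re-enabled afterwards.  A minimal sketch of that presumed behaviour follows;
 * the name, poll count, and delay are assumptions, not the driver's actual
 * implementation.
 */
static int
hme_mac_bitflip_sketch(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
        int i;

        val &= ~clr;
        val |= set;
        HME_MAC_WRITE_4(sc, reg, val);
        if (clr == 0)
                return (1);
        /* Poll until the cleared bits read back as zero (bounded wait). */
        for (i = 0; i < 500; i++) {
                DELAY(10);
                if ((HME_MAC_READ_4(sc, reg) & clr) == 0)
                        return (1);
        }
        device_printf(sc->sc_dev, "bits %#x did not clear\n", (u_int)clr);
        return (0);
}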
 1490 
 1491 static int
 1492 hme_mediachange(struct ifnet *ifp)
 1493 {
 1494         struct hme_softc *sc = ifp->if_softc;
 1495         int error;
 1496 
 1497         HME_LOCK(sc);
 1498         error = hme_mediachange_locked(sc);
 1499         HME_UNLOCK(sc);
 1500         return (error);
 1501 }
 1502 
 1503 static int
 1504 hme_mediachange_locked(struct hme_softc *sc)
 1505 {
 1506         struct mii_softc *child;
 1507 
 1508         HME_LOCK_ASSERT(sc, MA_OWNED);
 1509 #ifdef HMEDEBUG
 1510         if (sc->sc_debug)
 1511                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1512 #endif
 1513 
 1514         hme_mifinit(sc);
 1515 
 1516         /*
 1517          * If both PHYs are present, reset them. This is required for
 1518          * unisolating the previously isolated PHY when switching PHYs.
 1519          * As the above hme_mifinit() call will set the MII drivers in
 1520          * the XIF configuration register according to the currently
 1521          * selected media, there should be no window during which the
 1522          * data paths of both transceivers are open at the same time,
 1523          * even if the PHY device drivers use MIIF_NOISOLATE.
 1524          */
 1525         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1526                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1527                         mii_phy_reset(child);
 1528         return (mii_mediachg(sc->sc_mii));
 1529 }
 1530 
 1531 static void
 1532 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1533 {
 1534         struct hme_softc *sc = ifp->if_softc;
 1535 
 1536         HME_LOCK(sc);
 1537         if ((ifp->if_flags & IFF_UP) == 0) {
 1538                 HME_UNLOCK(sc);
 1539                 return;
 1540         }
 1541 
 1542         mii_pollstat(sc->sc_mii);
 1543         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1544         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1545         HME_UNLOCK(sc);
 1546 }
 1547 
 1548 /*
 1549  * Process an ioctl request.
 1550  */
 1551 static int
 1552 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1553 {
 1554         struct hme_softc *sc = ifp->if_softc;
 1555         struct ifreq *ifr = (struct ifreq *)data;
 1556         int error = 0;
 1557 
 1558         switch (cmd) {
 1559         case SIOCSIFFLAGS:
 1560                 HME_LOCK(sc);
 1561                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1562                     (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1563                         /*
 1564                          * If interface is marked down and it is running, then
 1565                          * stop it.
 1566                          */
 1567                         hme_stop(sc);
 1568                 } else if ((ifp->if_flags & IFF_UP) != 0 &&
 1569                            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1570                         /*
 1571                          * If interface is marked up and it is stopped, then
 1572                          * start it.
 1573                          */
 1574                         hme_init_locked(sc);
 1575                 } else if ((ifp->if_flags & IFF_UP) != 0) {
 1576                         /*
 1577                          * Reset the interface to pick up changes in any other
 1578                          * flags that affect hardware registers.
 1579                          */
 1580                         hme_init_locked(sc);
 1581                 }
 1582                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1583                         sc->sc_csum_features |= CSUM_UDP;
 1584                 else
 1585                         sc->sc_csum_features &= ~CSUM_UDP;
 1586                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1587                         ifp->if_hwassist = sc->sc_csum_features;
 1588 #ifdef HMEDEBUG
 1589                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1590 #endif
 1591                 HME_UNLOCK(sc);
 1592                 break;
 1593 
 1594         case SIOCADDMULTI:
 1595         case SIOCDELMULTI:
 1596                 HME_LOCK(sc);
 1597                 hme_setladrf(sc, 1);
 1598                 HME_UNLOCK(sc);
 1599                 error = 0;
 1600                 break;
 1601         case SIOCGIFMEDIA:
 1602         case SIOCSIFMEDIA:
 1603                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1604                 break;
 1605         case SIOCSIFCAP:
 1606                 HME_LOCK(sc);
 1607                 ifp->if_capenable = ifr->ifr_reqcap;
 1608                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1609                         ifp->if_hwassist = sc->sc_csum_features;
 1610                 else
 1611                         ifp->if_hwassist = 0;
 1612                 HME_UNLOCK(sc);
 1613                 break;
 1614         default:
 1615                 error = ether_ioctl(ifp, cmd, data);
 1616                 break;
 1617         }
 1618 
 1619         return (error);
 1620 }
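
/*
 * Editor's note (illustrative, not part of the original source): per the
 * SIOCSIFFLAGS handling above, UDP transmit checksum offload is opt-in: the
 * link0 interface flag adds CSUM_UDP to sc_csum_features, and the feature set
 * is only handed to the stack (if_hwassist) while IFCAP_TXCSUM is enabled.
 * Assuming the standard ifconfig(8) flag names, that corresponds to something
 * like:
 *
 *      ifconfig hme0 link0 txcsum      -- also offload UDP TX checksums
 *      ifconfig hme0 -link0            -- drop UDP from the offloaded set
 *
 * (hme0 is a hypothetical interface name; the default checksum feature set is
 * established at attach time, which is not part of this excerpt.)
 */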
 1621 
 1622 /*
 1623  * Set up the logical address filter.
 1624  */
 1625 static void
 1626 hme_setladrf(struct hme_softc *sc, int reenable)
 1627 {
 1628         struct ifnet *ifp = sc->sc_ifp;
 1629         struct ifmultiaddr *inm;
 1630         u_int32_t crc;
 1631         u_int32_t hash[4];
 1632         u_int32_t macc;
 1633 
 1634         HME_LOCK_ASSERT(sc, MA_OWNED);
 1635         /* Clear hash table */
 1636         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1637 
 1638         /* Get current RX configuration */
 1639         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1640 
 1641         /*
 1642          * Disable the receiver while changing its state, as the documentation
 1643          * mandates.
 1644          * We must then wait until the bit clears in the register. This should
 1645          * take at most 3.5ms.
 1646          */
 1647         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 1648                 return;
 1649         /* Disable the hash filter before writing to the filter registers. */
 1650         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1651             HME_MAC_RXCFG_HENABLE, 0))
 1652                 return;
 1653 
 1654         /* make RXMAC really SIMPLEX */
 1655         macc |= HME_MAC_RXCFG_ME;
 1656         if (reenable)
 1657                 macc |= HME_MAC_RXCFG_ENABLE;
 1658         else
 1659                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1660 
 1661         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1662                 /* Turn on promiscuous mode; turn off the hash filter */
 1663                 macc |= HME_MAC_RXCFG_PMISC;
 1664                 macc &= ~HME_MAC_RXCFG_HENABLE;
 1665                 ifp->if_flags |= IFF_ALLMULTI;
 1666                 goto chipit;
 1667         }
 1668 
 1669         /* Turn off promiscuous mode; turn on the hash filter */
 1670         macc &= ~HME_MAC_RXCFG_PMISC;
 1671         macc |= HME_MAC_RXCFG_HENABLE;
 1672 
 1673         /*
 1674          * Set up multicast address filter by passing all multicast addresses
 1675          * through a crc generator, and then using the high order 6 bits as an
 1676          * index into the 64 bit logical address filter.  The two high order
 1677          * bits select the word, while the remaining bits select the bit within
 1678          * the word.
 1679          */
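        /*
         * Editor's note (illustrative, not part of the original source): for
         * each link-layer multicast address, ether_crc32_le() computes the
         * little-endian CRC32 over the 6 address bytes; "crc >>= 26" keeps the
         * 6 most significant bits, of which the upper two (crc >> 4) pick one
         * of the four 16-bit HASHTAB words and the lower four (crc & 0xf) pick
         * the bit within that word, i.e. hash[crc >> 4] |= 1 << (crc & 0xf).
         */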
 1680 
 1681         IF_ADDR_LOCK(sc->sc_ifp);
 1682         TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) {
 1683                 if (inm->ifma_addr->sa_family != AF_LINK)
 1684                         continue;
 1685                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1686                     inm->ifma_addr), ETHER_ADDR_LEN);
 1687 
 1688                 /* Just want the 6 most significant bits. */
 1689                 crc >>= 26;
 1690 
 1691                 /* Set the corresponding bit in the filter. */
 1692                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1693         }
 1694         IF_ADDR_UNLOCK(sc->sc_ifp);
 1695 
 1696         ifp->if_flags &= ~IFF_ALLMULTI;
 1697 
 1698 chipit:
 1699         /* Now load the hash table into the chip */
 1700         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1701         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1702         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1703         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1704         hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1705             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1706             HME_MAC_RXCFG_ME));
 1707 }
