FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c

    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: src/sys/dev/hme/if_hme.c,v 1.31.2.6 2006/03/24 00:39:23 yongari Exp $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
    46  * The HME is, e.g., part of the PCIO PCI multi-function device.
   47  * It supports TX gathering and TX and RX checksum offloading.
    48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters are usually aligned to 2^11 boundaries, and 2
    50  * bytes are skipped so that the header following the ethernet header lands on
    51  * a natural boundary, which keeps wastage minimal in the most common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
    57  * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
    58  * offloading. In practice the same technique also works for UDP datagrams, but
    59  * the hardware does not compensate the UDP checksum, which can come out as 0x0,
    60  * a value UDP reserves to mean "no checksum". As a safeguard, UDP checksum
    61  * offload is disabled by default; it can be reactivated with the link0 option of ifconfig(8).
   62  */
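       /*
        * Illustration of the offset choice (a sketch, not driver code),
        * assuming HME_RXOFFS == 2 and ETHER_HDR_LEN == 14: relative to a
        * 16-byte-aligned buffer start,
        *
        *      +0  .. +1       skipped (first-byte offset programmed to 2)
        *      +2  .. +15      ethernet header (14 bytes)
        *      +16 ..          IP header, starting on a natural boundary
        */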
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #define HMEDEBUG
   65 #define KTR_HME         KTR_CT2         /* XXX */
   66 
   67 #include <sys/param.h>
   68 #include <sys/systm.h>
   69 #include <sys/bus.h>
   70 #include <sys/endian.h>
   71 #include <sys/kernel.h>
   72 #include <sys/module.h>
   73 #include <sys/ktr.h>
   74 #include <sys/mbuf.h>
   75 #include <sys/malloc.h>
   76 #include <sys/socket.h>
   77 #include <sys/sockio.h>
   78 
   79 #include <net/bpf.h>
   80 #include <net/ethernet.h>
   81 #include <net/if.h>
   82 #include <net/if_arp.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 #include <net/if_vlan_var.h>
   86 
   87 #include <netinet/in.h>
   88 #include <netinet/in_systm.h>
   89 #include <netinet/ip.h>
   90 #include <netinet/tcp.h>
   91 #include <netinet/udp.h>
   92 
   93 #include <dev/mii/mii.h>
   94 #include <dev/mii/miivar.h>
   95 
   96 #include <machine/bus.h>
   97 
   98 #include <dev/hme/if_hmereg.h>
   99 #include <dev/hme/if_hmevar.h>
  100 
  101 static void     hme_start(struct ifnet *);
  102 static void     hme_start_locked(struct ifnet *);
  103 static void     hme_stop(struct hme_softc *);
  104 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  105 static void     hme_tick(void *);
  106 static void     hme_watchdog(struct ifnet *);
  107 static void     hme_init(void *);
  108 static void     hme_init_locked(void *);
  109 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  110 static int      hme_meminit(struct hme_softc *);
  111 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  112     u_int32_t, u_int32_t);
  113 static void     hme_mifinit(struct hme_softc *);
  114 static void     hme_reset(struct hme_softc *);
  115 static void     hme_setladrf(struct hme_softc *, int);
  116 
  117 static int      hme_mediachange(struct ifnet *);
  118 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  119 
  120 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf *);
  121 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  122 static void     hme_eint(struct hme_softc *, u_int);
  123 static void     hme_rint(struct hme_softc *);
  124 static void     hme_tint(struct hme_softc *);
  125 static void     hme_txcksum(struct mbuf *, u_int32_t *);
  126 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  127 
  128 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  129 static void     hme_rxdma_callback(void *, bus_dma_segment_t *, int,
  130     bus_size_t, int);
  131 static void     hme_txdma_callback(void *, bus_dma_segment_t *, int,
  132     bus_size_t, int);
  133 
  134 devclass_t hme_devclass;
  135 
  136 static int hme_nerr;
  137 
  138 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  139 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  140 
  141 #define HME_SPC_READ_4(spc, sc, offs) \
  142         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  143             (offs))
  144 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  145         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  146             (offs), (v))
  147 
  148 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  149 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  150 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  151 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  152 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  153 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  154 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  155 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  156 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  157 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  158 
  159 #define HME_MAXERR      5
  160 #define HME_WHINE(dev, ...) do {                                        \
  161         if (hme_nerr++ < HME_MAXERR)                                    \
  162                 device_printf(dev, __VA_ARGS__);                        \
  163         if (hme_nerr == HME_MAXERR) {                                   \
   164                 device_printf(dev, "too many errors; not reporting any " \
  165                     "more\n");                                          \
  166         }                                                               \
  167 } while(0)
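       /*
        * hme_nerr is a file-global counter, so HME_WHINE rate-limits error
        * messages across all hme(4) instances together rather than per
        * device; see e.g. the call in hme_eint() below:
        *
        *      HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
        */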
  168 
  169 /* Support oversized VLAN frames. */
  170 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
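       /*
        * For reference, with the standard values from net/ethernet.h this is
        * ETHER_MAX_LEN (1518) + ETHER_VLAN_ENCAP_LEN (4) = 1522 bytes, i.e. a
        * maximum-sized frame plus one 802.1Q tag.
        */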
  171 
  172 int
  173 hme_config(struct hme_softc *sc)
  174 {
  175         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  176         struct mii_softc *child;
  177         bus_size_t size;
  178         int error, rdesc, tdesc, i;
  179 
  180         /*
  181          * HME common initialization.
  182          *
  183          * hme_softc fields that must be initialized by the front-end:
  184          *
  185          * the DMA bus tag:
  186          *      sc_dmatag
  187          *
   188          * the bus handles, tags and offsets (split for SBus compatibility):
  189          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  190          *      sc_erx{t,h,o}   (Receiver Unit registers)
  191          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  192          *      sc_mac{t,h,o}   (MAC registers)
  193          *      sc_mif{t,h,o}   (Management Interface registers)
  194          *
  195          * the maximum bus burst size:
  196          *      sc_burst
  197          *
  198          */
  199 
  200         HME_LOCK_ASSERT(sc, MA_NOTOWNED);
  201         /* Make sure the chip is stopped. */
  202         HME_LOCK(sc);
  203         hme_stop(sc);
  204         HME_UNLOCK(sc);
  205 
  206         /*
  207          * Allocate DMA capable memory
  208          * Buffer descriptors must be aligned on a 2048 byte boundary;
  209          * take this into account when calculating the size. Note that
  210          * the maximum number of descriptors (256) occupies 2048 bytes,
  211          * so we allocate that much regardless of HME_N*DESC.
  212          */
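               /*
                * In other words, assuming HME_XD_SIZE == 8 (one 32-bit flags
                * word plus one 32-bit address word per descriptor): 256
                * descriptors * 8 bytes = 2048 bytes per ring, and one such
                * block for TX plus one for RX gives the 4096 bytes below.
                */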
   213         size = 4096;
  214 
  215         error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  216             BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
  217             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
  218         if (error)
  219                 return (error);
  220 
  221         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  222             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  223             1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
  224             &Giant, &sc->sc_cdmatag);
  225         if (error)
  226                 goto fail_ptag;
  227 
  228         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  229             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  230             HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  231             NULL, NULL, &sc->sc_rdmatag);
  232         if (error)
  233                 goto fail_ctag;
  234 
  235         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  236             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  237             HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  238             NULL, NULL, &sc->sc_tdmatag);
  239         if (error)
  240                 goto fail_rtag;
  241 
  242         /* Allocate control/TX DMA buffer */
  243         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  244             0, &sc->sc_cdmamap);
  245         if (error != 0) {
  246                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  247                 goto fail_ttag;
  248         }
  249 
  250         /* Load the buffer */
  251         sc->sc_rb.rb_dmabase = 0;
  252         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  253              sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  254             sc->sc_rb.rb_dmabase == 0) {
  255                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  256                     error);
  257                 goto fail_free;
  258         }
  259         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  260             sc->sc_rb.rb_dmabase);
  261 
  262         /*
   263          * Prepare the RX descriptors. rdesc serves as a marker for the last
  264          * processed descriptor and may be used later on.
  265          */
  266         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  267                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  268                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  269                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  270                 if (error != 0)
  271                         goto fail_rxdesc;
  272         }
  273         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  274             &sc->sc_rb.rb_spare_dmamap);
  275         if (error != 0)
  276                 goto fail_rxdesc;
  277         /* Same for the TX descs. */
  278         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  279                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  280                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  281                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  282                 if (error != 0)
  283                         goto fail_txdesc;
  284         }
  285 
  286         sc->sc_csum_features = HME_CSUM_FEATURES;
  287         /* Initialize ifnet structure. */
  288         ifp->if_softc = sc;
  289         if_initname(ifp, device_get_name(sc->sc_dev),
  290             device_get_unit(sc->sc_dev));
  291         ifp->if_mtu = ETHERMTU;
  292         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  293         ifp->if_start = hme_start;
  294         ifp->if_ioctl = hme_ioctl;
  295         ifp->if_init = hme_init;
  296         ifp->if_watchdog = hme_watchdog;
  297         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  298         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  299         IFQ_SET_READY(&ifp->if_snd);
  300 
  301         HME_LOCK(sc);
  302         hme_mifinit(sc);
  303         HME_UNLOCK(sc);
  304 
  305         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  306             hme_mediastatus)) != 0) {
  307                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  308                 goto fail_rxdesc;
  309         }
  310         sc->sc_mii = device_get_softc(sc->sc_miibus);
  311 
  312         /*
  313          * Walk along the list of attached MII devices and
  314          * establish an `MII instance' to `phy number'
  315          * mapping. We'll use this mapping in media change
  316          * requests to determine which phy to use to program
  317          * the MIF configuration register.
  318          */
  319         for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
  320              child = LIST_NEXT(child, mii_list)) {
  321                 /*
  322                  * Note: we support just two PHYs: the built-in
  323                  * internal device and an external on the MII
  324                  * connector.
  325                  */
  326                 if (child->mii_phy > 1 || child->mii_inst > 1) {
  327                         device_printf(sc->sc_dev, "cannot accommodate "
  328                             "MII device %s at phy %d, instance %d\n",
  329                             device_get_name(child->mii_dev),
  330                             child->mii_phy, child->mii_inst);
  331                         continue;
  332                 }
  333 
  334                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  335         }
  336 
  337         /* Attach the interface. */
  338         ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
  339 
  340         /*
  341          * Tell the upper layer(s) we support long frames/checksum offloads.
  342          */
  343         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  344         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  345         ifp->if_hwassist |= sc->sc_csum_features;
  346         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  347 
  348         callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
  349         return (0);
  350 
  351 fail_txdesc:
  352         for (i = 0; i < tdesc; i++) {
  353                 bus_dmamap_destroy(sc->sc_tdmatag,
  354                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  355         }
  356         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  357 fail_rxdesc:
  358         for (i = 0; i < rdesc; i++) {
  359                 bus_dmamap_destroy(sc->sc_rdmatag,
  360                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  361         }
  362         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  363 fail_free:
  364         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  365 fail_ttag:
  366         bus_dma_tag_destroy(sc->sc_tdmatag);
  367 fail_rtag:
  368         bus_dma_tag_destroy(sc->sc_rdmatag);
  369 fail_ctag:
  370         bus_dma_tag_destroy(sc->sc_cdmatag);
  371 fail_ptag:
  372         bus_dma_tag_destroy(sc->sc_pdmatag);
  373         return (error);
  374 }
  375 
  376 void
  377 hme_detach(struct hme_softc *sc)
  378 {
  379         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  380         int i;
  381 
  382         HME_LOCK_ASSERT(sc, MA_NOTOWNED);
  383 
  384         ether_ifdetach(ifp);
  385         HME_LOCK(sc);
  386         hme_stop(sc);
  387         HME_UNLOCK(sc);
  388         device_delete_child(sc->sc_dev, sc->sc_miibus);
  389 
  390         for (i = 0; i < HME_NTXQ; i++) {
  391                 bus_dmamap_destroy(sc->sc_tdmatag,
  392                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  393         }
  394         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  395         for (i = 0; i < HME_NRXDESC; i++) {
  396                 bus_dmamap_destroy(sc->sc_rdmatag,
  397                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  398         }
  399         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
  400         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
  401         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  402         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  403         bus_dma_tag_destroy(sc->sc_tdmatag);
  404         bus_dma_tag_destroy(sc->sc_rdmatag);
  405         bus_dma_tag_destroy(sc->sc_cdmatag);
  406         bus_dma_tag_destroy(sc->sc_pdmatag);
  407 }
  408 
  409 void
  410 hme_suspend(struct hme_softc *sc)
  411 {
  412 
  413         HME_LOCK(sc);
  414         hme_stop(sc);
  415         HME_UNLOCK(sc);
  416 }
  417 
  418 void
  419 hme_resume(struct hme_softc *sc)
  420 {
  421         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  422 
  423         HME_LOCK(sc);
  424         if ((ifp->if_flags & IFF_UP) != 0)
  425                 hme_init_locked(ifp);
  426         HME_UNLOCK(sc);
  427 }
  428 
  429 static void
  430 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  431 {
  432         struct hme_softc *sc = (struct hme_softc *)xsc;
  433 
  434         if (error != 0)
  435                 return;
  436         KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
  437         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  438 }
  439 
  440 static void
  441 hme_tick(void *arg)
  442 {
  443         struct hme_softc *sc = arg;
  444         int s;
  445 
  446         s = splnet();
  447         mii_tick(sc->sc_mii);
  448         splx(s);
  449 
  450         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  451 }
  452 
  453 static void
  454 hme_reset(struct hme_softc *sc)
  455 {
  456         int s;
  457 
  458         HME_LOCK(sc);
  459         s = splnet();
  460         hme_init_locked(sc);
  461         splx(s);
  462         HME_UNLOCK(sc);
  463 }
  464 
  465 static void
  466 hme_stop(struct hme_softc *sc)
  467 {
  468         u_int32_t v;
  469         int n;
  470 
  471         callout_stop(&sc->sc_tick_ch);
  472 
  473         /* Reset transmitter and receiver */
  474         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  475             HME_SEB_RESET_ERX);
  476 
  477         for (n = 0; n < 20; n++) {
  478                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  479                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  480                         return;
  481                 DELAY(20);
  482         }
  483 
  484         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  485 }
  486 
  487 static void
  488 hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  489     bus_size_t totsize, int error)
  490 {
  491         bus_addr_t *a = xsc;
  492 
  493         KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
  494         if (error != 0)
  495                 return;
  496         *a = segs[0].ds_addr;
  497 }
  498 
  499 /*
  500  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  501  * ring for subsequent use.
  502  */
  503 static __inline void
  504 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  505 {
  506 
  507         /*
  508          * Dropped a packet, reinitialize the descriptor and turn the
  509          * ownership back to the hardware.
  510          */
  511         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
  512             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
  513 }
  514 
  515 static int
  516 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  517 {
  518         struct hme_rxdesc *rd;
  519         struct mbuf *m;
  520         bus_addr_t ba;
  521         bus_dmamap_t map;
  522         uintptr_t b;
  523         int a, unmap;
  524 
  525         rd = &sc->sc_rb.rb_rxdesc[ri];
  526         unmap = rd->hrx_m != NULL;
  527         if (unmap && keepold) {
  528                 /*
  529                  * Reinitialize the descriptor flags, as they may have been
  530                  * altered by the hardware.
  531                  */
  532                 hme_discard_rxbuf(sc, ri);
  533                 return (0);
  534         }
  535         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  536                 return (ENOBUFS);
  537         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  538         b = mtod(m, uintptr_t);
  539         /*
  540          * Required alignment boundary. At least 16 is needed, but since
  541          * the mapping must be done in a way that a burst can start on a
  542          * natural boundary we might need to extend this.
  543          */
  544         a = max(HME_MINRXALIGN, sc->sc_burst);
  545         /*
   546          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  547          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  548          * alignment of the header adjacent to the ethernet header, which
  549          * should be sufficient in all cases. Nevertheless, this second-guesses
  550          * ALIGN().
  551          */
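               /*
                * A worked example of the adjustment below: with a == 16 and
                * an mbuf whose data area happens to start at ...0x02,
                * roundup2(b, a) - b == 14, so m_adj() skips 14 bytes and the
                * data pointer moves up to the next 16-byte boundary.
                */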
  552         m_adj(m, roundup2(b, a) - b);
  553         if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  554             m, hme_rxdma_callback, &ba, 0) != 0) {
  555                 m_freem(m);
  556                 return (ENOBUFS);
  557         }
  558         if (unmap) {
  559                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  560                     BUS_DMASYNC_POSTREAD);
  561                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  562         }
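               /*
                * The new mbuf was loaded into the spare map above so that a
                * load failure leaves the old buffer and its mapping intact;
                * now swap the spare map with this descriptor's map.
                */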
  563         map = rd->hrx_dmamap;
  564         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  565         sc->sc_rb.rb_spare_dmamap = map;
  566         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  567         HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
  568         rd->hrx_m = m;
  569         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
  570             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  571         return (0);
  572 }
  573 
  574 static int
  575 hme_meminit(struct hme_softc *sc)
  576 {
  577         struct hme_ring *hr = &sc->sc_rb;
  578         struct hme_txdesc *td;
  579         bus_addr_t dma;
  580         caddr_t p;
  581         unsigned int i;
  582         int error;
  583 
  584         p = hr->rb_membase;
  585         dma = hr->rb_dmabase;
  586 
  587         /*
  588          * Allocate transmit descriptors
  589          */
  590         hr->rb_txd = p;
  591         hr->rb_txddma = dma;
  592         p += HME_NTXDESC * HME_XD_SIZE;
  593         dma += HME_NTXDESC * HME_XD_SIZE;
   594         /* We have reserved descriptor space until the next 2048 byte boundary. */
  595         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  596         p = (caddr_t)roundup((u_long)p, 2048);
  597 
  598         /*
  599          * Allocate receive descriptors
  600          */
  601         hr->rb_rxd = p;
  602         hr->rb_rxddma = dma;
  603         p += HME_NRXDESC * HME_XD_SIZE;
  604         dma += HME_NRXDESC * HME_XD_SIZE;
   605         /* Again move forward to the next 2048 byte boundary. */
  606         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  607         p = (caddr_t)roundup((u_long)p, 2048);
  608 
  609         /*
  610          * Initialize transmit buffer descriptors
  611          */
  612         for (i = 0; i < HME_NTXDESC; i++) {
  613                 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
  614                 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
  615         }
  616 
  617         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  618         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  619         for (i = 0; i < HME_NTXQ; i++) {
  620                 td = &sc->sc_rb.rb_txdesc[i];
  621                 if (td->htx_m != NULL) {
  622                         m_freem(td->htx_m);
  623                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  624                             BUS_DMASYNC_POSTWRITE);
  625                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  626                         td->htx_m = NULL;
  627                 }
  628                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  629         }
  630 
  631         /*
  632          * Initialize receive buffer descriptors
  633          */
  634         for (i = 0; i < HME_NRXDESC; i++) {
  635                 error = hme_add_rxbuf(sc, i, 1);
  636                 if (error != 0)
  637                         return (error);
  638         }
  639 
  640         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
  641         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
  642 
  643         hr->rb_tdhead = hr->rb_tdtail = 0;
  644         hr->rb_td_nbusy = 0;
  645         hr->rb_rdtail = 0;
  646         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  647             hr->rb_txddma);
  648         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  649             hr->rb_rxddma);
  650         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  651             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  652         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  653             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  654         return (0);
  655 }
  656 
  657 static int
  658 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  659     u_int32_t clr, u_int32_t set)
  660 {
  661         int i = 0;
  662 
  663         val &= ~clr;
  664         val |= set;
  665         HME_MAC_WRITE_4(sc, reg, val);
  666         if (clr == 0 && set == 0)
  667                 return (1);     /* just write, no bits to wait for */
  668         do {
  669                 DELAY(100);
  670                 i++;
  671                 val = HME_MAC_READ_4(sc, reg);
  672                 if (i > 40) {
  673                         /* After 3.5ms, we should have been done. */
  674                         device_printf(sc->sc_dev, "timeout while writing to "
  675                             "MAC configuration register\n");
  676                         return (0);
  677                 }
  678         } while ((val & clr) != 0 && (val & set) != set);
  679         return (1);
  680 }
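       /*
        * A usage sketch: a caller that wants to disable the receiver could do
        *
        *      v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
        *      hme_mac_bitflip(sc, HME_MACI_RXCFG, v, HME_MAC_RXCFG_ENABLE, 0);
        *
        * which clears HME_MAC_RXCFG_ENABLE in v, writes the result back and
        * gives the hardware a short, bounded time to pick up the change.
        */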
  681 
  682 /*
  683  * Initialization of interface; set up initialization block
  684  * and transmit/receive descriptor rings.
  685  */
  686 static void
  687 hme_init(void *xsc)
  688 {
  689         struct hme_softc *sc = (struct hme_softc *)xsc;
  690 
  691         HME_LOCK(sc);
  692         hme_init_locked(sc);
  693         HME_UNLOCK(sc);
  694 }
  695 
  696 static void
  697 hme_init_locked(void *xsc)
  698 {
  699         struct hme_softc *sc = (struct hme_softc *)xsc;
  700         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  701         u_int8_t *ea;
  702         u_int32_t n, v;
  703 
  704         HME_LOCK_ASSERT(sc, MA_OWNED);
  705         /*
  706          * Initialization sequence. The numbered steps below correspond
  707          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  708          * Channel Engine manual (part of the PCIO manual).
  709          * See also the STP2002-STQ document from Sun Microsystems.
  710          */
  711 
  712         /* step 1 & 2. Reset the Ethernet Channel */
  713         hme_stop(sc);
  714 
  715         /* Re-initialize the MIF */
  716         hme_mifinit(sc);
  717 
  718 #if 0
  719         /* Mask all MIF interrupts, just in case */
  720         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  721 #endif
  722 
  723         /* step 3. Setup data structures in host memory */
  724         if (hme_meminit(sc) != 0) {
   725                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  726                 return;
  727         }
  728 
  729         /* step 4. TX MAC registers & counters */
  730         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  731         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  732         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  733         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  734         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  735 
  736         /* Load station MAC address */
  737         ea = sc->sc_arpcom.ac_enaddr;
  738         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  739         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  740         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  741 
  742         /*
  743          * Init seed for backoff
  744          * (source suggested by manual: low 10 bits of MAC address)
  745          */
  746         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  747         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  748 
  749 
  750         /* Note: Accepting power-on default for other MAC registers here.. */
  751 
  752         /* step 5. RX MAC registers & counters */
  753         hme_setladrf(sc, 0);
  754 
  755         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  756         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  757         /* Transmit Descriptor ring size: in increments of 16 */
  758         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  759 
  760         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  761         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  762 
  763         /* step 8. Global Configuration & Interrupt Mask */
  764         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  765             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  766                 HME_SEB_STAT_HOSTTOTX |
  767                 HME_SEB_STAT_RXTOHOST |
  768                 HME_SEB_STAT_TXALL |
  769                 HME_SEB_STAT_TXPERR |
  770                 HME_SEB_STAT_RCNTEXP |
  771                 HME_SEB_STAT_ALL_ERRORS ));
  772 
  773         switch (sc->sc_burst) {
  774         default:
  775                 v = 0;
  776                 break;
  777         case 16:
  778                 v = HME_SEB_CFG_BURST16;
  779                 break;
  780         case 32:
  781                 v = HME_SEB_CFG_BURST32;
  782                 break;
  783         case 64:
  784                 v = HME_SEB_CFG_BURST64;
  785                 break;
  786         }
  787         /*
   788          * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?).
   789          * Allowing 64-bit transfers breaks TX checksum offload as well. It
   790          * is not known whether this comes from a hardware bug or from the
   791          * driver's DMA scheme.
  792          *
  793          * if (sc->sc_pci == 0)
  794          *      v |= HME_SEB_CFG_64BIT;
  795          */
  796         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  797 
  798         /* step 9. ETX Configuration: use mostly default values */
  799 
  800         /* Enable DMA */
  801         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  802         v |= HME_ETX_CFG_DMAENABLE;
  803         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  804 
  805         /* step 10. ERX Configuration */
  806         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  807 
  808         /* Encode Receive Descriptor ring size: four possible values */
  809         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  810         switch (HME_NRXDESC) {
  811         case 32:
  812                 v |= HME_ERX_CFG_RINGSIZE32;
  813                 break;
  814         case 64:
  815                 v |= HME_ERX_CFG_RINGSIZE64;
  816                 break;
  817         case 128:
  818                 v |= HME_ERX_CFG_RINGSIZE128;
  819                 break;
  820         case 256:
  821                 v |= HME_ERX_CFG_RINGSIZE256;
  822                 break;
  823         default:
  824                 printf("hme: invalid Receive Descriptor ring size\n");
  825                 break;
  826         }
  827 
  828         /* Enable DMA, fix RX first byte offset. */
  829         v &= ~HME_ERX_CFG_FBO_MASK;
  830         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  831         /* RX TCP/UDP checksum offset */
  832         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  833         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  834         v |= n;
  835         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  836         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  837 
  838         /* step 11. XIF Configuration */
  839         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  840         v |= HME_MAC_XIF_OE;
  841         /* If an external transceiver is connected, enable its MII drivers */
  842         if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
  843                 v |= HME_MAC_XIF_MIIENABLE;
  844         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  845         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  846 
  847         /* step 12. RX_MAC Configuration Register */
  848         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  849         v |= HME_MAC_RXCFG_ENABLE;
  850         v &= ~(HME_MAC_RXCFG_DCRCS);
  851         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  852         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  853 
  854         /* step 13. TX_MAC Configuration Register */
  855         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  856         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  857         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  858         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  859 
  860         /* step 14. Issue Transmit Pending command */
  861 
  862 #ifdef HMEDEBUG
  863         /* Debug: double-check. */
  864         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  865             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  866             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  867             HME_ERX_READ_4(sc, HME_ERXI_RING),
  868             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  869         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  870             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  871             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  872             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  873         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  874             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  875             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  876 #endif
  877 
  878         /* Set the current media. */
  879         /*
  880          * HME_UNLOCK(sc);      
  881          * mii_mediachg(sc->sc_mii);
  882          * HME_LOCK(sc);        
  883          */
  884 
  885         /* Start the one second timer. */
  886         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  887 
  888         ifp->if_flags |= IFF_RUNNING;
  889         ifp->if_flags &= ~IFF_OACTIVE;
  890         ifp->if_timer = 0;
  891         hme_start_locked(ifp);
  892 }
  893 
  894 struct hme_txdma_arg {
  895         struct hme_softc        *hta_sc;
  896         struct hme_txdesc       *hta_htx;
  897         int                     hta_ndescs;
  898 };
  899 
  900 /*
  901  * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
  902  * are readable from the nearest burst boundary on (i.e. potentially before
  903  * ds_addr) to the first boundary beyond the end. This is usually a safe
  904  * assumption to make, but is not documented.
  905  */
  906 static void
  907 hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  908     bus_size_t totsz, int error)
  909 {
  910         struct hme_txdma_arg *ta = xsc;
  911         struct hme_txdesc *htx;
  912         bus_size_t len = 0;
  913         caddr_t txd;
  914         u_int32_t flags = 0;
  915         int i, tdhead, pci;
  916 
  917         if (error != 0)
  918                 return;
  919 
  920         tdhead = ta->hta_sc->sc_rb.rb_tdhead;
  921         pci = ta->hta_sc->sc_pci;
  922         txd = ta->hta_sc->sc_rb.rb_txd;
  923         htx = ta->hta_htx;
  924 
  925         if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
  926                 ta->hta_ndescs = -1;
  927                 return;
  928         }
  929         ta->hta_ndescs = nsegs;
  930 
  931         for (i = 0; i < nsegs; i++) {
  932                 if (segs[i].ds_len == 0)
  933                         continue;
  934 
  935                 /* Fill the ring entry. */
  936                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
  937                 if (len == 0)
  938                         flags |= HME_XD_SOP;
  939                 if (len + segs[i].ds_len == totsz)
  940                         flags |= HME_XD_EOP;
  941                 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
  942                     "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
  943                     (u_int)segs[i].ds_addr);
  944                 HME_XD_SETFLAGS(pci, txd, tdhead, flags);
  945                 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
  946 
  947                 ta->hta_sc->sc_rb.rb_td_nbusy++;
  948                 htx->htx_lastdesc = tdhead;
  949                 tdhead = (tdhead + 1) % HME_NTXDESC;
  950                 len += segs[i].ds_len;
  951         }
  952         ta->hta_sc->sc_rb.rb_tdhead = tdhead;
  953         KASSERT((flags & HME_XD_EOP) != 0,
  954             ("hme_txdma_callback: missed end of packet!"));
  955 }
  956 
  957 /* TX TCP/UDP checksum */
  958 static void
  959 hme_txcksum(struct mbuf *m, u_int32_t *cflags)
  960 {
  961         struct ip *ip;
  962         u_int32_t offset, offset2;
  963         caddr_t p;
  964 
  965         for(; m && m->m_len == 0; m = m->m_next)
  966                 ;
  967         if (m == NULL || m->m_len < ETHER_HDR_LEN) {
  968                 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
  969                 return; /* checksum will be corrupted */
  970         }
  971         if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
  972                 if (m->m_len != ETHER_HDR_LEN) {
  973                         printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
  974                         return; /* checksum will be corrupted */
  975                 }
  976                 /* XXX */
  977                 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
  978                         ;
  979                 if (m == NULL)
  980                         return; /* checksum will be corrupted */
  981                 ip = mtod(m, struct ip *);
  982         } else {
  983                 p = mtod(m, caddr_t);
  984                 p += ETHER_HDR_LEN;
  985                 ip = (struct ip *)p;
  986         }
  987         offset2 = m->m_pkthdr.csum_data;
  988         offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
  989         *cflags = offset << HME_XD_TXCKSUM_SSHIFT;
  990         *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 
  991         *cflags |= HME_XD_TXCKSUM;
  992 }
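       /*
        * A worked example of the offsets above, assuming a TCP segment over
        * IPv4 with no IP options: the checksum start offset is ETHER_HDR_LEN
        * (14) plus the 20-byte IP header = 34, and csum_data, which the stack
        * sets to the offset of th_sum within the TCP header (16), gives a
        * stuff offset of 34 + 16 = 50 bytes from the start of the frame.
        */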
  993 
  994 /*
  995  * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
  996  * start the transmission.
  997  * Returns 0 on success, -1 if there were not enough free descriptors to map
  998  * the packet, or an errno otherwise.
  999  */
 1000 static int
 1001 hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
 1002 {
 1003         struct hme_txdma_arg cba;
 1004         struct hme_txdesc *td;
 1005         int error, si, ri;
 1006         u_int32_t flags, cflags = 0;
 1007 
 1008         si = sc->sc_rb.rb_tdhead;
 1009         if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
 1010                 return (-1);
 1011         if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
 1012                 hme_txcksum(m0, &cflags);
 1013         cba.hta_sc = sc;
 1014         cba.hta_htx = td;
 1015         if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
 1016              m0, hme_txdma_callback, &cba, 0)) != 0)
 1017                 goto fail;
 1018         if (cba.hta_ndescs == -1) {
 1019                 error = -1;
 1020                 goto fail;
 1021         }
 1022         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
 1023             BUS_DMASYNC_PREWRITE);
 1024 
 1025         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1026         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
 1027         td->htx_m = m0;
 1028 
  1029         /* Turn descriptor ownership over to the hme, back to front. */
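               /*
                * The loop below walks backwards from the descriptor after the
                * last segment to the first one (si), so the SOP descriptor is
                * handed to the hardware last and the chip cannot start on a
                * partially constructed chain.
                */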
 1030         ri = sc->sc_rb.rb_tdhead;
 1031         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
 1032             ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
 1033         do {
 1034                 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1035                 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
 1036                     HME_XD_OWN | cflags;
 1037                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1038                     ri, si, flags);
 1039                 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
 1040         } while (ri != si);
 1041 
 1042         /* start the transmission. */
 1043         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1044         return (0);
 1045 fail:
 1046         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
 1047         return (error);
 1048 }
 1049 
 1050 /*
 1051  * Pass a packet to the higher levels.
 1052  */
 1053 static void
 1054 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1055 {
 1056         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1057         struct mbuf *m;
 1058 
 1059         if (len <= sizeof(struct ether_header) ||
 1060             len > HME_MAX_FRAMESIZE) {
 1061 #ifdef HMEDEBUG
 1062                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1063                     len);
 1064 #endif
 1065                 ifp->if_ierrors++;
 1066                 hme_discard_rxbuf(sc, ix);
 1067                 return;
 1068         }
 1069 
 1070         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1071         CTR1(KTR_HME, "hme_read: len %d", len);
 1072 
 1073         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1074                 /*
 1075                  * hme_add_rxbuf will leave the old buffer in the ring until
 1076                  * it is sure that a new buffer can be mapped. If it can not,
 1077                  * drop the packet, but leave the interface up.
 1078                  */
 1079                 ifp->if_iqdrops++;
 1080                 hme_discard_rxbuf(sc, ix);
 1081                 return;
 1082         }
 1083 
 1084         ifp->if_ipackets++;
 1085 
 1086         m->m_pkthdr.rcvif = ifp;
 1087         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1088         m_adj(m, HME_RXOFFS);
 1089         /* RX TCP/UDP checksum */
 1090         if (ifp->if_capenable & IFCAP_RXCSUM)
 1091                 hme_rxcksum(m, flags);
 1092         /* Pass the packet up. */
 1093         HME_UNLOCK(sc);
 1094         (*ifp->if_input)(ifp, m);
 1095         HME_LOCK(sc);
 1096 }
 1097 
 1098 static void
 1099 hme_start(struct ifnet *ifp)
 1100 {
 1101         struct hme_softc *sc = ifp->if_softc;
 1102 
 1103         HME_LOCK(sc);
 1104         hme_start_locked(ifp);
 1105         HME_UNLOCK(sc);
 1106 }
 1107 
 1108 static void
 1109 hme_start_locked(struct ifnet *ifp)
 1110 {
 1111         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1112         struct mbuf *m;
 1113         int error, enq = 0;
 1114 
 1115         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 1116                 return;
 1117 
 1118         error = 0;
 1119         for (;;) {
 1120                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1121                 if (m == NULL)
 1122                         break;
 1123 
 1124                 error = hme_load_txmbuf(sc, m);
 1125                 if (error == -1) {
 1126                         ifp->if_flags |= IFF_OACTIVE;
 1127                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1128                         break;
 1129                 } else if (error > 0) {
 1130                         printf("hme_start: error %d while loading mbuf\n",
 1131                             error);
 1132                 } else {
 1133                         enq = 1;
 1134                         BPF_MTAP(ifp, m);
 1135                 }
 1136         }
 1137 
 1138         if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
 1139                 ifp->if_flags |= IFF_OACTIVE;
 1140         /* Set watchdog timer if a packet was queued */
 1141         if (enq) {
 1142                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1143                     BUS_DMASYNC_PREWRITE);
 1144                 ifp->if_timer = 5;
 1145         }
 1146 }
 1147 
 1148 /*
 1149  * Transmit interrupt.
 1150  */
 1151 static void
 1152 hme_tint(struct hme_softc *sc)
 1153 {
 1154         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1155         struct hme_txdesc *htx;
 1156         unsigned int ri, txflags;
 1157 
 1158         /*
 1159          * Unload collision counters
 1160          */
 1161         ifp->if_collisions +=
 1162                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
 1163                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
 1164                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
 1165                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
 1166 
 1167         /*
 1168          * then clear the hardware counters.
 1169          */
 1170         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
 1171         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
 1172         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
 1173         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
 1174 
 1175         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1176         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1177         /* Fetch current position in the transmit ring */
 1178         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1179                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1180                         CTR0(KTR_HME, "hme_tint: not busy!");
 1181                         break;
 1182                 }
 1183 
 1184                 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
 1185                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1186 
 1187                 if ((txflags & HME_XD_OWN) != 0)
 1188                         break;
 1189 
 1190                 CTR0(KTR_HME, "hme_tint: not owned");
 1191                 --sc->sc_rb.rb_td_nbusy;
 1192                 ifp->if_flags &= ~IFF_OACTIVE;
 1193 
 1194                 /* Complete packet transmitted? */
 1195                 if ((txflags & HME_XD_EOP) == 0)
 1196                         continue;
 1197 
 1198                 KASSERT(htx->htx_lastdesc == ri,
 1199                     ("hme_tint: ring indices skewed: %d != %d!",
 1200                      htx->htx_lastdesc, ri));
 1201                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1202                     BUS_DMASYNC_POSTWRITE);
 1203                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1204 
 1205                 ifp->if_opackets++;
 1206                 m_freem(htx->htx_m);
 1207                 htx->htx_m = NULL;
 1208                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1209                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1210                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1211         }
 1212         /* Turn off watchdog */
 1213         if (sc->sc_rb.rb_td_nbusy == 0)
 1214                 ifp->if_timer = 0;
 1215 
 1216         /* Update ring */
 1217         sc->sc_rb.rb_tdtail = ri;
 1218 
 1219         hme_start_locked(ifp);
 1220 
 1221         if (sc->sc_rb.rb_td_nbusy == 0)
 1222                 ifp->if_timer = 0;
 1223 }
 1224 
 1225 /*
 1226  * RX TCP/UDP checksum 
 1227  */
 1228 static void
 1229 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1230 {
 1231         struct ether_header *eh;
 1232         struct ip *ip;
 1233         struct udphdr *uh;
 1234         int32_t hlen, len, pktlen;
 1235         u_int16_t cksum, *opts;
 1236         u_int32_t temp32;
 1237 
 1238         pktlen = m->m_pkthdr.len;
 1239         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1240                 return;
 1241         eh = mtod(m, struct ether_header *);
 1242         if (eh->ether_type != htons(ETHERTYPE_IP))
 1243                 return;
 1244         ip = (struct ip *)(eh + 1);
 1245         if (ip->ip_v != IPVERSION)
 1246                 return;
 1247 
 1248         hlen = ip->ip_hl << 2;
 1249         pktlen -= sizeof(struct ether_header);
 1250         if (hlen < sizeof(struct ip))
 1251                 return;
 1252         if (ntohs(ip->ip_len) < hlen)
 1253                 return;
 1254         if (ntohs(ip->ip_len) != pktlen)
 1255                 return;
 1256         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1257                 return; /* can't handle fragmented packet */
 1258 
 1259         switch (ip->ip_p) {
 1260         case IPPROTO_TCP:
 1261                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1262                         return;
 1263                 break;
 1264         case IPPROTO_UDP:
 1265                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1266                         return;
 1267                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1268                 if (uh->uh_sum == 0)
 1269                         return; /* no checksum */
 1270                 break;
 1271         default:
 1272                 return;
 1273         }
 1274 
 1275         cksum = ~(flags & HME_XD_RXCKSUM);
 1276         /* checksum fixup for IP options */
 1277         len = hlen - sizeof(struct ip);
 1278         if (len > 0) {
 1279                 opts = (u_int16_t *)(ip + 1);
 1280                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1281                         temp32 = cksum - *opts;
 1282                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1283                         cksum = temp32 & 65535;
 1284                 }
 1285         }
 1286         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1287         m->m_pkthdr.csum_data = cksum;
 1288 }
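       /*
        * The fixup above is needed because the hardware starts summing at a
        * fixed offset (ETHER_HDR_LEN + sizeof(struct ip), as programmed into
        * ERX_CFG in hme_init_locked()); when IP options are present they are
        * included in the hardware sum, so each option word is subtracted out
        * again in ones' complement arithmetic, with the
        * (temp32 >> 16) + (temp32 & 65535) step folding the borrow back in.
        */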
 1289 
 1290 /*
 1291  * Receive interrupt.
 1292  */
 1293 static void
 1294 hme_rint(struct hme_softc *sc)
 1295 {
 1296         caddr_t xdr = sc->sc_rb.rb_rxd;
 1297         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1298         unsigned int ri, len;
 1299         int progress = 0;
 1300         u_int32_t flags;
 1301 
 1302         /*
 1303          * Process all buffers with valid data.
 1304          */
 1305         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1306         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1307                 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 1308                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1309                 if ((flags & HME_XD_OWN) != 0)
 1310                         break;
 1311 
 1312                 progress++;
 1313                 if ((flags & HME_XD_OFL) != 0) {
 1314                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1315                             "flags=0x%x\n", ri, flags);
 1316                         ifp->if_ierrors++;
 1317                         hme_discard_rxbuf(sc, ri);
 1318                 } else {
 1319                         len = HME_XD_DECODE_RSIZE(flags);
 1320                         hme_read(sc, ri, len, flags);
 1321                 }
 1322         }
 1323         if (progress) {
 1324                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1325                     BUS_DMASYNC_PREWRITE);
 1326         }
 1327         sc->sc_rb.rb_rdtail = ri;
 1328 }
 1329 
 1330 static void
 1331 hme_eint(struct hme_softc *sc, u_int status)
 1332 {
 1333 
 1334         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1335                 device_printf(sc->sc_dev, "XXXlink status changed\n");
 1336                 return;
 1337         }
 1338 
 1339         HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1340 }
 1341 
 1342 void
 1343 hme_intr(void *v)
 1344 {
 1345         struct hme_softc *sc = (struct hme_softc *)v;
 1346         u_int32_t status;
 1347 
 1348         HME_LOCK(sc);
 1349         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1350         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1351 
 1352         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1353                 hme_eint(sc, status);
 1354 
 1355         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1356                 hme_tint(sc);
 1357 
 1358         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1359                 hme_rint(sc);
 1360         HME_UNLOCK(sc);
 1361 }
 1362 
 1363 
 1364 static void
 1365 hme_watchdog(struct ifnet *ifp)
 1366 {
 1367         struct hme_softc *sc = ifp->if_softc;
 1368 #ifdef HMEDEBUG
 1369         u_int32_t status;
 1370 #endif
 1371 
 1372         HME_LOCK(sc);
 1373 #ifdef HMEDEBUG
 1374         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1375         CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 1376 #endif
 1377         device_printf(sc->sc_dev, "device timeout\n");
 1378         ++ifp->if_oerrors;
 1379         HME_UNLOCK(sc);
 1380 
 1381         hme_reset(sc);
 1382 }
 1383 
 1384 /*
 1385  * Initialize the MII Management Interface
 1386  */
 1387 static void
 1388 hme_mifinit(struct hme_softc *sc)
 1389 {
 1390         u_int32_t v;
 1391 
 1392         HME_LOCK_ASSERT(sc, MA_OWNED);
 1393 
 1394         /* Configure the MIF in frame mode */
 1395         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1396         v &= ~HME_MIF_CFG_BBMODE;
 1397         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1398 }
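      /*
       * Background (descriptive only): the MIF can reach the PHY either
       * by bit-banging the MDIO pins or by having the hardware serialize
       * whole management frames; clearing HME_MIF_CFG_BBMODE selects the
       * latter, which hme_mii_readreg() and hme_mii_writereg() below
       * depend on.
       */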
 1399 
 1400 /*
 1401  * MII interface
 1402  */
 1403 int
 1404 hme_mii_readreg(device_t dev, int phy, int reg)
 1405 {
 1406         struct hme_softc *sc = device_get_softc(dev);
 1407         int n;
 1408         u_int32_t v;
 1409 
 1410         HME_LOCK(sc);
 1411         /* Select the desired PHY in the MIF configuration register */
 1412         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1413         /* Clear PHY select bit */
 1414         v &= ~HME_MIF_CFG_PHY;
 1415         if (phy == HME_PHYAD_EXTERNAL)
 1416                 /* Set PHY select bit to get at external device */
 1417                 v |= HME_MIF_CFG_PHY;
 1418         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1419 
 1420         /* Construct the frame command */
 1421         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1422             HME_MIF_FO_TAMSB |
 1423             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1424             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1425             (reg << HME_MIF_FO_REGAD_SHIFT);
 1426 
 1427         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1428         for (n = 0; n < 100; n++) {
 1429                 DELAY(1);
 1430                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1431                 if (v & HME_MIF_FO_TALSB) {
 1432                         HME_UNLOCK(sc);
 1433                         return (v & HME_MIF_FO_DATA);
 1434                 }
 1435         }
 1436 
 1437         device_printf(sc->sc_dev, "mii_read timeout\n");
 1438         HME_UNLOCK(sc);
 1439         return (0);
 1440 }
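      /*
       * Frame layout (descriptive only, field names per IEEE 802.3
       * clause 22; the shift macros are the driver's own):
       *
       *      <ST:2> <OP:2> <PHYAD:5> <REGAD:5> <TA:2> <DATA:16>
       *
       * MII_COMMAND_START is the 01 start pattern and the read opcode is
       * 10 (a write uses 01).  HME_MIF_FO_TAMSB/TALSB cover the
       * turnaround bits: TALSB reading back as set indicates the PHY has
       * driven the data field, which the 100 x DELAY(1) loop above polls
       * for, a budget of roughly 100 microseconds.  hme_mii_writereg()
       * below builds the same frame with the write opcode and a
       * host-supplied data field.
       */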
 1441 
 1442 int
 1443 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1444 {
 1445         struct hme_softc *sc = device_get_softc(dev);
 1446         int n;
 1447         u_int32_t v;
 1448 
 1449         HME_LOCK(sc);
 1450         /* Select the desired PHY in the MIF configuration register */
 1451         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1452         /* Clear PHY select bit */
 1453         v &= ~HME_MIF_CFG_PHY;
 1454         if (phy == HME_PHYAD_EXTERNAL)
 1455                 /* Set PHY select bit to get at external device */
 1456                 v |= HME_MIF_CFG_PHY;
 1457         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1458 
 1459         /* Construct the frame command */
 1460         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1461             HME_MIF_FO_TAMSB                            |
 1462             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1463             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1464             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1465             (val & HME_MIF_FO_DATA);
 1466 
 1467         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1468         for (n = 0; n < 100; n++) {
 1469                 DELAY(1);
 1470                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1471                 if (v & HME_MIF_FO_TALSB) {
 1472                         HME_UNLOCK(sc);
 1473                         return (1);
 1474                 }
 1475         }
 1476 
 1477         device_printf(sc->sc_dev, "mii_write timeout\n");
 1478         HME_UNLOCK(sc);
 1479         return (0);
 1480 }
 1481 
 1482 void
 1483 hme_mii_statchg(device_t dev)
 1484 {
 1485         struct hme_softc *sc = device_get_softc(dev);
 1486         int instance;
 1487         int phy;
 1488         u_int32_t v;
 1489 
 1490         HME_LOCK(sc);
 1491         instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
 1492         phy = sc->sc_phys[instance];
 1493 #ifdef HMEDEBUG
 1494         if (sc->sc_debug)
 1495                 printf("hme_mii_statchg: status change: phy = %d\n", phy);
 1496 #endif
 1497 
 1498         /* Select the current PHY in the MIF configuration register */
 1499         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1500         v &= ~HME_MIF_CFG_PHY;
 1501         if (phy == HME_PHYAD_EXTERNAL)
 1502                 v |= HME_MIF_CFG_PHY;
 1503         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1504 
 1505         /* Set the MAC Full Duplex bit appropriately */
 1506         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1507         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0)) {
 1508                 HME_UNLOCK(sc);
 1509                 return;
 1510         }
 1511         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1512                 v |= HME_MAC_TXCFG_FULLDPLX;
 1513         else
 1514                 v &= ~HME_MAC_TXCFG_FULLDPLX;
 1515         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 1516         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE)) {
 1517                 HME_UNLOCK(sc);
 1518                 return;
 1519         }
 1520         HME_UNLOCK(sc);
 1521 }
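      /*
       * Pattern note (descriptive only): the transmit MAC is quiesced
       * before its duplex bit may change, so hme_mac_bitflip() is used
       * twice above: once to clear HME_MAC_TXCFG_ENABLE and wait for the
       * hardware to acknowledge, and once more, after TXCFG has been
       * rewritten, to set the enable bit again.  A timeout in either
       * bitflip aborts the update with the lock released.
       */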
 1522 
 1523 static int
 1524 hme_mediachange(struct ifnet *ifp)
 1525 {
 1526         struct hme_softc *sc = ifp->if_softc;
 1527 
 1528         return (mii_mediachg(sc->sc_mii));
 1529 }
 1530 
 1531 static void
 1532 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1533 {
 1534         struct hme_softc *sc = ifp->if_softc;
 1535 
 1536         HME_LOCK(sc);
 1537         if ((ifp->if_flags & IFF_UP) == 0) {
 1538                 HME_UNLOCK(sc);
 1539                 return;
 1540         }
 1541 
 1542         HME_UNLOCK(sc);
 1543         mii_pollstat(sc->sc_mii);
 1544         HME_LOCK(sc);
 1545         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1546         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1547         HME_UNLOCK(sc);
 1548 }
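      /*
       * As in hme_ioctl() below, the lock is dropped around
       * mii_pollstat() because the MII layer re-enters the driver through
       * hme_mii_readreg()/hme_mii_writereg(), which acquire the lock
       * themselves.
       */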
 1549 
 1550 /*
 1551  * Process an ioctl request.
 1552  */
 1553 static int
 1554 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1555 {
 1556         struct hme_softc *sc = ifp->if_softc;
 1557         struct ifreq *ifr = (struct ifreq *)data;
 1558         int s, error = 0;
 1559 
 1560         HME_LOCK(sc);
 1561         s = splnet();
 1562 
 1563         switch (cmd) {
 1564         case SIOCSIFFLAGS:
 1565                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1566                     (ifp->if_flags & IFF_RUNNING) != 0) {
 1567                         /*
 1568                          * If interface is marked down and it is running, then
 1569                          * stop it.
 1570                          */
 1571                         hme_stop(sc);
 1572                         ifp->if_flags &= ~IFF_RUNNING;
 1573                 } else if ((ifp->if_flags & IFF_UP) != 0 &&
 1574                            (ifp->if_flags & IFF_RUNNING) == 0) {
 1575                         /*
 1576                          * If interface is marked up and it is stopped, then
 1577                          * start it.
 1578                          */
 1579                         hme_init_locked(sc);
 1580                 } else if ((ifp->if_flags & IFF_UP) != 0) {
 1581                         /*
 1582                          * Reset the interface to pick up changes in any other
 1583                          * flags that affect hardware registers.
 1584                          */
 1585                         hme_init_locked(sc);
 1586                 }
 1587                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1588                         sc->sc_csum_features |= CSUM_UDP;
 1589                 else
 1590                         sc->sc_csum_features &= ~CSUM_UDP;
 1591                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1592                         ifp->if_hwassist = sc->sc_csum_features;
 1593 #ifdef HMEDEBUG
 1594                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1595 #endif
 1596                 break;
 1597 
 1598         case SIOCADDMULTI:
 1599         case SIOCDELMULTI:
 1600                 hme_setladrf(sc, 1);
 1601                 error = 0;
 1602                 break;
 1603         case SIOCGIFMEDIA:
 1604         case SIOCSIFMEDIA:
 1605                 HME_UNLOCK(sc);
 1606                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1607                 HME_LOCK(sc);
 1608                 break;
 1609         case SIOCSIFCAP:
 1610                 ifp->if_capenable = ifr->ifr_reqcap;
 1611                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1612                         ifp->if_hwassist = sc->sc_csum_features;
 1613                 else
 1614                         ifp->if_hwassist = 0;
 1615                 break;
 1616         default:
 1617                 HME_UNLOCK(sc);
 1618                 error = ether_ioctl(ifp, cmd, data);
 1619                 HME_LOCK(sc);
 1620                 break;
 1621         }
 1622 
 1623         splx(s);
 1624         HME_UNLOCK(sc);
 1625         return (error);
 1626 }
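      /*
       * Locking note (descriptive only): the SIOC[GS]IFMEDIA and default
       * cases above drop the softc lock around ifmedia_ioctl() and
       * ether_ioctl() because those paths can call back into the driver
       * (e.g. via hme_mediachange()); holding HME_LOCK across them would
       * recurse on the non-recursive mutex.
       */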
 1627 
 1628 /*
 1629  * Set up the logical address filter.
 1630  */
 1631 static void
 1632 hme_setladrf(struct hme_softc *sc, int reenable)
 1633 {
 1634         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1635         struct ifmultiaddr *inm;
 1636         u_int32_t crc;
 1637         u_int32_t hash[4];
 1638         u_int32_t macc;
 1639 
 1640         HME_LOCK_ASSERT(sc, MA_OWNED);
 1641         /* Clear hash table */
 1642         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1643 
 1644         /* Get current RX configuration */
 1645         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1646 
 1647         /*
 1648          * Disable the receiver while changing its state, as the
 1649          * documentation mandates.
 1650          * We must then wait until the bit clears in the register; this
 1651          * should take at most 3.5ms.
 1652          */
 1653         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 1654                 return;
 1655         /* Disable the hash filter before writing to the filter registers. */
 1656         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1657             HME_MAC_RXCFG_HENABLE, 0))
 1658                 return;
 1659 
 1660         /* make RXMAC really SIMPLEX */
 1661         macc |= HME_MAC_RXCFG_ME;
 1662         if (reenable)
 1663                 macc |= HME_MAC_RXCFG_ENABLE;
 1664         else
 1665                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1666 
 1667         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1668                 /* Turn on promiscuous mode; turn off the hash filter */
 1669                 macc |= HME_MAC_RXCFG_PMISC;
 1670                 macc &= ~HME_MAC_RXCFG_HENABLE;
 1671                 ifp->if_flags |= IFF_ALLMULTI;
 1672                 goto chipit;
 1673         }
 1674 
 1675         /* Turn off promiscuous mode; turn on the hash filter */
 1676         macc &= ~HME_MAC_RXCFG_PMISC;
 1677         macc |= HME_MAC_RXCFG_HENABLE;
 1678 
 1679         /*
 1680          * Set up the multicast address filter by passing all multicast
 1681          * addresses through a CRC generator and using the high-order 6 bits
 1682          * as an index into the 64-bit logical address filter.  The two
 1683          * high-order bits select the word, while the remaining four bits
 1684          * select the bit within the word.
 1685          */
 1686 
 1687         IF_ADDR_LOCK(sc->sc_ifp);
 1688         TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
 1689                 if (inm->ifma_addr->sa_family != AF_LINK)
 1690                         continue;
 1691                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1692                     inm->ifma_addr), ETHER_ADDR_LEN);
 1693 
 1694                 /* Just want the 6 most significant bits. */
 1695                 crc >>= 26;
 1696 
 1697                 /* Set the corresponding bit in the filter. */
 1698                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1699         }
 1700         IF_ADDR_UNLOCK(sc->sc_ifp);
 1701 
 1702         ifp->if_flags &= ~IFF_ALLMULTI;
 1703 
 1704 chipit:
 1705         /* Now load the hash table into the chip */
 1706         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1707         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1708         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1709         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1710         hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1711             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1712             HME_MAC_RXCFG_ME));
 1713 }
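      /*
       * Worked example (illustrative only): suppose ether_crc32_le() over
       * some multicast address yields a CRC whose six high-order bits are
       * 101010b (0x2a).  After "crc >>= 26", the filter bit set above is
       *
       *      hash[0x2a >> 4]  = hash[2]
       *      bit (0x2a & 0xf) = bit 10
       *
       * i.e. the 64-bit filter is kept as four 16-bit register images,
       * with the upper two bits of the 6-bit index picking the HASHTABn
       * register and the lower four bits picking the bit within it.
       */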
