FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/gem.c


/*      $NetBSD: gem.c,v 1.38.2.1 2005/06/21 21:28:37 tron Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.38.2.1 2005/06/21 21:28:37 tron Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define TRIES   10000
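
/*
 * The reset/disable polling loops below retry up to TRIES times with
 * delay(100) between reads; with delay() taking microseconds, that is
 * a worst case of roughly one second per wait.
 */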
   87 
   88 void            gem_start(struct ifnet *);
   89 void            gem_stop(struct ifnet *, int);
   90 int             gem_ioctl(struct ifnet *, u_long, caddr_t);
   91 void            gem_tick(void *);
   92 void            gem_watchdog(struct ifnet *);
   93 void            gem_shutdown(void *);
   94 int             gem_init(struct ifnet *);
   95 void            gem_init_regs(struct gem_softc *sc);
   96 static int      gem_ringsize(int sz);
   97 int             gem_meminit(struct gem_softc *);
   98 void            gem_mifinit(struct gem_softc *);
   99 void            gem_reset(struct gem_softc *);
  100 int             gem_reset_rx(struct gem_softc *sc);
  101 int             gem_reset_tx(struct gem_softc *sc);
  102 int             gem_disable_rx(struct gem_softc *sc);
  103 int             gem_disable_tx(struct gem_softc *sc);
  104 void            gem_rxdrain(struct gem_softc *sc);
  105 int             gem_add_rxbuf(struct gem_softc *sc, int idx);
  106 void            gem_setladrf(struct gem_softc *);
  107 
  108 /* MII methods & callbacks */
  109 static int      gem_mii_readreg(struct device *, int, int);
  110 static void     gem_mii_writereg(struct device *, int, int, int);
  111 static void     gem_mii_statchg(struct device *);
  112 
  113 int             gem_mediachange(struct ifnet *);
  114 void            gem_mediastatus(struct ifnet *, struct ifmediareq *);
  115 
  116 struct mbuf     *gem_get(struct gem_softc *, int, int);
  117 int             gem_put(struct gem_softc *, int, struct mbuf *);
  118 void            gem_read(struct gem_softc *, int, int);
  119 int             gem_eint(struct gem_softc *, u_int);
  120 int             gem_rint(struct gem_softc *);
  121 int             gem_tint(struct gem_softc *);
  122 void            gem_power(int, void *);
  123 
  124 #ifdef GEM_DEBUG
  125 #define DPRINTF(sc, x)  if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
  126                                 printf x
  127 #else
  128 #define DPRINTF(sc, x)  /* nothing */
  129 #endif
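
/*
 * Note the extra parentheses in DPRINTF's second argument: callers pass
 * the whole printf argument list as one parenthesized expression, e.g.
 *      DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
 * so the entire call expands to nothing when GEM_DEBUG is not defined.
 */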

#define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
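
/*
 * With the usual values (ETHERMIN = 46, a 14-byte Ethernet header),
 * ETHER_MIN_TX works out to 60: the minimum frame length before the
 * 4-byte CRC.  gem_start() pads shorter packets up to this size using
 * the zeroed buffer loaded into sc_nulldmamap.
 */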


/*
 * gem_attach:
 *
 *      Attach a Gem interface to the system.
 */
void
gem_attach(sc, enaddr)
        struct gem_softc *sc;
        const uint8_t *enaddr;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mii_data *mii = &sc->sc_mii;
        struct mii_softc *child;
        struct ifmedia_entry *ifm;
        int i, error;
        u_int32_t v;
        char *nullbuf;

        /* Make sure the chip is stopped. */
        ifp->if_softc = sc;
        gem_reset(sc);

        /*
         * Allocate the control data structures, and create and load the
         * DMA map for it.  gem_control_data is 9216 bytes; we have space
         * for the padding buffer in the bus_dmamem_alloc()'d memory.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmatag,
            sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
            0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
                aprint_error(
                    "%s: unable to allocate control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_0;
        }

        /* XXX should map this in with correct endianness */
        if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
            sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
            BUS_DMA_COHERENT)) != 0) {
                aprint_error("%s: unable to map control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_1;
        }

        nullbuf =
            (caddr_t)sc->sc_control_data + sizeof(struct gem_control_data);

        if ((error = bus_dmamap_create(sc->sc_dmatag,
            sizeof(struct gem_control_data), 1,
            sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
                aprint_error("%s: unable to create control data DMA map, "
                    "error = %d\n", sc->sc_dev.dv_xname, error);
                goto fail_2;
        }

        if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
            sc->sc_control_data, sizeof(struct gem_control_data), NULL,
            0)) != 0) {
                aprint_error(
                    "%s: unable to load control data DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_3;
        }

        memset(nullbuf, 0, ETHER_MIN_TX);
        if ((error = bus_dmamap_create(sc->sc_dmatag,
            ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
                aprint_error("%s: unable to create padding DMA map, "
                    "error = %d\n", sc->sc_dev.dv_xname, error);
                goto fail_4;
        }

        if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
            nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
                aprint_error(
                    "%s: unable to load padding DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_5;
        }

        bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
            BUS_DMASYNC_PREWRITE);

        /*
         * Initialize the transmit job descriptors.
         */
        SIMPLEQ_INIT(&sc->sc_txfreeq);
        SIMPLEQ_INIT(&sc->sc_txdirtyq);

        /*
         * Create the transmit buffer DMA maps.
         */
        for (i = 0; i < GEM_TXQUEUELEN; i++) {
                struct gem_txsoft *txs;

                txs = &sc->sc_txsoft[i];
                txs->txs_mbuf = NULL;
                if ((error = bus_dmamap_create(sc->sc_dmatag,
                    ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
                    ETHER_MAX_LEN_JUMBO, 0, 0,
                    &txs->txs_dmamap)) != 0) {
                        aprint_error("%s: unable to create tx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_6;
                }
                SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /*
         * Create the receive buffer DMA maps.
         */
        for (i = 0; i < GEM_NRXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
                    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
                        aprint_error("%s: unable to create rx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_7;
                }
                sc->sc_rxsoft[i].rxs_mbuf = NULL;
        }

        /*
         * From this point forward, the attachment cannot fail.  A failure
         * before this point releases all resources that may have been
         * allocated.
         */

        /* Announce ourselves. */
        aprint_normal("%s: Ethernet address %s", sc->sc_dev.dv_xname,
            ether_sprintf(enaddr));

        /* Get RX FIFO size */
        sc->sc_rxfifosize = 64 *
            bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
        aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

        /* Get TX FIFO size */
        v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
        aprint_normal(", %uKB TX fifo\n", v / 16);
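
        /*
         * Both FIFO size registers appear to count 64-byte units, hence
         * the scaling above: 64 * v bytes for RX and v / 16
         * (= v * 64 / 1024) KB for TX.
         */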

        /* Initialize ifnet structure. */
        strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
        ifp->if_softc = sc;
        ifp->if_flags =
            IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
        ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx
                                | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
        ifp->if_start = gem_start;
        ifp->if_ioctl = gem_ioctl;
        ifp->if_watchdog = gem_watchdog;
        ifp->if_stop = gem_stop;
        ifp->if_init = gem_init;
        IFQ_SET_READY(&ifp->if_snd);

        /* Initialize ifmedia structures and MII info */
        mii->mii_ifp = ifp;
        mii->mii_readreg = gem_mii_readreg;
        mii->mii_writereg = gem_mii_writereg;
        mii->mii_statchg = gem_mii_statchg;

        ifmedia_init(&mii->mii_media, IFM_IMASK, gem_mediachange,
            gem_mediastatus);

        gem_mifinit(sc);

        mii_attach(&sc->sc_dev, mii, 0xffffffff,
                        MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);

        child = LIST_FIRST(&mii->mii_phys);
        if (child == NULL) {
                /* No PHY attached */
                ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
                ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
        } else {
                /*
                 * Walk along the list of attached MII devices and
                 * establish an `MII instance' to `phy number'
                 * mapping.  We'll use this mapping in media change
                 * requests to determine which phy to use to program
                 * the MIF configuration register.
                 */
                for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
                        /*
                         * Note: we support just two PHYs: the built-in
                         * internal device and an external on the MII
                         * connector.
                         */
                        if (child->mii_phy > 1 || child->mii_inst > 1) {
                                aprint_error(
                                    "%s: cannot accommodate MII device %s"
                                    " at phy %d, instance %d\n",
                                    sc->sc_dev.dv_xname,
                                    child->mii_dev.dv_xname,
                                    child->mii_phy, child->mii_inst);
                                continue;
                        }

                        sc->sc_phys[child->mii_inst] = child->mii_phy;
                }

                /*
                 * Now select and activate the PHY we will use.
                 *
                 * The order of preference is External (MDI1),
                 * Internal (MDI0), Serial Link (no MII).
                 */
                if (sc->sc_phys[1]) {
#ifdef DEBUG
                        aprint_debug("using external phy\n");
#endif
                        sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
                } else {
#ifdef DEBUG
                        aprint_debug("using internal phy\n");
#endif
                        sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
                }
                bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
                        sc->sc_mif_config);

                /*
                 * XXX - we can really do the following ONLY if the
                 * phy indeed has the auto negotiation capability!!
                 */
                ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
        }

        /*
         * If we support GigE media, we support jumbo frames too.
         * Unless we are Apple.
         */
        TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
                if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
                        if (sc->sc_variant != GEM_APPLE_GMAC)
                                sc->sc_ethercom.ec_capabilities
                                    |= ETHERCAP_JUMBO_MTU;

                        sc->sc_flags |= GEM_GIGABIT;
                        break;
                }
        }

        /* claim 802.1q capability */
        sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

        /* Attach the interface. */
        if_attach(ifp);
        ether_ifattach(ifp, enaddr);

        sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
        if (sc->sc_sh == NULL)
                panic("gem_attach: can't establish shutdownhook");

#if NRND > 0
        rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
                          RND_TYPE_NET, 0);
#endif

        evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
            NULL, sc->sc_dev.dv_xname, "interrupts");
#ifdef GEM_COUNTERS
        evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
            &sc->sc_ev_intr, sc->sc_dev.dv_xname, "tx interrupts");
        evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
            &sc->sc_ev_intr, sc->sc_dev.dv_xname, "rx interrupts");
        evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx ring full");
        evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx malloc failure");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 0desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 1desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 2desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 3desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >3desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >7desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >15desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >31desc");
        evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
            &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >63desc");
#endif

#if notyet
        /*
         * Add a suspend hook to make sure we come back up after a
         * resume.
         */
        sc->sc_powerhook = powerhook_establish(gem_power, sc);
        if (sc->sc_powerhook == NULL)
                aprint_error("%s: WARNING: unable to establish power hook\n",
                    sc->sc_dev.dv_xname);
#endif

        callout_init(&sc->sc_tick_ch);
        return;

        /*
         * Free any resources we've allocated during the failed attach
         * attempt.  Do this in reverse order and fall through.
         */
 fail_7:
        for (i = 0; i < GEM_NRXDESC; i++) {
                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_rxsoft[i].rxs_dmamap);
        }
 fail_6:
        for (i = 0; i < GEM_TXQUEUELEN; i++) {
                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_txsoft[i].txs_dmamap);
        }
        bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_5:
        bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
 fail_4:
        bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)nullbuf, ETHER_MIN_TX);
 fail_3:
        bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
        bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
            sizeof(struct gem_control_data));
 fail_1:
        bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
        return;
}


void
gem_tick(arg)
        void *arg;
{
        struct gem_softc *sc = arg;
        int s;

        s = splnet();
        mii_tick(&sc->sc_mii);
        splx(s);

        callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

void
gem_reset(sc)
        struct gem_softc *sc;
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int i;
        int s;

        s = splnet();
        DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
        gem_reset_rx(sc);
        gem_reset_tx(sc);

        /* Do a full reset */
        bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_RESET) &
                        (GEM_RESET_RX|GEM_RESET_TX)) == 0)
                        break;
        if ((bus_space_read_4(t, h, GEM_RESET) &
                (GEM_RESET_RX|GEM_RESET_TX)) != 0) {
                printf("%s: cannot reset device\n",
                        sc->sc_dev.dv_xname);
        }
        splx(s);
}


/*
 * gem_rxdrain:
 *
 *      Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
        struct gem_rxsoft *rxs;
        int i;

        for (i = 0; i < GEM_NRXDESC; i++) {
                rxs = &sc->sc_rxsoft[i];
                if (rxs->rxs_mbuf != NULL) {
                        bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
                        m_freem(rxs->rxs_mbuf);
                        rxs->rxs_mbuf = NULL;
                }
        }
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int disable)
{
        struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
        struct gem_txsoft *txs;

        DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

        callout_stop(&sc->sc_tick_ch);
        mii_down(&sc->sc_mii);

        /* XXX - Should we reset these instead? */
        gem_disable_rx(sc);
        gem_disable_tx(sc);

        /*
         * Release any queued transmit buffers.
         */
        while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                if (txs->txs_mbuf != NULL) {
                        bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }
                SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        if (disable) {
                gem_rxdrain(sc);
        }

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
}


/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int i;

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        gem_disable_rx(sc);
        bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
        /* Wait till it finishes */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) == 0)
                        break;
        if ((bus_space_read_4(t, h, GEM_RX_CONFIG) & 1) != 0)
                printf("%s: cannot disable RX DMA\n",
                        sc->sc_dev.dv_xname);

        /* Wait 5ms extra. */
        delay(5000);

        /* Finally, reset the ERX */
        bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
        /* Wait till it finishes */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) == 0)
                        break;
        if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_RX) != 0) {
                printf("%s: cannot reset receiver\n",
                        sc->sc_dev.dv_xname);
                return (1);
        }
        return (0);
}


/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int i;

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        gem_disable_tx(sc);
        bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
        /* Wait till it finishes */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) == 0)
                        break;
        if ((bus_space_read_4(t, h, GEM_TX_CONFIG) & 1) != 0)
                printf("%s: cannot disable TX DMA\n",
                        sc->sc_dev.dv_xname);

        /* Wait 5ms extra. */
        delay(5000);

        /* Finally, reset the ETX */
        bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
        /* Wait till it finishes */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
                        break;
        if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) != 0) {
                printf("%s: cannot reset transmitter\n",
                        sc->sc_dev.dv_xname);
                return (1);
        }
        return (0);
}

/*
 * disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int i;
        u_int32_t cfg;

        /* Flip the enable bit */
        cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
        cfg &= ~GEM_MAC_RX_ENABLE;
        bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

        /* Wait for it to finish */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_MAC_RX_CONFIG) &
                        GEM_MAC_RX_ENABLE) == 0)
                        return (0);
        return (1);
}

/*
 * disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int i;
        u_int32_t cfg;

        /* Flip the enable bit */
        cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
        cfg &= ~GEM_MAC_TX_ENABLE;
        bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

        /* Wait for it to finish */
        for (i = TRIES; i--; delay(100))
                if ((bus_space_read_4(t, h, GEM_MAC_TX_CONFIG) &
                        GEM_MAC_TX_ENABLE) == 0)
                        return (0);
        return (1);
}

/*
 * Initialize the transmit and receive descriptor rings in host memory.
 */
int
gem_meminit(struct gem_softc *sc)
{
        struct gem_rxsoft *rxs;
        int i, error;

        /*
         * Initialize the transmit descriptor ring.
         */
        memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
        for (i = 0; i < GEM_NTXDESC; i++) {
                sc->sc_txdescs[i].gd_flags = 0;
                sc->sc_txdescs[i].gd_addr = 0;
        }
        GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        sc->sc_txfree = GEM_NTXDESC - 1;
        sc->sc_txnext = 0;
        sc->sc_txwin = 0;

        /*
         * Initialize the receive descriptor and receive job
         * descriptor rings.
         */
        for (i = 0; i < GEM_NRXDESC; i++) {
                rxs = &sc->sc_rxsoft[i];
                if (rxs->rxs_mbuf == NULL) {
                        if ((error = gem_add_rxbuf(sc, i)) != 0) {
                                printf("%s: unable to allocate or map rx "
                                    "buffer %d, error = %d\n",
                                    sc->sc_dev.dv_xname, i, error);
                                /*
                                 * XXX Should attempt to run with fewer receive
                                 * XXX buffers instead of just failing.
                                 */
                                gem_rxdrain(sc);
                                return (1);
                        }
                } else
                        GEM_INIT_RXDESC(sc, i);
        }
        sc->sc_rxptr = 0;

        return (0);
}

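/*
 * gem_ringsize encodes a descriptor ring size for the TX/RX DMA
 * configuration registers.  gem_init() below ORs the result into
 * GEM_TX_CONFIG and GEM_RX_CONFIG, e.g.:
 *
 *      v = gem_ringsize(GEM_NTXDESC);
 *      bus_space_write_4(t, h, GEM_TX_CONFIG,
 *          v | GEM_TX_CONFIG_TXDMA_EN | ...);
 */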
static int
gem_ringsize(int sz)
{
        switch (sz) {
        case 32:
                return GEM_RING_SZ_32;
        case 64:
                return GEM_RING_SZ_64;
        case 128:
                return GEM_RING_SZ_128;
        case 256:
                return GEM_RING_SZ_256;
        case 512:
                return GEM_RING_SZ_512;
        case 1024:
                return GEM_RING_SZ_1024;
        case 2048:
                return GEM_RING_SZ_2048;
        case 4096:
                return GEM_RING_SZ_4096;
        case 8192:
                return GEM_RING_SZ_8192;
        default:
                printf("gem: invalid descriptor ring size %d\n", sz);
                return GEM_RING_SZ_32;
        }
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
        struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        int s;
        u_int max_frame_size;
        u_int32_t v;

        s = splnet();

        DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
        /*
         * Initialization sequence.  The numbered steps below correspond
         * to the sequence outlined in section 6.3.5.1 of the Ethernet
         * Channel Engine manual (part of the PCIO manual).
         * See also the STP2002-STQ document from Sun Microsystems.
         */

        /* step 1 & 2.  Reset the Ethernet Channel */
        gem_stop(ifp, 0);
        gem_reset(sc);
        DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

        /* Re-initialize the MIF */
        gem_mifinit(sc);

        /* Call MI reset function if any */
        if (sc->sc_hwreset)
                (*sc->sc_hwreset)(sc);

        /* step 3.  Set up data structures in host memory */
        gem_meminit(sc);

        /* step 4.  TX MAC registers & counters */
        gem_init_regs(sc);
        max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
        max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
        if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
                max_frame_size += ETHER_VLAN_ENCAP_LEN;
        bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
            max_frame_size | /* burst size */ (0x2000 << 16));

        /* step 5.  RX MAC registers & counters */
        gem_setladrf(sc);

        /* step 6 & 7.  Program Descriptor Ring Base Addresses */
        /* NOTE: we use only 32-bit DMA addresses here. */
        bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
        bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

        bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
        bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

        /* step 8.  Global Configuration & Interrupt Mask */
        bus_space_write_4(t, h, GEM_INTMASK,
                      ~(GEM_INTR_TX_INTME|
                        GEM_INTR_TX_EMPTY|
                        GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
                        GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
                        GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
                        GEM_INTR_BERR));
        bus_space_write_4(t, h, GEM_MAC_RX_MASK,
                        GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
        bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */
        bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXX */

        /* step 9.  ETX Configuration: use mostly default values */

        /* Enable DMA */
        v = gem_ringsize(GEM_NTXDESC /*XXX*/);
        bus_space_write_4(t, h, GEM_TX_CONFIG,
                v|GEM_TX_CONFIG_TXDMA_EN|
                ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));
        bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);

        /* step 10.  ERX Configuration */

        /* Encode the Receive Descriptor ring size */
        v = gem_ringsize(GEM_NRXDESC /*XXX*/);

        /* Set receive h/w checksum offset */
#ifdef INET
        v |= (ETHER_HDR_LEN + sizeof(struct ip) +
              ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
                ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
#endif

        /* Enable DMA */
        bus_space_write_4(t, h, GEM_RX_CONFIG,
                v|(GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
                (2 << GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);

        /*
         * The following value is for an OFF Threshold of about 3/4 full
         * and an ON Threshold of 1/4 full.
         */
        bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
             (3 * sc->sc_rxfifosize / 256) |
             ((sc->sc_rxfifosize / 256) << 12));
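
        /*
         * Worked example, assuming the threshold fields count 64-byte
         * FIFO units: with a 16KB FIFO, OFF = 3 * 16384 / 256 = 192
         * units (12KB, i.e. 3/4 full) and ON = 16384 / 256 = 64 units
         * (4KB, i.e. 1/4 full).
         */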
        bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

        /* step 11.  Configure Media */
        mii_mediachg(&sc->sc_mii);

        /* XXX Serial link needs a whole different setup. */

        /* step 12.  RX_MAC Configuration Register */
        v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
        v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
        bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

        /* step 14.  Issue Transmit Pending command */

        /* Call MI initialization function if any */
        if (sc->sc_hwinit)
                (*sc->sc_hwinit)(sc);

        /* step 15.  Give the receiver a swift kick */
        bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);

        /* Start the one second timer. */
        callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        ifp->if_timer = 0;
        splx(s);

        return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h;
        const u_char *laddr = LLADDR(ifp->if_sadl);
        u_int32_t v;

        /* These regs are not cleared on reset */
        if (!sc->sc_inited) {

                /* Wooo.  Magic values. */
                bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
                bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
                bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

                bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
                /* Max frame and max burst size */
                bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
                     ETHER_MAX_LEN | (0x2000 << 16));

                bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
                bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
                bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
                /* Dunno.... */
                bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
                bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
                    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

                /* Secondary MAC addr set to 0:0:0:0:0:0 */
                bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

                /* MAC control addr set to 01:80:c2:00:00:01 */
                bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
                bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
                bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

                /* MAC filter addr set to 0:0:0:0:0:0 */
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

                bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
                bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

                sc->sc_inited = 1;
        }

        /* Counters need to be zeroed */
        bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

        /* Un-pause stuff */
#if 0
        bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
        bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

        /*
         * Set the station address.
         */
        bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
        bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
        bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
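
        /*
         * Each ADDRn register holds 16 bits of the address, most
         * significant word in ADDR2; e.g. for 08:00:20:a1:b2:c3 this
         * writes ADDR0 = 0xb2c3, ADDR1 = 0x20a1, ADDR2 = 0x0800.
         */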

#if 0
        if (sc->sc_variant != APPLE_GMAC)
                return;
#endif

        /*
         * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
         */
        sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
        v = GEM_MAC_XIF_TX_MII_ENA;
        if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
                v |= GEM_MAC_XIF_FDPLX_LED;
                if (sc->sc_flags & GEM_GIGABIT)
                        v |= GEM_MAC_XIF_GMII_MODE;
        }
        bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

void
gem_start(ifp)
        struct ifnet *ifp;
{
        struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
        struct mbuf *m0, *m;
        struct gem_txsoft *txs, *last_txs;
        bus_dmamap_t dmamap;
        int error, firsttx, nexttx, lasttx = -1, ofree, seg;
        uint64_t flags = 0;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        /*
         * Remember the previous number of free descriptors and
         * the first descriptor we'll use.
         */
        ofree = sc->sc_txfree;
        firsttx = sc->sc_txnext;

        DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
            sc->sc_dev.dv_xname, ofree, firsttx));

        /*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors.
         */
        while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
               sc->sc_txfree != 0) {
                /*
                 * Grab a packet off the queue.
                 */
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
                m = NULL;

                dmamap = txs->txs_dmamap;

                /*
                 * Load the DMA map.  If this fails, the packet either
                 * didn't fit in the allotted number of segments, or we
                 * were short on resources.  In this case, we'll copy
                 * and try again.
                 */
                if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
                      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 ||
                      (m0->m_pkthdr.len < ETHER_MIN_TX &&
                       dmamap->dm_nsegs == GEM_NTXSEGS)) {
                        if (m0->m_pkthdr.len > MCLBYTES) {
                                printf("%s: unable to allocate jumbo Tx "
                                    "cluster\n", sc->sc_dev.dv_xname);
                                IFQ_DEQUEUE(&ifp->if_snd, m0);
                                m_freem(m0);
                                continue;
                        }
                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                printf("%s: unable to allocate Tx mbuf\n",
                                    sc->sc_dev.dv_xname);
                                break;
                        }
                        MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
                        if (m0->m_pkthdr.len > MHLEN) {
                                MCLGET(m, M_DONTWAIT);
                                if ((m->m_flags & M_EXT) == 0) {
                                        printf("%s: unable to allocate Tx "
                                            "cluster\n", sc->sc_dev.dv_xname);
                                        m_freem(m);
                                        break;
                                }
                        }
                        m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
                        m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
                        error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
                            m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
                        if (error) {
                                printf("%s: unable to load Tx buffer, "
                                    "error = %d\n", sc->sc_dev.dv_xname, error);
                                break;
                        }
                }

                /*
                 * Ensure we have enough descriptors free to describe
                 * the packet.
                 */
                if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
                     (sc->sc_txfree - 1) : sc->sc_txfree)) {
                        /*
                         * Not enough free descriptors to transmit this
                         * packet.  We haven't committed to anything yet,
                         * so just unload the DMA map, put the packet
                         * back on the queue, and punt.  Notify the upper
                         * layer that there are no more slots left.
                         *
                         * XXX We could allocate an mbuf and copy, but
                         * XXX is it worth it?
                         */
                        ifp->if_flags |= IFF_OACTIVE;
                        bus_dmamap_unload(sc->sc_dmatag, dmamap);
                        if (m != NULL)
                                m_freem(m);
                        break;
                }

                IFQ_DEQUEUE(&ifp->if_snd, m0);
                if (m != NULL) {
                        m_freem(m0);
                        m0 = m;
                }

                /*
                 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
                 */

                /* Sync the DMA map. */
                bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
                    BUS_DMASYNC_PREWRITE);

                /*
                 * Initialize the transmit descriptors.
                 */
                for (nexttx = sc->sc_txnext, seg = 0;
                     seg < dmamap->dm_nsegs;
                     seg++, nexttx = GEM_NEXTTX(nexttx)) {

                        /*
                         * If this is the first descriptor we're
                         * enqueueing, set the start of packet flag,
                         * and the checksum stuff if we want the hardware
                         * to do it.
                         */
                        sc->sc_txdescs[nexttx].gd_addr =
                            GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
                        flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
                        if (nexttx == firsttx) {
                                flags |= GEM_TD_START_OF_PACKET;
                                if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
                                        sc->sc_txwin = 0;
                                        flags |= GEM_TD_INTERRUPT_ME;
                                }

#ifdef INET
                                /* h/w checksum */
                                if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 |
                                    M_CSUM_UDPv4) && m0->m_pkthdr.csum_flags &
                                    (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
                                        struct ether_header *eh;
                                        uint16_t offset, start;

                                        eh = mtod(m0, struct ether_header *);
                                        switch (ntohs(eh->ether_type)) {
                                        case ETHERTYPE_IP:
                                                start = ETHER_HDR_LEN;
                                                break;
                                        case ETHERTYPE_VLAN:
                                                start = ETHER_HDR_LEN +
                                                        ETHER_VLAN_ENCAP_LEN;
                                                break;
                                        default:
                                                /* unsupported, drop it */
                                                m_free(m0);
                                                continue;
                                        }
                                        start += M_CSUM_DATA_IPv4_IPHL(
                                            m0->m_pkthdr.csum_data);
                                        offset = M_CSUM_DATA_IPv4_OFFSET(
                                            m0->m_pkthdr.csum_data) + start;
                                        flags |= (start <<
                                                  GEM_TD_CXSUM_STARTSHFT) |
                                                 (offset <<
                                                  GEM_TD_CXSUM_STUFFSHFT) |
                                                 GEM_TD_CXSUM_ENABLE;
                                }
#endif
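                                /*
                                 * Example: a TCP/IPv4 packet with a
                                 * 20-byte IP header gets start =
                                 * 14 + 20 = 34 and, since the TCP
                                 * checksum field sits 16 bytes into the
                                 * TCP header, offset = 34 + 16 = 50.
                                 */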
                        }
                        if (seg == dmamap->dm_nsegs - 1) {
                                flags |= GEM_TD_END_OF_PACKET;
                        } else {
                                /* last flag set outside of loop */
                                sc->sc_txdescs[nexttx].gd_flags =
                                        GEM_DMA_WRITE(sc, flags);
                        }
                        lasttx = nexttx;
                }
                if (m0->m_pkthdr.len < ETHER_MIN_TX) {
                        /* add padding buffer at end of chain */
                        flags &= ~GEM_TD_END_OF_PACKET;
                        sc->sc_txdescs[lasttx].gd_flags =
                            GEM_DMA_WRITE(sc, flags);

                        sc->sc_txdescs[nexttx].gd_addr =
                            GEM_DMA_WRITE(sc,
                            sc->sc_nulldmamap->dm_segs[0].ds_addr);
                        flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
                            GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
                        lasttx = nexttx;
                        nexttx = GEM_NEXTTX(nexttx);
                        seg++;
                }
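                /*
                 * E.g. a 20-byte runt consumes one extra descriptor
                 * pointing into the zeroed pad buffer with a length of
                 * ETHER_MIN_TX - 20 = 40 bytes, so the chip transmits
                 * a full minimum-size frame.
                 */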
 1226                 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
 1227 
 1228                 KASSERT(lasttx != -1);
 1229 
 1230                 /*
 1231                  * Store a pointer to the packet so we can free it later,
 1232                  * and remember what txdirty will be once the packet is
 1233                  * done.
 1234                  */
 1235                 txs->txs_mbuf = m0;
 1236                 txs->txs_firstdesc = sc->sc_txnext;
 1237                 txs->txs_lastdesc = lasttx;
 1238                 txs->txs_ndescs = seg;
 1239 
 1240 #ifdef GEM_DEBUG
 1241                 if (ifp->if_flags & IFF_DEBUG) {
 1242                         printf("     gem_start %p transmit chain:\n", txs);
 1243                         for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
 1244                                 printf("descriptor %d:\t", seg);
 1245                                 printf("gd_flags:   0x%016llx\t", (long long)
 1246                                         GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
 1247                                 printf("gd_addr: 0x%016llx\n", (long long)
 1248                                         GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
 1249                                 if (seg == lasttx)
 1250                                         break;
 1251                         }
 1252                 }
 1253 #endif
 1254 
 1255                 /* Sync the descriptors we're using. */
 1256                 GEM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
 1257                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1258 
 1259                 /* Advance the tx pointer. */
 1260                 sc->sc_txfree -= txs->txs_ndescs;
 1261                 sc->sc_txnext = nexttx;
 1262 
 1263                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 1264                 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 1265 
 1266                 last_txs = txs;
 1267 
 1268 #if NBPFILTER > 0
 1269                 /*
 1270                  * Pass the packet to any BPF listeners.
 1271                  */
 1272                 if (ifp->if_bpf)
 1273                         bpf_mtap(ifp->if_bpf, m0);
 1274 #endif /* NBPFILTER > 0 */
 1275         }
 1276 
 1277         if (txs == NULL || sc->sc_txfree == 0) {
 1278                 /* No more slots left; notify upper layer. */
 1279                 ifp->if_flags |= IFF_OACTIVE;
 1280         }
 1281 
 1282         if (sc->sc_txfree != ofree) {
 1283                 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
 1284                     sc->sc_dev.dv_xname, lasttx, firsttx));
 1285                 /*
 1286                  * The entire packet chain is set up.
 1287                  * Kick the transmitter.
 1288                  */
 1289                 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
 1290                         sc->sc_dev.dv_xname, nexttx));
 1291                 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
 1292                         sc->sc_txnext);
 1293 
 1294                 /* Set a watchdog timer in case the chip flakes out. */
 1295                 ifp->if_timer = 5;
 1296                 DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
 1297                         sc->sc_dev.dv_xname, ifp->if_timer));
 1298         }
 1299 }
 1300 
 1301 /*
 1302  * Transmit interrupt.
 1303  */
 1304 int
 1305 gem_tint(sc)
 1306         struct gem_softc *sc;
 1307 {
 1308         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1309         bus_space_tag_t t = sc->sc_bustag;
 1310         bus_space_handle_t mac = sc->sc_h;
 1311         struct gem_txsoft *txs;
 1312         int txlast;
 1313         int progress = 0;
 1314 
 1315 
 1316         DPRINTF(sc, ("%s: gem_tint\n", sc->sc_dev.dv_xname));
 1317 
 1318         /*
 1319          * Unload collision counters
 1320          */
 1321         ifp->if_collisions +=
 1322                 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
 1323                 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
 1324                 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
 1325                 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
 1326 
 1327         /*
 1328          * Then clear the hardware counters.
 1329          */
 1330         bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
 1331         bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
 1332         bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
 1333         bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
 1334 
 1335         /*
 1336          * Go through our Tx list and free mbufs for those
 1337          * frames that have been transmitted.
 1338          */
 1339         while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
 1340                 GEM_CDTXSYNC(sc, txs->txs_lastdesc,
 1341                     txs->txs_ndescs,
 1342                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1343 
 1344 #ifdef GEM_DEBUG
 1345                 if (ifp->if_flags & IFF_DEBUG) {
 1346                         int i;
 1347                         printf("    txsoft %p transmit chain:\n", txs);
 1348                         for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
 1349                                 printf("descriptor %d: ", i);
 1350                                 printf("gd_flags: 0x%016llx\t", (long long)
 1351                                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
 1352                                 printf("gd_addr: 0x%016llx\n", (long long)
 1353                                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
 1354                                 if (i == txs->txs_lastdesc)
 1355                                         break;
 1356                         }
 1357                 }
 1358 #endif
 1359 
 1360                 /*
 1361                  * In theory, we could harvest some descriptors before
 1362                  * the ring is empty, but that's a bit complicated.
 1363                  *
 1364                  * GEM_TX_COMPLETION points to the last descriptor
 1365                  * processed +1.
 1366                  */
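                /*
                 * Worked example (illustrative, not from the original
                 * source): suppose an 8-entry ring where this frame
                 * occupies descriptors 6..1 (firstdesc = 6, lastdesc = 1,
                 * so the range wraps).  txlast = 2 means the chip has
                 * moved past descriptor 1 and the frame can be harvested;
                 * txlast = 7 still falls inside the wrapped range, so the
                 * test below breaks out and waits for another interrupt.
                 */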
 1367                 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
 1368                 DPRINTF(sc,
 1369                         ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
 1370                                 txs->txs_lastdesc, txlast));
 1371                 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
 1372                         if ((txlast >= txs->txs_firstdesc) &&
 1373                                 (txlast <= txs->txs_lastdesc))
 1374                                 break;
 1375                 } else {
 1376                         /* Ick -- this command wraps */
 1377                         if ((txlast >= txs->txs_firstdesc) ||
 1378                                 (txlast <= txs->txs_lastdesc))
 1379                                 break;
 1380                 }
 1381 
 1382                 DPRINTF(sc, ("gem_tint: releasing a desc\n"));
 1383                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
 1384 
 1385                 sc->sc_txfree += txs->txs_ndescs;
 1386 
 1387                 if (txs->txs_mbuf == NULL) {
 1388 #ifdef DIAGNOSTIC
 1389                         panic("gem_tint: null mbuf");
 1390 #endif
 1391                 }
 1392 
 1393                 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
 1394                     0, txs->txs_dmamap->dm_mapsize,
 1395                     BUS_DMASYNC_POSTWRITE);
 1396                 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
 1397                 m_freem(txs->txs_mbuf);
 1398                 txs->txs_mbuf = NULL;
 1399 
 1400                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
 1401 
 1402                 ifp->if_opackets++;
 1403                 progress = 1;
 1404         }
 1405 
 1406 #if 0
 1407         DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
 1408                 "GEM_TX_DATA_PTR %llx "
 1409                 "GEM_TX_COMPLETION %x\n",
 1410                 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
 1411                 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
 1412                         GEM_TX_DATA_PTR_HI) << 32) |
 1413                              bus_space_read_4(sc->sc_bustag, sc->sc_h,
 1414                         GEM_TX_DATA_PTR_LO),
 1415                 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
 1416 #endif
 1417 
 1418         if (progress) {
 1419                 if (sc->sc_txfree == GEM_NTXDESC - 1)
 1420                         sc->sc_txwin = 0;
 1421 
 1422                 ifp->if_flags &= ~IFF_OACTIVE;
 1423                 gem_start(ifp);
 1424 
 1425                 if (SIMPLEQ_EMPTY(&sc->sc_txdirtyq))
 1426                         ifp->if_timer = 0;
 1427         }
 1428         DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
 1429                 sc->sc_dev.dv_xname, ifp->if_timer));
 1430 
 1431         return (1);
 1432 }
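
#if 0
/*
 * Sketch only, compiled out: the wrap-aware "frame still in flight"
 * test from gem_tint() above, factored into a helper for clarity.
 * gem_txs_busy() is a hypothetical name and is not part of the
 * original driver.
 */
static __inline int
gem_txs_busy(const struct gem_txsoft *txs, int txlast)
{

        /* Simple case: the frame's descriptors do not wrap. */
        if (txs->txs_firstdesc <= txs->txs_lastdesc)
                return (txlast >= txs->txs_firstdesc &&
                    txlast <= txs->txs_lastdesc);

        /* The descriptor range wraps past the end of the ring. */
        return (txlast >= txs->txs_firstdesc ||
            txlast <= txs->txs_lastdesc);
}
#endif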
 1433 
 1434 /*
 1435  * Receive interrupt.
 1436  */
 1437 int
 1438 gem_rint(sc)
 1439         struct gem_softc *sc;
 1440 {
 1441         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1442         bus_space_tag_t t = sc->sc_bustag;
 1443         bus_space_handle_t h = sc->sc_h;
 1444         struct gem_rxsoft *rxs;
 1445         struct mbuf *m;
 1446         u_int64_t rxstat;
 1447         u_int32_t rxcomp;
 1448         int i, len, progress = 0;
 1449 
 1450         DPRINTF(sc, ("%s: gem_rint\n", sc->sc_dev.dv_xname));
 1451 
 1452         /*
 1453          * Read the completion register once.  This limits
 1454          * how long the following loop can execute.
 1455          */
 1456         rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
 1457 
 1458         /*
 1459          * XXX Read the completion index (lastrx) only once, at the top, for speed.
 1460          */
 1461         DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
 1462                 sc->sc_rxptr, rxcomp));
 1463 
 1464         /*
 1465          * Go into the loop at least once, even if the ring is empty.
 1466          */
 1467         for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
 1468              i = GEM_NEXTRX(i)) {
 1469                 rxs = &sc->sc_rxsoft[i];
 1470 
 1471                 GEM_CDRXSYNC(sc, i,
 1472                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1473 
 1474                 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
 1475 
 1476                 if (rxstat & GEM_RD_OWN) {
 1477                         /*
 1478                          * We have processed all of the receive buffers.
 1479                          */
 1480                         break;
 1481                 }
 1482 
 1483                 progress++;
 1484                 ifp->if_ipackets++;
 1485 
 1486                 if (rxstat & GEM_RD_BAD_CRC) {
 1487                         ifp->if_ierrors++;
 1488                         printf("%s: receive error: CRC error\n",
 1489                                 sc->sc_dev.dv_xname);
 1490                         GEM_INIT_RXDESC(sc, i);
 1491                         continue;
 1492                 }
 1493 
 1494                 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1495                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1496 #ifdef GEM_DEBUG
 1497                 if (ifp->if_flags & IFF_DEBUG) {
 1498                         printf("    rxsoft %p descriptor %d: ", rxs, i);
 1499                         printf("gd_flags: 0x%016llx\t", (long long)
 1500                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
 1501                         printf("gd_addr: 0x%016llx\n", (long long)
 1502                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
 1503                 }
 1504 #endif
 1505 
 1506                 /* No errors; receive the packet. */
 1507                 len = GEM_RD_BUFLEN(rxstat);
 1508 
 1509                 /*
 1510                  * Allocate a new mbuf cluster.  If that fails, we are
 1511                  * out of memory, and must drop the packet and recycle
 1512                  * the buffer that's already attached to this descriptor.
 1513                  */
 1514                 m = rxs->rxs_mbuf;
 1515                 if (gem_add_rxbuf(sc, i) != 0) {
 1516                         GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
 1517                         ifp->if_ierrors++;
 1518                         GEM_INIT_RXDESC(sc, i);
 1519                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1520                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1521                         continue;
 1522                 }
 1523                 m->m_data += 2; /* The chip wrote the frame 2 bytes in for alignment. */
 1524 
 1525                 m->m_pkthdr.rcvif = ifp;
 1526                 m->m_pkthdr.len = m->m_len = len;
 1527 
 1528 #if NBPFILTER > 0
 1529                 /*
 1530                  * Pass this up to any BPF listeners, but only
 1531                  * pass it up the stack if it's for us.
 1532                  */
 1533                 if (ifp->if_bpf)
 1534                         bpf_mtap(ifp->if_bpf, m);
 1535 #endif /* NBPFILTER > 0 */
 1536 
 1537 #ifdef INET
 1538                 /* hardware checksum */
 1539                 if (ifp->if_csum_flags_rx & (M_CSUM_UDPv4 | M_CSUM_TCPv4)) {
 1540                         struct ether_header *eh;
 1541                         struct ip *ip;
 1542                         struct udphdr *uh;
 1543                         int32_t hlen, pktlen;
 1544 
 1545                         if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
 1546                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
 1547                                          ETHER_VLAN_ENCAP_LEN;
 1548                                 eh = (struct ether_header *)
 1549                                         (mtod(m, caddr_t) + ETHER_VLAN_ENCAP_LEN);
 1550                         } else {
 1551                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
 1552                                 eh = mtod(m, struct ether_header *);
 1553                         }
 1554                         if (ntohs(eh->ether_type) != ETHERTYPE_IP)
 1555                                 goto swcsum;
 1556                         ip = (struct ip *) ((caddr_t)eh + ETHER_HDR_LEN);
 1557 
 1558                         /* IPv4 only */
 1559                         if (ip->ip_v != IPVERSION)
 1560                                 goto swcsum;
 1561 
 1562                         hlen = ip->ip_hl << 2;
 1563                         if (hlen < sizeof(struct ip))
 1564                                 goto swcsum;
 1565 
 1566                         /*
 1567                          * Bail if the packet is too short, truncated, a
 1568                          * fragment, or has trailing garbage or an Ethernet pad.
 1569                          */
 1570                         if ((ntohs(ip->ip_len) < hlen) ||
 1571                             (ntohs(ip->ip_len) != pktlen) ||
 1572                             (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
 1573                                 goto swcsum;
 1574 
 1575                         switch (ip->ip_p) {
 1576                         case IPPROTO_TCP:
 1577                                 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
 1578                                         goto swcsum;
 1579                                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1580                                         goto swcsum;
 1581                                 m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
 1582                                 break;
 1583                         case IPPROTO_UDP:
 1584                                 if (! (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
 1585                                         goto swcsum;
 1586                                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1587                                         goto swcsum;
 1588                                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1589                                 /* no checksum */
 1590                                 if (uh->uh_sum == 0)
 1591                                         goto swcsum;
 1592                                 m->m_pkthdr.csum_flags = M_CSUM_UDPv4;
 1593                                 break;
 1594                         default:
 1595                                 goto swcsum;
 1596                         }
 1597 
 1598                         /* The stack expects the uncomplemented sum. */
 1599                         m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
 1600 
 1601                         /* If the packet had IP options, deduct their sum. */
 1602                         if (hlen > sizeof(struct ip)) {
 1603                                 uint16_t *opts;
 1604                                 uint32_t optsum, temp;
 1605 
 1606                                 optsum = 0;
 1607                                 temp = hlen - sizeof(struct ip);
 1608                                 opts = (uint16_t *) ((caddr_t) ip +
 1609                                         sizeof(struct ip));
 1610 
 1611                                 while (temp > 1) {
 1612                                         optsum += ntohs(*opts++);
 1613                                         temp -= 2;
 1614                                 }
 1615                                 while (optsum >> 16)
 1616                                         optsum = (optsum >> 16) +
 1617                                                  (optsum & 0xffff);
 1618 
 1619                                 /* Deduct ip opts sum from hwsum (rfc 1624). */
 1620                                 m->m_pkthdr.csum_data =
 1621                                         ~((~m->m_pkthdr.csum_data) - ~optsum);
 1622 
 1623                                 while (m->m_pkthdr.csum_data >> 16)
 1624                                         m->m_pkthdr.csum_data =
 1625                                                 (m->m_pkthdr.csum_data >> 16) +
 1626                                                 (m->m_pkthdr.csum_data &
 1627                                                  0xffff);
 1628                         }
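                        /*
                         * Illustrative walk-through (not from the original
                         * source): with hlen = 24 there is one 4-byte
                         * option after the 20-byte base header, so temp
                         * starts at 4 and two 16-bit option words are
                         * folded into optsum.  Backing that sum out of the
                         * hardware checksum (RFC 1624) leaves csum_data
                         * covering only the TCP/UDP segment.
                         */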
 1629 
 1630                         m->m_pkthdr.csum_flags |= M_CSUM_DATA |
 1631                                                   M_CSUM_NO_PSEUDOHDR;
 1632                 } else
 1633 swcsum:
 1634                         m->m_pkthdr.csum_flags = 0;
 1635 #endif
 1636                 /* Pass it on. */
 1637                 (*ifp->if_input)(ifp, m);
 1638         }
 1639 
 1640         if (progress) {
 1641                 /* Update the receive pointer. */
 1642                 if (i == sc->sc_rxptr) {
 1643                         GEM_COUNTER_INCR(sc, sc_ev_rxfull);
 1644 #ifdef GEM_DEBUG
 1645                         if (ifp->if_flags & IFF_DEBUG)
 1646                                 printf("%s: rint: ring wrap\n",
 1647                                     sc->sc_dev.dv_xname);
 1648 #endif
 1649                 }
 1650                 sc->sc_rxptr = i;
 1651                 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
 1652         }
 1653 #ifdef GEM_COUNTERS
 1654         if (progress <= 4) {
 1655                 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
 1656         } else if (progress < 32) {
 1657                 if (progress < 16)
 1658                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
 1659                 else
 1660                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
 1661 
 1662         } else {
 1663                 if (progress < 64)
 1664                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
 1665                 else
 1666                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
 1667         }
 1668 #endif
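        /*
         * Bucket layout of the histogram above (illustrative): progress
         * values 0..4 each get their own slot; 5..15 share slot 5,
         * 16..31 slot 6, 32..63 slot 7, and 64 or more slot 8.
         */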
 1669 
 1670         DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
 1671                 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
 1672 
 1673         return (1);
 1674 }
 1675 
 1676 
 1677 /*
 1678  * gem_add_rxbuf:
 1679  *
 1680  *      Add a receive buffer to the indicated descriptor.
 1681  */
 1682 int
 1683 gem_add_rxbuf(struct gem_softc *sc, int idx)
 1684 {
 1685         struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
 1686         struct mbuf *m;
 1687         int error;
 1688 
 1689         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1690         if (m == NULL)
 1691                 return (ENOBUFS);
 1692 
 1693         MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
 1694         MCLGET(m, M_DONTWAIT);
 1695         if ((m->m_flags & M_EXT) == 0) {
 1696                 m_freem(m);
 1697                 return (ENOBUFS);
 1698         }
 1699 
 1700 #ifdef GEM_DEBUG
 1701         /* Zero the buffer so missed DMA writes are easy to spot. */
 1702         memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
 1703 #endif
 1704 
 1705         if (rxs->rxs_mbuf != NULL)
 1706                 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
 1707 
 1708         rxs->rxs_mbuf = m;
 1709 
 1710         error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
 1711             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 1712             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1713         if (error) {
 1714                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1715                     sc->sc_dev.dv_xname, idx, error);
 1716                 panic("gem_add_rxbuf"); /* XXX */
 1717         }
 1718 
 1719         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1720             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1721 
 1722         GEM_INIT_RXDESC(sc, idx);
 1723 
 1724         return (0);
 1725 }
 1726 
 1727 
 1728 int
 1729 gem_eint(sc, status)
 1730         struct gem_softc *sc;
 1731         u_int status;
 1732 {
 1733         char bits[128];
 1734 
 1735         if ((status & GEM_INTR_MIF) != 0) {
 1736                 printf("%s: XXX link status changed\n", sc->sc_dev.dv_xname);
 1737                 return (1);
 1738         }
 1739 
 1740         printf("%s: status=%s\n", sc->sc_dev.dv_xname,
 1741                 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits)));
 1742         return (1);
 1743 }
 1744 
 1745 
 1746 int
 1747 gem_intr(v)
 1748         void *v;
 1749 {
 1750         struct gem_softc *sc = (struct gem_softc *)v;
 1751         bus_space_tag_t t = sc->sc_bustag;
 1752         bus_space_handle_t seb = sc->sc_h;
 1753         u_int32_t status;
 1754         int r = 0;
 1755 #ifdef GEM_DEBUG
 1756         char bits[128];
 1757 #endif
 1758 
 1759         sc->sc_ev_intr.ev_count++;
 1760 
 1761         status = bus_space_read_4(t, seb, GEM_STATUS);
 1762         DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
 1763                 sc->sc_dev.dv_xname, (status >> 19),
 1764                 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits))));
 1765 
 1766         if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
 1767                 r |= gem_eint(sc, status);
 1768 
 1769         if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
 1770                 GEM_COUNTER_INCR(sc, sc_ev_txint);
 1771                 r |= gem_tint(sc);
 1772         }
 1773 
 1774         if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
 1775                 GEM_COUNTER_INCR(sc, sc_ev_rxint);
 1776                 r |= gem_rint(sc);
 1777         }
 1778 
 1779         /* We should eventually do more than just print out error stats. */
 1780         if (status & GEM_INTR_TX_MAC) {
 1781                 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
 1782                 if (txstat & ~GEM_MAC_TX_XMIT_DONE)
 1783                         printf("%s: MAC tx fault, status %x\n",
 1784                             sc->sc_dev.dv_xname, txstat);
 1785         }
 1786         if (status & GEM_INTR_RX_MAC) {
 1787                 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
 1788                 if (rxstat & ~GEM_MAC_RX_DONE)
 1789                         printf("%s: MAC rx fault, status %x\n",
 1790                             sc->sc_dev.dv_xname, rxstat);
 1791         }
 1792         return (r);
 1793 }
 1794 
 1795 
 1796 void
 1797 gem_watchdog(ifp)
 1798         struct ifnet *ifp;
 1799 {
 1800         struct gem_softc *sc = ifp->if_softc;
 1801 
 1802         DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
 1803                 "GEM_MAC_RX_CONFIG %x\n",
 1804                 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
 1805                 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
 1806                 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
 1807 
 1808         log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
 1809         ++ifp->if_oerrors;
 1810 
 1811         /* Try to get more packets going. */
 1812         gem_start(ifp);
 1813 }
 1814 
 1815 /*
 1816  * Initialize the MII Management Interface
 1817  */
 1818 void
 1819 gem_mifinit(sc)
 1820         struct gem_softc *sc;
 1821 {
 1822         bus_space_tag_t t = sc->sc_bustag;
 1823         bus_space_handle_t mif = sc->sc_h;
 1824 
 1825         /* Configure the MIF in frame mode */
 1826         sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 1827         sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
 1828         bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
 1829 }
 1830 
 1831 /*
 1832  * MII interface
 1833  *
 1834  * The GEM MII interface supports at least three different operating modes:
 1835  *
 1836  * Bitbang mode is implemented using data, clock and output enable registers.
 1837  *
 1838  * Frame mode is implemented by loading a complete frame into the frame
 1839  * register and polling the valid bit for completion.
 1840  *
 1841  * Polling mode uses the frame register but completion is indicated by
 1842  * an interrupt.
 1843  *
 1844  */
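
/*
 * Illustrative frame-mode example (a sketch, not from the original
 * source): reading MII register 1 (the PHY status register) from the
 * PHY at address 0 would build the frame word as
 *
 *         v = (1 << GEM_MIF_REG_SHIFT) | (0 << GEM_MIF_PHY_SHIFT) |
 *             GEM_MIF_FRAME_READ;
 *
 * gem_mii_readreg() below writes such a word to GEM_MIF_FRAME and then
 * polls GEM_MIF_FRAME_TA0 until the PHY has driven the turnaround bit
 * and the data field is valid.
 */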
 1845 static int
 1846 gem_mii_readreg(self, phy, reg)
 1847         struct device *self;
 1848         int phy, reg;
 1849 {
 1850         struct gem_softc *sc = (void *)self;
 1851         bus_space_tag_t t = sc->sc_bustag;
 1852         bus_space_handle_t mif = sc->sc_h;
 1853         int n;
 1854         u_int32_t v;
 1855 
 1856 #ifdef GEM_DEBUG1
 1857         if (sc->sc_debug)
 1858                 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
 1859 #endif
 1860 
 1861 #if 0
 1862         /* Select the desired PHY in the MIF configuration register */
 1863         v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 1864         /* Clear PHY select bit */
 1865         v &= ~GEM_MIF_CONFIG_PHY_SEL;
 1866         if (phy == GEM_PHYAD_EXTERNAL)
 1867                 /* Set PHY select bit to get at external device */
 1868                 v |= GEM_MIF_CONFIG_PHY_SEL;
 1869         bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
 1870 #endif
 1871 
 1872         /* Construct the frame command */
 1873         v = (reg << GEM_MIF_REG_SHIFT)  | (phy << GEM_MIF_PHY_SHIFT) |
 1874                 GEM_MIF_FRAME_READ;
 1875 
 1876         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 1877         for (n = 0; n < 100; n++) {
 1878                 DELAY(1);
 1879                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 1880                 if (v & GEM_MIF_FRAME_TA0)
 1881                         return (v & GEM_MIF_FRAME_DATA);
 1882         }
 1883 
 1884         printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
 1885         return (0);
 1886 }
 1887 
 1888 static void
 1889 gem_mii_writereg(self, phy, reg, val)
 1890         struct device *self;
 1891         int phy, reg, val;
 1892 {
 1893         struct gem_softc *sc = (void *)self;
 1894         bus_space_tag_t t = sc->sc_bustag;
 1895         bus_space_handle_t mif = sc->sc_h;
 1896         int n;
 1897         u_int32_t v;
 1898 
 1899 #ifdef GEM_DEBUG1
 1900         if (sc->sc_debug)
 1901                 printf("gem_mii_writereg: phy %d reg %d val %x\n",
 1902                         phy, reg, val);
 1903 #endif
 1904 
 1905 #if 0
 1906         /* Select the desired PHY in the MIF configuration register */
 1907         v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 1908         /* Clear PHY select bit */
 1909         v &= ~GEM_MIF_CONFIG_PHY_SEL;
 1910         if (phy == GEM_PHYAD_EXTERNAL)
 1911                 /* Set PHY select bit to get at external device */
 1912                 v |= GEM_MIF_CONFIG_PHY_SEL;
 1913         bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
 1914 #endif
 1915         /* Construct the frame command */
 1916         v = GEM_MIF_FRAME_WRITE                 |
 1917             (phy << GEM_MIF_PHY_SHIFT)          |
 1918             (reg << GEM_MIF_REG_SHIFT)          |
 1919             (val & GEM_MIF_FRAME_DATA);
 1920 
 1921         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 1922         for (n = 0; n < 100; n++) {
 1923                 DELAY(1);
 1924                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 1925                 if (v & GEM_MIF_FRAME_TA0)
 1926                         return;
 1927         }
 1928 
 1929         printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
 1930 }
 1931 
 1932 static void
 1933 gem_mii_statchg(dev)
 1934         struct device *dev;
 1935 {
 1936         struct gem_softc *sc = (void *)dev;
 1937 #ifdef GEM_DEBUG
 1938         int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
 1939 #endif
 1940         bus_space_tag_t t = sc->sc_bustag;
 1941         bus_space_handle_t mac = sc->sc_h;
 1942         u_int32_t v;
 1943 
 1944 #ifdef GEM_DEBUG
 1945         if (sc->sc_debug)
 1946                 printf("gem_mii_statchg: status change: phy = %d\n",
 1947                         sc->sc_phys[instance]);
 1948 #endif
 1949 
 1950 
 1951         /* Set tx full duplex options */
 1952         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
 1953         delay(10000); /* reg must be cleared and delay before changing. */
 1954         v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
 1955                 GEM_MAC_TX_ENABLE;
 1956         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
 1957                 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
 1958         }
 1959         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);
 1960 
 1961         /* XIF Configuration */
 1962         /* We should really calculate all this rather than rely on defaults. */
 1963         /* The value read back was never used; build the register from scratch. */
 1964         v = GEM_MAC_XIF_LINK_LED;
 1965         v |= GEM_MAC_XIF_TX_MII_ENA;
 1966 
 1967         /* If an external transceiver is connected, enable its MII drivers */
 1968         sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
 1969         if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
 1970                 /* External MII needs echo disable if half duplex. */
 1971                 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 1972                         /* turn on full duplex LED */
 1973                         v |= GEM_MAC_XIF_FDPLX_LED;
 1974                 else
 1975                         /* half duplex -- disable echo */
 1976                         v |= GEM_MAC_XIF_ECHO_DISABL;
 1977 
 1978                 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
 1979                         v |= GEM_MAC_XIF_GMII_MODE;
 1980                 else
 1981                         v &= ~GEM_MAC_XIF_GMII_MODE;
 1982         } else
 1983                 /* Internal MII needs buf enable */
 1984                 v |= GEM_MAC_XIF_MII_BUF_ENA;
 1985         bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
 1986 }
 1987 
 1988 int
 1989 gem_mediachange(ifp)
 1990         struct ifnet *ifp;
 1991 {
 1992         struct gem_softc *sc = ifp->if_softc;
 1993 
 1994         if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
 1995                 return (EINVAL);
 1996 
 1997         return (mii_mediachg(&sc->sc_mii));
 1998 }
 1999 
 2000 void
 2001 gem_mediastatus(ifp, ifmr)
 2002         struct ifnet *ifp;
 2003         struct ifmediareq *ifmr;
 2004 {
 2005         struct gem_softc *sc = ifp->if_softc;
 2006 
 2007         if ((ifp->if_flags & IFF_UP) == 0)
 2008                 return;
 2009 
 2010         mii_pollstat(&sc->sc_mii);
 2011         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 2012         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 2013 }
 2014 
 2015 int gem_ioctldebug = 0;
 2016 /*
 2017  * Process an ioctl request.
 2018  */
 2019 int
 2020 gem_ioctl(ifp, cmd, data)
 2021         struct ifnet *ifp;
 2022         u_long cmd;
 2023         caddr_t data;
 2024 {
 2025         struct gem_softc *sc = ifp->if_softc;
 2026         struct ifreq *ifr = (struct ifreq *)data;
 2027         int s, error = 0;
 2028 
 2029         s = splnet();
 2030 
 2031         switch (cmd) {
 2032         case SIOCGIFMEDIA:
 2033         case SIOCSIFMEDIA:
 2034                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
 2035                 break;
 2036 
 2037         default:
 2038                 error = ether_ioctl(ifp, cmd, data);
 2039                 if (error == ENETRESET) {
 2040                         /*
 2041                          * Multicast list has changed; set the hardware filter
 2042                          * accordingly.
 2043                          */
 2044                         if (ifp->if_flags & IFF_RUNNING) {
 2045                                 if (gem_ioctldebug) printf("reset1\n");
 2046                                 gem_init(ifp);
 2047                                 delay(50000);
 2048                         }
 2049                         error = 0;
 2050                 }
 2051                 break;
 2052         }
 2053 
 2054         /* Try to get things going again */
 2055         if (ifp->if_flags & IFF_UP) {
 2056                 if (gem_ioctldebug) printf("start\n");
 2057                 gem_start(ifp);
 2058         }
 2059         splx(s);
 2060         return (error);
 2061 }
 2062 
 2063 
 2064 void
 2065 gem_shutdown(arg)
 2066         void *arg;
 2067 {
 2068         struct gem_softc *sc = (struct gem_softc *)arg;
 2069         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2070 
 2071         gem_stop(ifp, 1);
 2072 }
 2073 
 2074 /*
 2075  * Set up the logical address filter.
 2076  */
 2077 void
 2078 gem_setladrf(sc)
 2079         struct gem_softc *sc;
 2080 {
 2081         struct ethercom *ec = &sc->sc_ethercom;
 2082         struct ifnet *ifp = &ec->ec_if;
 2083         struct ether_multi *enm;
 2084         struct ether_multistep step;
 2085         bus_space_tag_t t = sc->sc_bustag;
 2086         bus_space_handle_t h = sc->sc_h;
 2087         u_int32_t crc;
 2088         u_int32_t hash[16];
 2089         u_int32_t v;
 2090         int i;
 2091 
 2092         /* Get current RX configuration */
 2093         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
 2094 
 2095         /*
 2096          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 2097          * and hash filter.  Depending on the case, the right bit will be
 2098          * enabled.
 2099          */
 2100         v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
 2101             GEM_MAC_RX_PROMISC_GRP);
 2102 
 2103         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 2104                 /* Turn on promiscuous mode */
 2105                 v |= GEM_MAC_RX_PROMISCUOUS;
 2106                 ifp->if_flags |= IFF_ALLMULTI;
 2107                 goto chipit;
 2108         }
 2109 
 2110         /*
 2111          * Set up multicast address filter by passing all multicast addresses
 2112          * through a crc generator, and then using the high order 8 bits as an
 2113          * index into the 256 bit logical address filter.  The high order 4
 2114          * bits select the word, while the other 4 bits select the bit within
 2115          * the word (where bit 0 is the MSB).
 2116          */
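
        /*
         * Worked example (illustrative): if ether_crc32_le() returns a
         * CRC whose top byte is 0xa7, then crc >> 24 = 0xa7.  The high
         * nibble (0xa) selects hash[10] and the low nibble (0x7)
         * selects bit 15 - 7 = 8, so the code below sets
         * hash[10] |= 1 << 8.
         */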
 2117 
 2118         /* Clear hash table */
 2119         memset(hash, 0, sizeof(hash));
 2120 
 2121         ETHER_FIRST_MULTI(step, ec, enm);
 2122         while (enm != NULL) {
 2123                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 2124                         /*
 2125                          * We must listen to a range of multicast addresses.
 2126                          * For now, just accept all multicasts, rather than
 2127                          * trying to set only those filter bits needed to match
 2128                          * the range.  (At this time, the only use of address
 2129                          * ranges is for IP multicast routing, for which the
 2130                          * range is big enough to require all bits set.)
 2131                          * XXX use the addr filter for this
 2132                          */
 2133                         ifp->if_flags |= IFF_ALLMULTI;
 2134                         v |= GEM_MAC_RX_PROMISC_GRP;
 2135                         goto chipit;
 2136                 }
 2137 
 2138                 /* Get the LE CRC32 of the address */
 2139                 crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));
 2140 
 2141                 /* Just want the 8 most significant bits. */
 2142                 crc >>= 24;
 2143 
 2144                 /* Set the corresponding bit in the filter. */
 2145                 hash[crc >> 4] |= 1 << (15 - (crc & 15));
 2146 
 2147                 ETHER_NEXT_MULTI(step, enm);
 2148         }
 2149 
 2150         v |= GEM_MAC_RX_HASH_FILTER;
 2151         ifp->if_flags &= ~IFF_ALLMULTI;
 2152 
 2153         /* Now load the hash table into the chip (if we are using it) */
 2154         for (i = 0; i < 16; i++) {
 2155                 bus_space_write_4(t, h,
 2156                     GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
 2157                     hash[i]);
 2158         }
 2159 
 2160 chipit:
 2161         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 2162 }
 2163 
 2164 #ifdef notyet
 2165 
 2166 /*
 2167  * gem_power:
 2168  *
 2169  *      Power management (suspend/resume) hook.
 2170  */
 2171 void
 2172 gem_power(why, arg)
 2173         int why;
 2174         void *arg;
 2175 {
 2176         struct gem_softc *sc = arg;
 2177         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2178         int s;
 2179 
 2180         s = splnet();
 2181         switch (why) {
 2182         case PWR_SUSPEND:
 2183         case PWR_STANDBY:
 2184                 gem_stop(ifp, 1);
 2185                 if (sc->sc_power != NULL)
 2186                         (*sc->sc_power)(sc, why);
 2187                 break;
 2188         case PWR_RESUME:
 2189                 if (ifp->if_flags & IFF_UP) {
 2190                         if (sc->sc_power != NULL)
 2191                                 (*sc->sc_power)(sc, why);
 2192                         gem_init(ifp);
 2193                 }
 2194                 break;
 2195         case PWR_SOFTSUSPEND:
 2196         case PWR_SOFTSTANDBY:
 2197         case PWR_SOFTRESUME:
 2198                 break;
 2199         }
 2200         splx(s);
 2201 }
 2202 #endif
