The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/gem.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: gem.c,v 1.52.2.1 2008/05/17 16:36:08 bouyer Exp $ */
    2 
    3 /*
    4  *
    5  * Copyright (C) 2001 Eduardo Horvath.
    6  * Copyright (c) 2001-2003 Thomas Moestl
    7  * All rights reserved.
    8  *
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  */
   32 
   33 /*
   34  * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
   35  * See `GEM Gigabit Ethernet ASIC Specification'
   36  *   http://www.sun.com/processors/manuals/ge.pdf
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.52.2.1 2008/05/17 16:36:08 bouyer Exp $");
   41 
   42 #include "opt_inet.h"
   43 #include "bpfilter.h"
   44 
   45 #include <sys/param.h>
   46 #include <sys/systm.h>
   47 #include <sys/callout.h>
   48 #include <sys/mbuf.h>
   49 #include <sys/syslog.h>
   50 #include <sys/malloc.h>
   51 #include <sys/kernel.h>
   52 #include <sys/socket.h>
   53 #include <sys/ioctl.h>
   54 #include <sys/errno.h>
   55 #include <sys/device.h>
   56 
   57 #include <machine/endian.h>
   58 
   59 #include <uvm/uvm_extern.h>
   60 
   61 #include <net/if.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_ether.h>
   65 
   66 #ifdef INET
   67 #include <netinet/in.h>
   68 #include <netinet/in_systm.h>
   69 #include <netinet/in_var.h>
   70 #include <netinet/ip.h>
   71 #include <netinet/tcp.h>
   72 #include <netinet/udp.h>
   73 #endif
   74 
   75 #if NBPFILTER > 0
   76 #include <net/bpf.h>
   77 #endif
   78 
   79 #include <machine/bus.h>
   80 #include <machine/intr.h>
   81 
   82 #include <dev/mii/mii.h>
   83 #include <dev/mii/miivar.h>
   84 #include <dev/mii/mii_bitbang.h>
   85 
   86 #include <dev/ic/gemreg.h>
   87 #include <dev/ic/gemvar.h>
   88 
   89 #define TRIES   10000
   90 
   91 static void     gem_start(struct ifnet *);
   92 static void     gem_stop(struct ifnet *, int);
   93 int             gem_ioctl(struct ifnet *, u_long, caddr_t);
   94 void            gem_tick(void *);
   95 void            gem_watchdog(struct ifnet *);
   96 void            gem_shutdown(void *);
   97 void            gem_pcs_start(struct gem_softc *sc);
   98 void            gem_pcs_stop(struct gem_softc *sc, int);
   99 int             gem_init(struct ifnet *);
  100 void            gem_init_regs(struct gem_softc *sc);
  101 static int      gem_ringsize(int sz);
  102 static int      gem_meminit(struct gem_softc *);
  103 void            gem_mifinit(struct gem_softc *);
  104 static int      gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int,
  105                     u_int32_t, u_int32_t);
  106 void            gem_reset(struct gem_softc *);
  107 int             gem_reset_rx(struct gem_softc *sc);
  108 static void     gem_reset_rxdma(struct gem_softc *sc);
  109 static void     gem_rx_common(struct gem_softc *sc);
  110 int             gem_reset_tx(struct gem_softc *sc);
  111 int             gem_disable_rx(struct gem_softc *sc);
  112 int             gem_disable_tx(struct gem_softc *sc);
  113 static void     gem_rxdrain(struct gem_softc *sc);
  114 int             gem_add_rxbuf(struct gem_softc *sc, int idx);
  115 void            gem_setladrf(struct gem_softc *);
  116 
  117 /* MII methods & callbacks */
  118 static int      gem_mii_readreg(struct device *, int, int);
  119 static void     gem_mii_writereg(struct device *, int, int, int);
  120 static void     gem_mii_statchg(struct device *);
  121 
  122 void            gem_statuschange(struct gem_softc *);
  123 
  124 int             gem_mediachange(struct ifnet *);
  125 void            gem_mediastatus(struct ifnet *, struct ifmediareq *);
  126 
  127 struct mbuf     *gem_get(struct gem_softc *, int, int);
  128 int             gem_put(struct gem_softc *, int, struct mbuf *);
  129 void            gem_read(struct gem_softc *, int, int);
  130 int             gem_pint(struct gem_softc *);
  131 int             gem_eint(struct gem_softc *, u_int);
  132 int             gem_rint(struct gem_softc *);
  133 int             gem_tint(struct gem_softc *);
  134 void            gem_power(int, void *);
  135 
  136 #ifdef GEM_DEBUG
  137 static void gem_txsoft_print(const struct gem_softc *, int, int);
  138 #define DPRINTF(sc, x)  if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
  139                                 printf x
  140 #else
  141 #define DPRINTF(sc, x)  /* nothing */
  142 #endif
  143 
  144 #define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
  145 
  146 
  147 /*
  148  * gem_attach:
  149  *
  150  *      Attach a Gem interface to the system.
  151  */
  152 void
  153 gem_attach(sc, enaddr)
  154         struct gem_softc *sc;
  155         const uint8_t *enaddr;
  156 {
  157         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  158         struct mii_data *mii = &sc->sc_mii;
  159         bus_space_tag_t t = sc->sc_bustag;
  160         bus_space_handle_t h = sc->sc_h1;
  161         struct mii_softc *child;
  162         struct ifmedia_entry *ifm;
  163         int i, error;
  164         u_int32_t v;
  165         char *nullbuf;
  166 
  167         /* Make sure the chip is stopped. */
  168         ifp->if_softc = sc;
  169         gem_reset(sc);
  170 
  171         /*
  172          * Allocate the control data structures, and create and load the
  173          * DMA map for it. gem_control_data is 9216 bytes, we have space for
  174          * the padding buffer in the bus_dmamem_alloc()'d memory.
  175          */
  176         if ((error = bus_dmamem_alloc(sc->sc_dmatag,
  177             sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
  178             0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
  179                 aprint_error(
  180                    "%s: unable to allocate control data, error = %d\n",
  181                     sc->sc_dev.dv_xname, error);
  182                 goto fail_0;
  183         }
  184 
  185         /* XXX should map this in with correct endianness */
  186         if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
  187             sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
  188             BUS_DMA_COHERENT)) != 0) {
  189                 aprint_error("%s: unable to map control data, error = %d\n",
  190                     sc->sc_dev.dv_xname, error);
  191                 goto fail_1;
  192         }
  193 
  194         nullbuf =
  195             (caddr_t)sc->sc_control_data + sizeof(struct gem_control_data);
  196 
  197         if ((error = bus_dmamap_create(sc->sc_dmatag,
  198             sizeof(struct gem_control_data), 1,
  199             sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
  200                 aprint_error("%s: unable to create control data DMA map, "
  201                     "error = %d\n", sc->sc_dev.dv_xname, error);
  202                 goto fail_2;
  203         }
  204 
  205         if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
  206             sc->sc_control_data, sizeof(struct gem_control_data), NULL,
  207             0)) != 0) {
  208                 aprint_error(
  209                     "%s: unable to load control data DMA map, error = %d\n",
  210                     sc->sc_dev.dv_xname, error);
  211                 goto fail_3;
  212         }
  213 
  214         memset(nullbuf, 0, ETHER_MIN_TX);
  215         if ((error = bus_dmamap_create(sc->sc_dmatag,
  216             ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
  217                 aprint_error("%s: unable to create padding DMA map, "
  218                     "error = %d\n", sc->sc_dev.dv_xname, error);
  219                 goto fail_4;
  220         }
  221 
  222         if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
  223             nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
  224                 aprint_error(
  225                     "%s: unable to load padding DMA map, error = %d\n",
  226                     sc->sc_dev.dv_xname, error);
  227                 goto fail_5;
  228         }
  229 
  230         bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
  231             BUS_DMASYNC_PREWRITE);
  232 
  233         /*
  234          * Initialize the transmit job descriptors.
  235          */
  236         SIMPLEQ_INIT(&sc->sc_txfreeq);
  237         SIMPLEQ_INIT(&sc->sc_txdirtyq);
  238 
  239         /*
  240          * Create the transmit buffer DMA maps.
  241          */
  242         for (i = 0; i < GEM_TXQUEUELEN; i++) {
  243                 struct gem_txsoft *txs;
  244 
  245                 txs = &sc->sc_txsoft[i];
  246                 txs->txs_mbuf = NULL;
  247                 if ((error = bus_dmamap_create(sc->sc_dmatag,
  248                     ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
  249                     ETHER_MAX_LEN_JUMBO, 0, 0,
  250                     &txs->txs_dmamap)) != 0) {
  251                         aprint_error("%s: unable to create tx DMA map %d, "
  252                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  253                         goto fail_6;
  254                 }
  255                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
  256         }
  257 
  258         /*
  259          * Create the receive buffer DMA maps.
  260          */
  261         for (i = 0; i < GEM_NRXDESC; i++) {
  262                 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
  263                     MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  264                         aprint_error("%s: unable to create rx DMA map %d, "
  265                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  266                         goto fail_7;
  267                 }
  268                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
  269         }
  270 
  271         /* Initialize ifmedia structures and MII info */
  272         mii->mii_ifp = ifp;
  273         mii->mii_readreg = gem_mii_readreg;
  274         mii->mii_writereg = gem_mii_writereg;
  275         mii->mii_statchg = gem_mii_statchg;
  276 
  277         ifmedia_init(&mii->mii_media, IFM_IMASK, gem_mediachange, gem_mediastatus);
  278 
  279         /*
  280          * Initialization based  on `GEM Gigabit Ethernet ASIC Specification'
  281          * Section 3.2.1 `Initialization Sequence'.
  282          * However, we can't assume SERDES or Serialink if neither
  283          * GEM_MIF_CONFIG_MDI0 nor GEM_MIF_CONFIG_MDI1 are set
  284          * being set, as both are set on Sun X1141A (with SERDES).  So,
  285          * we rely on our bus attachment setting GEM_SERDES or GEM_SERIAL.
  286          * Also, for Apple variants with 2 PHY's, we prefer the external
  287          * PHY over the internal PHY.
  288          */
  289         gem_mifinit(sc);
  290 
  291         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
  292                 mii_attach(&sc->sc_dev, mii, 0xffffffff,
  293                     MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
  294                 child = LIST_FIRST(&mii->mii_phys);
  295                 if (child == NULL) {
  296                                 /* No PHY attached */
  297                                 aprint_error("%s: PHY probe failed\n",
  298                                     sc->sc_dev.dv_xname);
  299                                 goto fail_7;
  300                 } else {
  301                         /*
  302                          * Walk along the list of attached MII devices and
  303                          * establish an `MII instance' to `PHY number'
  304                          * mapping.
  305                          */
  306                         for (; child != NULL;
  307                             child = LIST_NEXT(child, mii_list)) {
  308                                 /*
  309                                  * Note: we support just one PHY: the internal
  310                                  * or external MII is already selected for us
  311                                  * by the GEM_MIF_CONFIG  register.
  312                                  */
  313                                 if (child->mii_phy > 1 || child->mii_inst > 0) {
  314                                         aprint_error(
  315                                             "%s: cannot accommodate MII device"
  316                                             " %s at PHY %d, instance %d\n",
  317                                                sc->sc_dev.dv_xname,
  318                                                child->mii_dev.dv_xname,
  319                                                child->mii_phy, child->mii_inst);
  320                                         continue;
  321                                 }
  322                                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  323                         }
  324 
  325                         /*
  326                          * Now select and activate the PHY we will use.
  327                          *
  328                          * The order of preference is External (MDI1),
  329                          * then Internal (MDI0),
  330                          */
  331                         if (sc->sc_phys[1]) {
  332 #ifdef GEM_DEBUG
  333                                 aprint_debug("%s: using external PHY\n",
  334                                     sc->sc_dev.dv_xname);
  335 #endif
  336                                 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
  337                         } else {
  338 #ifdef GEM_DEBUG
  339                                 aprint_debug("%s: using internal PHY\n",
  340                                     sc->sc_dev.dv_xname);
  341                                 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
  342 #endif
  343                         }
  344                         bus_space_write_4(t, h, GEM_MIF_CONFIG,
  345                             sc->sc_mif_config);
  346                         if (sc->sc_variant != GEM_SUN_ERI)
  347                                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  348                                     GEM_MII_DATAPATH_MII);
  349 
  350                         /*
  351                          * XXX - we can really do the following ONLY if the
  352                          * PHY indeed has the auto negotiation capability!!
  353                          */
  354                         ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
  355                 }
  356         } else {
  357                 /* SERDES or Serialink */
  358                 if (sc->sc_flags & GEM_SERDES) {
  359                         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  360                             GEM_MII_DATAPATH_SERDES);
  361                 } else {
  362                         sc->sc_flags |= GEM_SERIAL;
  363                         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  364                             GEM_MII_DATAPATH_SERIAL);
  365                 }
  366 
  367                 aprint_normal("%s: using external PCS %s: ",
  368                     sc->sc_dev.dv_xname,
  369                     sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink");
  370 
  371                 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
  372                 /* Check for FDX and HDX capabilities */
  373                 sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR);
  374                 if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) {
  375                         ifmedia_add(&sc->sc_media,
  376                             IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_FDX, 0, NULL);
  377                         aprint_normal("1000baseSX-FDX, ");
  378                 }
  379                 if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) {
  380                         ifmedia_add(&sc->sc_media,
  381                             IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_HDX, 0, NULL);
  382                         aprint_normal("1000baseSX-HDX, ");
  383                 }
  384                 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
  385                 sc->sc_mii_media = IFM_AUTO;
  386                 aprint_normal("auto\n");
  387 
  388                 gem_pcs_stop(sc, 1);
  389         }
  390 
  391         /*
  392          * From this point forward, the attachment cannot fail.  A failure
  393          * before this point releases all resources that may have been
  394          * allocated.
  395          */
  396 
  397         /* Announce ourselves. */
  398         aprint_normal("%s: Ethernet address %s", sc->sc_dev.dv_xname,
  399             ether_sprintf(enaddr));
  400 
  401         /* Get RX FIFO size */
  402         sc->sc_rxfifosize = 64 *
  403             bus_space_read_4(t, h, GEM_RX_FIFO_SIZE);
  404         aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);
  405 
  406         /* Get TX FIFO size */
  407         v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE);
  408         aprint_normal(", %uKB TX fifo\n", v / 16);
  409 
  410         /* Initialize ifnet structure. */
  411         strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
  412         ifp->if_softc = sc;
  413         ifp->if_flags =
  414             IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
  415         sc->sc_if_flags = ifp->if_flags;
  416         /*
  417          * The GEM hardware supports basic TCP checksum offloading only.
  418          * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80)
  419          * have bugs in the receive checksum, so don't enable it for now. 
  420         if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) ||
  421             (GEM_IS_APPLE(sc) &&
  422             (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80)))
  423                 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
  424         */
  425         ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
  426         ifp->if_start = gem_start;
  427         ifp->if_ioctl = gem_ioctl;
  428         ifp->if_watchdog = gem_watchdog;
  429         ifp->if_stop = gem_stop;
  430         ifp->if_init = gem_init;
  431         IFQ_SET_READY(&ifp->if_snd);
  432 
  433         /*
  434          * If we support GigE media, we support jumbo frames too.
  435          * Unless we are Apple.
  436          */
  437         TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
  438                 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
  439                     IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
  440                     IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
  441                     IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
  442                         if (!GEM_IS_APPLE(sc))
  443                                 sc->sc_ethercom.ec_capabilities
  444                                     |= ETHERCAP_JUMBO_MTU;
  445                         sc->sc_flags |= GEM_GIGABIT;
  446                         break;
  447                 }
  448         }
  449 
  450         /* claim 802.1q capability */
  451         sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
  452 
  453         /* Attach the interface. */
  454         if_attach(ifp);
  455         ether_ifattach(ifp, enaddr);
  456 
  457         sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
  458         if (sc->sc_sh == NULL)
  459                 panic("gem_config: can't establish shutdownhook");
  460 
  461 #if NRND > 0
  462         rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
  463                           RND_TYPE_NET, 0);
  464 #endif
  465 
  466         evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
  467             NULL, sc->sc_dev.dv_xname, "interrupts");
  468 #ifdef GEM_COUNTERS
  469         evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
  470             &sc->sc_ev_intr, sc->sc_dev.dv_xname, "tx interrupts");
  471         evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
  472             &sc->sc_ev_intr, sc->sc_dev.dv_xname, "rx interrupts");
  473         evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
  474             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx ring full");
  475         evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
  476             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx malloc failure");
  477         evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
  478             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 0desc");
  479         evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
  480             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 1desc");
  481         evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
  482             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 2desc");
  483         evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
  484             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 3desc");
  485         evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
  486             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >3desc");
  487         evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
  488             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >7desc");
  489         evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
  490             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >15desc");
  491         evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
  492             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >31desc");
  493         evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
  494             &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >63desc");
  495 #endif
  496 
  497 #if notyet
  498         /*
  499          * Add a suspend hook to make sure we come back up after a
  500          * resume.
  501          */
  502         sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
  503             gem_power, sc);
  504         if (sc->sc_powerhook == NULL)
  505                 aprint_error("%s: WARNING: unable to establish power hook\n",
  506                     sc->sc_dev.dv_xname);
  507 #endif
  508 
  509         callout_init(&sc->sc_tick_ch);
  510         return;
  511 
  512         /*
  513          * Free any resources we've allocated during the failed attach
  514          * attempt.  Do this in reverse order and fall through.
  515          */
  516  fail_7:
  517         for (i = 0; i < GEM_NRXDESC; i++) {
  518                 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
  519                         bus_dmamap_destroy(sc->sc_dmatag,
  520                             sc->sc_rxsoft[i].rxs_dmamap);
  521         }
  522  fail_6:
  523         for (i = 0; i < GEM_TXQUEUELEN; i++) {
  524                 if (sc->sc_txsoft[i].txs_dmamap != NULL)
  525                         bus_dmamap_destroy(sc->sc_dmatag,
  526                             sc->sc_txsoft[i].txs_dmamap);
  527         }
  528         bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
  529  fail_5:
  530         bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
  531  fail_4:
  532         bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)nullbuf, ETHER_MIN_TX);
  533  fail_3:
  534         bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
  535  fail_2:
  536         bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
  537             sizeof(struct gem_control_data));
  538  fail_1:
  539         bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
  540  fail_0:
  541         return;
  542 }
  543 
  544 
  545 void
  546 gem_tick(arg)
  547         void *arg;
  548 {
  549         struct gem_softc *sc = arg;
  550         int s;
  551 
  552         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) {
  553                 /*
  554                  * We have to reset everything if we failed to get a
  555                  * PCS interrupt.  Restarting the callout is handled
  556                  * in gem_pcs_start().
  557                  */
  558                 gem_init(&sc->sc_ethercom.ec_if);
  559         } else {
  560                 s = splnet();
  561                 mii_tick(&sc->sc_mii);
  562                 splx(s);
  563                 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
  564         }
  565 }
  566 
  567 static int
  568 gem_bitwait(sc, h, r, clr, set)
  569         struct gem_softc *sc;
  570         bus_space_handle_t h;
  571         int r;
  572         u_int32_t clr;
  573         u_int32_t set;
  574 {
  575         int i;
  576         u_int32_t reg;
  577 
  578         for (i = TRIES; i--; DELAY(100)) {
  579                 reg = bus_space_read_4(sc->sc_bustag, h, r);
  580                 if ((reg & clr) == 0 && (reg & set) == set)
  581                         return (1);
  582         }
  583         return (0);
  584 }
  585 
  586 void
  587 gem_reset(sc)
  588         struct gem_softc *sc;
  589 {
  590         bus_space_tag_t t = sc->sc_bustag;
  591         bus_space_handle_t h = sc->sc_h2;
  592         int s;
  593 
  594         s = splnet();
  595         DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
  596         gem_reset_rx(sc);
  597         gem_reset_tx(sc);
  598 
  599         /* Do a full reset */
  600         bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
  601         if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
  602                 printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
  603         splx(s);
  604 }
  605 
  606 
  607 /*
  608  * gem_rxdrain:
  609  *
  610  *      Drain the receive queue.
  611  */
  612 static void
  613 gem_rxdrain(struct gem_softc *sc)
  614 {
  615         struct gem_rxsoft *rxs;
  616         int i;
  617 
  618         for (i = 0; i < GEM_NRXDESC; i++) {
  619                 rxs = &sc->sc_rxsoft[i];
  620                 if (rxs->rxs_mbuf != NULL) {
  621                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
  622                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  623                         bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
  624                         m_freem(rxs->rxs_mbuf);
  625                         rxs->rxs_mbuf = NULL;
  626                 }
  627         }
  628 }
  629 
  630 /*
  631  * Reset the whole thing.
  632  */
  633 static void
  634 gem_stop(struct ifnet *ifp, int disable)
  635 {
  636         struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
  637         struct gem_txsoft *txs;
  638 
  639         DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));
  640 
  641         callout_stop(&sc->sc_tick_ch);
  642         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
  643                 gem_pcs_stop(sc, disable);
  644         else
  645                 mii_down(&sc->sc_mii);
  646 
  647         /* XXX - Should we reset these instead? */
  648         gem_disable_tx(sc);
  649         gem_disable_rx(sc);
  650 
  651         /*
  652          * Release any queued transmit buffers.
  653          */
  654         while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
  655                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
  656                 if (txs->txs_mbuf != NULL) {
  657                         bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0,
  658                             txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
  659                         bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
  660                         m_freem(txs->txs_mbuf);
  661                         txs->txs_mbuf = NULL;
  662                 }
  663                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
  664         }
  665 
  666         if (disable) {
  667                 gem_rxdrain(sc);
  668         }
  669 
  670         /*
  671          * Mark the interface down and cancel the watchdog timer.
  672          */
  673         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
  674         sc->sc_if_flags = ifp->if_flags;
  675         ifp->if_timer = 0;
  676 }
  677 
  678 
  679 /*
  680  * Reset the receiver
  681  */
  682 int
  683 gem_reset_rx(struct gem_softc *sc)
  684 {
  685         bus_space_tag_t t = sc->sc_bustag;
  686         bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
  687 
  688         /*
  689          * Resetting while DMA is in progress can cause a bus hang, so we
  690          * disable DMA first.
  691          */
  692         gem_disable_rx(sc);
  693         bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
  694         bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  695         /* Wait till it finishes */
  696         if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
  697                 printf("%s: cannot disable read dma\n", sc->sc_dev.dv_xname);
  698 
  699         /* Finally, reset the ERX */
  700         bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
  701         bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
  702         /* Wait till it finishes */
  703         if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
  704                 printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
  705                 return (1);
  706         }
  707         return (0);
  708 }
  709 
  710 
  711 /*
  712  * Reset the receiver DMA engine.
  713  *
  714  * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
  715  * etc in order to reset the receiver DMA engine only and not do a full
  716  * reset which amongst others also downs the link and clears the FIFOs.
  717  */
  718 static void
  719 gem_reset_rxdma(struct gem_softc *sc)
  720 {
  721         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  722         bus_space_tag_t t = sc->sc_bustag;
  723         bus_space_handle_t h = sc->sc_h1;
  724         int i;
  725 
  726         if (gem_reset_rx(sc) != 0) {
  727                 gem_init(ifp);
  728                 return;
  729         }
  730         for (i = 0; i < GEM_NRXDESC; i++)
  731                 if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
  732                         GEM_UPDATE_RXDESC(sc, i);
  733         sc->sc_rxptr = 0;
  734         GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
  735         GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
  736 
  737         /* Reprogram Descriptor Ring Base Addresses */
  738         /* NOTE: we use only 32-bit DMA addresses here. */
  739         bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
  740         bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
  741 
  742         /* Redo ERX Configuration */
  743         gem_rx_common(sc);
  744 
  745         /* Give the reciever a swift kick */
  746         bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);
  747 }
  748 
  749 /*
  750  * Common RX configuration for gem_init() and gem_reset_rxdma().
  751  */
  752 static void
  753 gem_rx_common(struct gem_softc *sc)
  754 {
  755         bus_space_tag_t t = sc->sc_bustag;
  756         bus_space_handle_t h = sc->sc_h1;
  757         u_int32_t v;
  758 
  759         /* Encode Receive Descriptor ring size: four possible values */
  760         v = gem_ringsize(GEM_NRXDESC /*XXX*/);
  761 
  762         /* Set receive h/w checksum offset */
  763 #ifdef INET
  764         v |= (ETHER_HDR_LEN + sizeof(struct ip) +
  765             ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
  766             ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
  767 #endif
  768 
  769         /* Enable RX DMA */
  770         bus_space_write_4(t, h, GEM_RX_CONFIG,
  771             v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
  772             (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
  773 
  774         /*
  775          * The following value is for an OFF Threshold of about 3/4 full
  776          * and an ON Threshold of 1/4 full.
  777          */
  778         bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
  779             (3 * sc->sc_rxfifosize / 256) |
  780             ((sc->sc_rxfifosize / 256) << 12));
  781         bus_space_write_4(t, h, GEM_RX_BLANKING,
  782             (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
  783 }
  784 
  785 /*
  786  * Reset the transmitter
  787  */
  788 int
  789 gem_reset_tx(struct gem_softc *sc)
  790 {
  791         bus_space_tag_t t = sc->sc_bustag;
  792         bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
  793 
  794         /*
  795          * Resetting while DMA is in progress can cause a bus hang, so we
  796          * disable DMA first.
  797          */
  798         gem_disable_tx(sc);
  799         bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
  800         bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  801         /* Wait till it finishes */
  802         if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
  803                 printf("%s: cannot disable read dma\n", sc->sc_dev.dv_xname);
  804         /* Wait 5ms extra. */
  805         delay(5000);
  806 
  807         /* Finally, reset the ETX */
  808         bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
  809         bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
  810         /* Wait till it finishes */
  811         if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
  812                 printf("%s: cannot reset receiver\n",
  813                         sc->sc_dev.dv_xname);
  814                 return (1);
  815         }
  816         return (0);
  817 }
  818 
  819 /*
  820  * disable receiver.
  821  */
  822 int
  823 gem_disable_rx(struct gem_softc *sc)
  824 {
  825         bus_space_tag_t t = sc->sc_bustag;
  826         bus_space_handle_t h = sc->sc_h1;
  827         u_int32_t cfg;
  828 
  829         /* Flip the enable bit */
  830         cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
  831         cfg &= ~GEM_MAC_RX_ENABLE;
  832         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
  833         bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  834         /* Wait for it to finish */
  835         return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
  836 }
  837 
  838 /*
  839  * disable transmitter.
  840  */
  841 int
  842 gem_disable_tx(struct gem_softc *sc)
  843 {
  844         bus_space_tag_t t = sc->sc_bustag;
  845         bus_space_handle_t h = sc->sc_h1;
  846         u_int32_t cfg;
  847 
  848         /* Flip the enable bit */
  849         cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
  850         cfg &= ~GEM_MAC_TX_ENABLE;
  851         bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
  852         bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  853         /* Wait for it to finish */
  854         return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
  855 }
  856 
  857 /*
  858  * Initialize interface.
  859  */
  860 int
  861 gem_meminit(struct gem_softc *sc)
  862 {
  863         struct gem_rxsoft *rxs;
  864         int i, error;
  865 
  866         /*
  867          * Initialize the transmit descriptor ring.
  868          */
  869         memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
  870         for (i = 0; i < GEM_NTXDESC; i++) {
  871                 sc->sc_txdescs[i].gd_flags = 0;
  872                 sc->sc_txdescs[i].gd_addr = 0;
  873         }
  874         GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
  875             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  876         sc->sc_txfree = GEM_NTXDESC-1;
  877         sc->sc_txnext = 0;
  878         sc->sc_txwin = 0;
  879 
  880         /*
  881          * Initialize the receive descriptor and receive job
  882          * descriptor rings.
  883          */
  884         for (i = 0; i < GEM_NRXDESC; i++) {
  885                 rxs = &sc->sc_rxsoft[i];
  886                 if (rxs->rxs_mbuf == NULL) {
  887                         if ((error = gem_add_rxbuf(sc, i)) != 0) {
  888                                 printf("%s: unable to allocate or map rx "
  889                                     "buffer %d, error = %d\n",
  890                                     sc->sc_dev.dv_xname, i, error);
  891                                 /*
  892                                  * XXX Should attempt to run with fewer receive
  893                                  * XXX buffers instead of just failing.
  894                                  */
  895                                 gem_rxdrain(sc);
  896                                 return (1);
  897                         }
  898                 } else
  899                         GEM_INIT_RXDESC(sc, i);
  900         }
  901         sc->sc_rxptr = 0;
  902         sc->sc_meminited = 1;
  903         GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
  904         GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
  905 
  906         return (0);
  907 }
  908 
  909 static int
  910 gem_ringsize(int sz)
  911 {
  912         switch (sz) {
  913         case 32:
  914                 return GEM_RING_SZ_32;
  915         case 64:
  916                 return GEM_RING_SZ_64;
  917         case 128:
  918                 return GEM_RING_SZ_128;
  919         case 256:
  920                 return GEM_RING_SZ_256;
  921         case 512:
  922                 return GEM_RING_SZ_512;
  923         case 1024:
  924                 return GEM_RING_SZ_1024;
  925         case 2048:
  926                 return GEM_RING_SZ_2048;
  927         case 4096:
  928                 return GEM_RING_SZ_4096;
  929         case 8192:
  930                 return GEM_RING_SZ_8192;
  931         default:
  932                 printf("gem: invalid Receive Descriptor ring size %d\n", sz);
  933                 return GEM_RING_SZ_32;
  934         }
  935 }
  936 
  937 
  938 /*
  939  * Start PCS
  940  */
  941 void
  942 gem_pcs_start(struct gem_softc *sc)
  943 {
  944         bus_space_tag_t t = sc->sc_bustag;
  945         bus_space_handle_t h = sc->sc_h1;
  946         uint32_t v;
  947 
  948 #ifdef GEM_DEBUG
  949         aprint_debug("%s: gem_pcs_start()\n", sc->sc_dev.dv_xname);
  950 #endif
  951 
  952         /*
  953          * Set up.  We must disable the MII before modifying the
  954          * GEM_MII_ANAR register
  955          */
  956         if (sc->sc_flags & GEM_SERDES) {
  957                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  958                     GEM_MII_DATAPATH_SERDES);
  959                 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
  960                     GEM_MII_SLINK_LOOPBACK);
  961         } else {
  962                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  963                     GEM_MII_DATAPATH_SERIAL);
  964                 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0);
  965         }
  966         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
  967         v = bus_space_read_4(t, h, GEM_MII_ANAR);
  968         v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE);
  969         if (sc->sc_mii_media == IFM_AUTO)
  970                 v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX);
  971         else if (sc->sc_mii_media == IFM_FDX) {
  972                 v |= GEM_MII_ANEG_FUL_DUPLX;
  973                 v &= ~GEM_MII_ANEG_HLF_DUPLX;
  974         } else if (sc->sc_mii_media == IFM_HDX) {
  975                 v &= ~GEM_MII_ANEG_FUL_DUPLX;
  976                 v |= GEM_MII_ANEG_HLF_DUPLX;
  977         }
  978 
  979         /* Configure link. */
  980         bus_space_write_4(t, h, GEM_MII_ANAR, v);
  981         bus_space_write_4(t, h, GEM_MII_CONTROL,
  982             GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
  983         bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
  984         gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT);
  985 
  986         /* Start the 10 second timer */
  987         callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
  988 }
  989 
  990 /*
  991  * Stop PCS
  992  */
  993 void
  994 gem_pcs_stop(struct gem_softc *sc, int disable)
  995 {
  996         bus_space_tag_t t = sc->sc_bustag;
  997         bus_space_handle_t h = sc->sc_h1;
  998 
  999 #ifdef GEM_DEBUG
 1000         aprint_debug("%s: gem_pcs_stop()\n", sc->sc_dev.dv_xname);
 1001 #endif
 1002 
 1003         /* Tell link partner that we're going away */
 1004         bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF);
 1005 
 1006         /*
 1007          * Disable PCS MII.  The documentation suggests that setting
 1008          * GEM_MII_CONFIG_ENABLE to zero and then restarting auto-
 1009          * negotiation will shut down the link.  However, it appears
 1010          * that we also need to unset the datapath mode.
 1011          */
 1012         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
 1013         bus_space_write_4(t, h, GEM_MII_CONTROL,
 1014             GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
 1015         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
 1016         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
 1017 
 1018         if (disable) {
 1019                 if (sc->sc_flags & GEM_SERDES)
 1020                         bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
 1021                                 GEM_MII_SLINK_POWER_OFF);
 1022                 else
 1023                         bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
 1024                             GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF);
 1025         }
 1026 
 1027         sc->sc_flags &= ~GEM_LINK;
 1028         sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
 1029         sc->sc_mii.mii_media_status = IFM_AVALID;
 1030 }
 1031 
 1032 
 1033 /*
 1034  * Initialization of interface; set up initialization block
 1035  * and transmit/receive descriptor rings.
 1036  */
 1037 int
 1038 gem_init(struct ifnet *ifp)
 1039 {
 1040         struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
 1041         bus_space_tag_t t = sc->sc_bustag;
 1042         bus_space_handle_t h = sc->sc_h1;
 1043         int s;
 1044         u_int max_frame_size;
 1045         u_int32_t v;
 1046 
 1047         s = splnet();
 1048 
 1049         DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
 1050         /*
 1051          * Initialization sequence. The numbered steps below correspond
 1052          * to the sequence outlined in section 6.3.5.1 in the Ethernet
 1053          * Channel Engine manual (part of the PCIO manual).
 1054          * See also the STP2002-STQ document from Sun Microsystems.
 1055          */
 1056 
 1057         /* step 1 & 2. Reset the Ethernet Channel */
 1058         gem_stop(ifp, 0);
 1059         gem_reset(sc);
 1060         DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));
 1061 
 1062         /* Re-initialize the MIF */
 1063         gem_mifinit(sc);
 1064 
 1065         /* Set up correct datapath for non-SERDES/Serialink */
 1066         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
 1067             sc->sc_variant != GEM_SUN_ERI)
 1068                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
 1069                     GEM_MII_DATAPATH_MII);
 1070 
 1071         /* Call MI reset function if any */
 1072         if (sc->sc_hwreset)
 1073                 (*sc->sc_hwreset)(sc);
 1074 
 1075         /* step 3. Setup data structures in host memory */
 1076         if (gem_meminit(sc) != 0)
 1077                 return 1;
 1078 
 1079         /* step 4. TX MAC registers & counters */
 1080         gem_init_regs(sc);
 1081         max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
 1082         max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
 1083         if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
 1084                 max_frame_size += ETHER_VLAN_ENCAP_LEN;
 1085         bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
 1086             max_frame_size|/* burst size */(0x2000<<16));
 1087 
 1088         /* step 5. RX MAC registers & counters */
 1089         gem_setladrf(sc);
 1090 
 1091         /* step 6 & 7. Program Descriptor Ring Base Addresses */
 1092         /* NOTE: we use only 32-bit DMA addresses here. */
 1093         bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
 1094         bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
 1095 
 1096         bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
 1097         bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
 1098 
 1099         /* step 8. Global Configuration & Interrupt Mask */
 1100         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
 1101                 v = GEM_INTR_PCS;
 1102         else
 1103                 v = GEM_INTR_MIF;
 1104         bus_space_write_4(t, h, GEM_INTMASK,
 1105                       ~(GEM_INTR_TX_INTME |
 1106                         GEM_INTR_TX_EMPTY |
 1107                         GEM_INTR_TX_MAC |
 1108                         GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF|
 1109                         GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL|
 1110                         GEM_INTR_BERR | v));
 1111         bus_space_write_4(t, h, GEM_MAC_RX_MASK,
 1112                         GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
 1113         bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */
 1114         bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK,
 1115             GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
 1116 
 1117         /* step 9. ETX Configuration: use mostly default values */
 1118 
 1119         /* Enable TX DMA */
 1120         v = gem_ringsize(GEM_NTXDESC /*XXX*/);
 1121         bus_space_write_4(t, h, GEM_TX_CONFIG,
 1122                 v|GEM_TX_CONFIG_TXDMA_EN|
 1123                 ((0x4FF<<10)&GEM_TX_CONFIG_TXFIFO_TH));
 1124         bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);
 1125 
 1126         /* step 10. ERX Configuration */
 1127         gem_rx_common(sc);
 1128 
 1129         /* step 11. Configure Media */
 1130         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
 1131                 mii_mediachg(&sc->sc_mii);
 1132 
 1133         /* step 12. RX_MAC Configuration Register */
 1134         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
 1135         v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
 1136         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 1137 
 1138         /* step 14. Issue Transmit Pending command */
 1139 
 1140         /* Call MI initialization function if any */
 1141         if (sc->sc_hwinit)
 1142                 (*sc->sc_hwinit)(sc);
 1143 
 1144 
 1145         /* step 15.  Give the reciever a swift kick */
 1146         bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);
 1147 
 1148         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
 1149                 /* Configure PCS */
 1150                 gem_pcs_start(sc);
 1151         else
 1152                 /* Start the one second timer. */
 1153                 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
 1154 
 1155         sc->sc_flags &= ~GEM_LINK;
 1156         ifp->if_flags |= IFF_RUNNING;
 1157         ifp->if_flags &= ~IFF_OACTIVE;
 1158         ifp->if_timer = 0;
 1159         sc->sc_if_flags = ifp->if_flags;
 1160 
 1161         splx(s);
 1162 
 1163         return (0);
 1164 }
 1165 
/*
 * Program the chip's MAC and DMA configuration registers: one-time
 * recommended values on first call, then the counters, pause time,
 * arbitration limits, station address and XIF (MII output) setup.
 */
void
gem_init_regs(struct gem_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h1;
        const u_char *laddr = LLADDR(ifp->if_sadl);
        u_int32_t v;

        /* These regs are not cleared on reset */
        if (!sc->sc_inited) {

                /* Load recommended values */
                bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
                bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
                bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

                bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
                /* Max frame and max burst size */
                bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
                    ETHER_MAX_LEN | (0x2000<<16));

                bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
                bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
                bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
                bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
                /* Seed the backoff RNG from the low station address bits. */
                bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
                    ((laddr[5]<<8)|laddr[4])&0x3ff);

                /* Secondary MAC addr set to 0:0:0:0:0:0 */
                bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

                /* MAC control addr set to 01:80:c2:00:00:01 */
                bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
                bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
                bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

                /* MAC filter addr set to 0:0:0:0:0:0 */
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
                bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

                bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
                bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

                sc->sc_inited = 1;
        }

        /* Counters need to be zeroed */
        bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
        bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

        /* Set XOFF PAUSE time. */
        bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

        /*
         * Set the internal arbitration to "infinite" bursts of the
         * maximum length of 31 * 64 bytes so DMA transfers aren't
         * split up in cache line size chunks. This greatly improves
         * especially RX performance.
         * Enable silicon bug workarounds for the Apple variants.
         */
        bus_space_write_4(t, h, GEM_CONFIG,
            GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
            GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
            GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

        /*
         * Set the station address.
         */
        bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
        bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
        bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

        /*
         * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
         */
        sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
        v = GEM_MAC_XIF_TX_MII_ENA;
        if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)  {
                if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
                        v |= GEM_MAC_XIF_FDPLX_LED;
                                if (sc->sc_flags & GEM_GIGABIT)
                                        v |= GEM_MAC_XIF_GMII_MODE;
                }
        } else {
                v |= GEM_MAC_XIF_GMII_MODE;
        }
        bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}
 1267 
 1268 #ifdef GEM_DEBUG
 1269 static void
 1270 gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc)
 1271 {
 1272         int i;
 1273 
 1274         for (i = firstdesc;; i = GEM_NEXTTX(i)) {
 1275                 printf("descriptor %d:\t", i);
 1276                 printf("gd_flags:   0x%016" PRIx64 "\t",
 1277                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
 1278                 printf("gd_addr: 0x%016" PRIx64 "\n",
 1279                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
 1280                 if (i == lastdesc)
 1281                         break;
 1282         }
 1283 }
 1284 #endif
 1285 
 1286 static void
 1287 gem_start(ifp)
 1288         struct ifnet *ifp;
 1289 {
 1290         struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
 1291         struct mbuf *m0, *m;
 1292         struct gem_txsoft *txs;
 1293         bus_dmamap_t dmamap;
 1294         int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg;
 1295         uint64_t flags = 0;
 1296 
 1297         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 1298                 return;
 1299 
 1300         /*
 1301          * Remember the previous number of free descriptors and
 1302          * the first descriptor we'll use.
 1303          */
 1304         ofree = sc->sc_txfree;
 1305         firsttx = sc->sc_txnext;
 1306 
 1307         DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
 1308             sc->sc_dev.dv_xname, ofree, firsttx));
 1309 
 1310         /*
 1311          * Loop through the send queue, setting up transmit descriptors
 1312          * until we drain the queue, or use up all available transmit
 1313          * descriptors.
 1314          */
 1315         while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
 1316             sc->sc_txfree != 0) {
 1317                 /*
 1318                  * Grab a packet off the queue.
 1319                  */
 1320                 IFQ_POLL(&ifp->if_snd, m0);
 1321                 if (m0 == NULL)
 1322                         break;
 1323                 m = NULL;
 1324 
 1325                 dmamap = txs->txs_dmamap;
 1326 
 1327                 /*
 1328                  * Load the DMA map.  If this fails, the packet either
 1329                  * didn't fit in the alloted number of segments, or we were
 1330                  * short on resources.  In this case, we'll copy and try
 1331                  * again.
 1332                  */
 1333                 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
 1334                       BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 ||
 1335                       (m0->m_pkthdr.len < ETHER_MIN_TX &&
 1336                        dmamap->dm_nsegs == GEM_NTXSEGS)) {
 1337                         if (m0->m_pkthdr.len > MCLBYTES) {
 1338                                 printf("%s: unable to allocate jumbo Tx "
 1339                                     "cluster\n", sc->sc_dev.dv_xname);
 1340                                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1341                                 m_freem(m0);
 1342                                 continue;
 1343                         }
 1344                         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1345                         if (m == NULL) {
 1346                                 printf("%s: unable to allocate Tx mbuf\n",
 1347                                     sc->sc_dev.dv_xname);
 1348                                 break;
 1349                         }
 1350                         MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
 1351                         if (m0->m_pkthdr.len > MHLEN) {
 1352                                 MCLGET(m, M_DONTWAIT);
 1353                                 if ((m->m_flags & M_EXT) == 0) {
 1354                                         printf("%s: unable to allocate Tx "
 1355                                             "cluster\n", sc->sc_dev.dv_xname);
 1356                                         m_freem(m);
 1357                                         break;
 1358                                 }
 1359                         }
 1360                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
 1361                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
 1362                         error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
 1363                             m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
 1364                         if (error) {
 1365                                 printf("%s: unable to load Tx buffer, "
 1366                                     "error = %d\n", sc->sc_dev.dv_xname, error);
 1367                                 break;
 1368                         }
 1369                 }
 1370 
 1371                 /*
 1372                  * Ensure we have enough descriptors free to describe
 1373                  * the packet.
 1374                  */
 1375                 if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
 1376                      (sc->sc_txfree - 1) : sc->sc_txfree)) {
 1377                         /*
 1378                          * Not enough free descriptors to transmit this
 1379                          * packet.  We haven't committed to anything yet,
 1380                          * so just unload the DMA map, put the packet
 1381                          * back on the queue, and punt.  Notify the upper
 1382                          * layer that there are no more slots left.
 1383                          *
 1384                          * XXX We could allocate an mbuf and copy, but
 1385                          * XXX it is worth it?
 1386                          */
 1387                         ifp->if_flags |= IFF_OACTIVE;
 1388                         sc->sc_if_flags = ifp->if_flags;
 1389                         bus_dmamap_unload(sc->sc_dmatag, dmamap);
 1390                         if (m != NULL)
 1391                                 m_freem(m);
 1392                         break;
 1393                 }
 1394 
 1395                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1396                 if (m != NULL) {
 1397                         m_freem(m0);
 1398                         m0 = m;
 1399                 }
 1400 
 1401                 /*
 1402                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
 1403                  */
 1404 
 1405                 /* Sync the DMA map. */
 1406                 bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
 1407                     BUS_DMASYNC_PREWRITE);
 1408 
 1409                 /*
 1410                  * Initialize the transmit descriptors.
 1411                  */
 1412                 for (nexttx = sc->sc_txnext, seg = 0;
 1413                      seg < dmamap->dm_nsegs;
 1414                      seg++, nexttx = GEM_NEXTTX(nexttx)) {
 1415 
 1416                         /*
 1417                          * If this is the first descriptor we're
 1418                          * enqueueing, set the start of packet flag,
 1419                          * and the checksum stuff if we want the hardware
 1420                          * to do it.
 1421                          */
 1422                         sc->sc_txdescs[nexttx].gd_addr =
 1423                             GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
 1424                         flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
 1425                         if (nexttx == firsttx) {
 1426                                 flags |= GEM_TD_START_OF_PACKET;
 1427                                 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
 1428                                         sc->sc_txwin = 0;
 1429                                         flags |= GEM_TD_INTERRUPT_ME;
 1430                                 }
 1431 
 1432 #ifdef INET
 1433                                 /* h/w checksum */
 1434                                 if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 &&
 1435                                     m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
 1436                                         struct ether_header *eh;
 1437                                         uint16_t offset, start;
 1438 
 1439                                         eh = mtod(m0, struct ether_header *);
 1440                                         switch (ntohs(eh->ether_type)) {
 1441                                         case ETHERTYPE_IP:
 1442                                                 start = ETHER_HDR_LEN;
 1443                                                 break;
 1444                                         case ETHERTYPE_VLAN:
 1445                                                 start = ETHER_HDR_LEN +
 1446                                                         ETHER_VLAN_ENCAP_LEN;
 1447                                                 break;
 1448                                         default:
 1449                                                 /* unsupported, drop it */
 1450                                                 m_free(m0);
 1451                                                 continue;
 1452                                         }
 1453                                         start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
 1454                                         offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start;
 1455                                         flags |= (start <<
 1456                                                   GEM_TD_CXSUM_STARTSHFT) |
 1457                                                  (offset <<
 1458                                                   GEM_TD_CXSUM_STUFFSHFT) |
 1459                                                  GEM_TD_CXSUM_ENABLE;
 1460                                 }
 1461 #endif
 1462                         }
 1463                         if (seg == dmamap->dm_nsegs - 1) {
 1464                                 flags |= GEM_TD_END_OF_PACKET;
 1465                         } else {
 1466                                 /* last flag set outside of loop */
 1467                                 sc->sc_txdescs[nexttx].gd_flags =
 1468                                         GEM_DMA_WRITE(sc, flags);
 1469                         }
 1470                         lasttx = nexttx;
 1471                 }
 1472                 if (m0->m_pkthdr.len < ETHER_MIN_TX) {
 1473                         /* add padding buffer at end of chain */
 1474                         flags &= ~GEM_TD_END_OF_PACKET;
 1475                         sc->sc_txdescs[lasttx].gd_flags =
 1476                             GEM_DMA_WRITE(sc, flags);
 1477 
 1478                         sc->sc_txdescs[nexttx].gd_addr =
 1479                             GEM_DMA_WRITE(sc,
 1480                             sc->sc_nulldmamap->dm_segs[0].ds_addr);
 1481                         flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
 1482                             GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
 1483                         lasttx = nexttx;
 1484                         nexttx = GEM_NEXTTX(nexttx);
 1485                         seg++;
 1486                 }
 1487                 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
 1488 
 1489                 KASSERT(lasttx != -1);
 1490 
 1491                 /*
 1492                  * Store a pointer to the packet so we can free it later,
 1493                  * and remember what txdirty will be once the packet is
 1494                  * done.
 1495                  */
 1496                 txs->txs_mbuf = m0;
 1497                 txs->txs_firstdesc = sc->sc_txnext;
 1498                 txs->txs_lastdesc = lasttx;
 1499                 txs->txs_ndescs = seg;
 1500 
 1501 #ifdef GEM_DEBUG
 1502                 if (ifp->if_flags & IFF_DEBUG) {
 1503                         printf("     gem_start %p transmit chain:\n", txs);
 1504                         gem_txsoft_print(sc, txs->txs_firstdesc,
 1505                             txs->txs_lastdesc);
 1506                 }
 1507 #endif
 1508 
 1509                 /* Sync the descriptors we're using. */
 1510                 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
 1511                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1512 
 1513                 /* Advance the tx pointer. */
 1514                 sc->sc_txfree -= txs->txs_ndescs;
 1515                 sc->sc_txnext = nexttx;
 1516 
 1517                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 1518                 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 1519 
 1520 #if NBPFILTER > 0
 1521                 /*
 1522                  * Pass the packet to any BPF listeners.
 1523                  */
 1524                 if (ifp->if_bpf)
 1525                         bpf_mtap(ifp->if_bpf, m0);
 1526 #endif /* NBPFILTER > 0 */
 1527         }
 1528 
 1529         if (txs == NULL || sc->sc_txfree == 0) {
 1530                 /* No more slots left; notify upper layer. */
 1531                 ifp->if_flags |= IFF_OACTIVE;
 1532                 sc->sc_if_flags = ifp->if_flags;
 1533         }
 1534 
 1535         if (sc->sc_txfree != ofree) {
 1536                 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
 1537                     sc->sc_dev.dv_xname, lasttx, firsttx));
 1538                 /*
 1539                  * The entire packet chain is set up.
 1540                  * Kick the transmitter.
 1541                  */
 1542                 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
 1543                         sc->sc_dev.dv_xname, nexttx));
 1544                 bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK,
 1545                         sc->sc_txnext);
 1546 
 1547                 /* Set a watchdog timer in case the chip flakes out. */
 1548                 ifp->if_timer = 5;
 1549                 DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
 1550                         sc->sc_dev.dv_xname, ifp->if_timer));
 1551         }
 1552 }
 1553 
 1554 /*
 1555  * Transmit interrupt.
 1556  */
 /*
  * gem_tint:
  *
  *	Transmit-completion interrupt service routine.  Folds the MAC's
  *	hardware collision counters into the interface statistics,
  *	reclaims completed transmit jobs from the dirty queue (freeing
  *	their mbufs and unloading their DMA maps), and restarts output
  *	via gem_start() if any descriptors were freed.  Always returns 1
  *	(interrupt claimed).
  */
 1557 int
 1558 gem_tint(sc)
 1559         struct gem_softc *sc;
 1560 {
 1561         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1562         bus_space_tag_t t = sc->sc_bustag;
 1563         bus_space_handle_t mac = sc->sc_h1;
 1564         struct gem_txsoft *txs;
 1565         int txlast;
 1566         int progress = 0;
 1567         u_int32_t v;
 1568 
 1569         DPRINTF(sc, ("%s: gem_tint\n", sc->sc_dev.dv_xname));
 1570 
 1571         /* Unload collision counters ... */
              /*
               * Excess/late collisions also count as output errors;
               * normal/first collisions are collisions only.
               */
 1572         v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
 1573             bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
 1574         ifp->if_collisions += v +
 1575             bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
 1576             bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
 1577         ifp->if_oerrors += v;
 1578 
 1579         /* ... then clear the hardware counters. */
 1580         bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
 1581         bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
 1582         bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
 1583         bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
 1584 
 1585         /*
 1586          * Go through our Tx list and free mbufs for those
 1587          * frames that have been transmitted.
 1588          */
 1589         while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
 1590                 /*
 1591                  * In theory, we could harvest some descriptors before
 1592                  * the ring is empty, but that's a bit complicated.
 1593                  *
 1594                  * GEM_TX_COMPLETION points to the last descriptor
 1595                  * processed +1.
 1596                  *
 1597                  * Let's assume that the NIC writes back to the Tx
 1598                  * descriptors before it updates the completion
 1599                  * register.  If the NIC has posted writes to the
 1600                  * Tx descriptors, PCI ordering requires that the
 1601                  * posted writes flush to RAM before the register-read
 1602                  * finishes.  So let's read the completion register,
 1603                  * before syncing the descriptors, so that we
 1604                  * examine Tx descriptors that are at least as
 1605                  * current as the completion register.
 1606                  */
 1607                 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
 1608                 DPRINTF(sc,
 1609                         ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
 1610                                 txs->txs_lastdesc, txlast));
                      /*
                       * The job is still in flight if the completion index
                       * lies within [firstdesc, lastdesc]; the else branch
                       * handles a job whose descriptors wrap around the end
                       * of the ring.
                       */
 1611                 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
 1612                         if (txlast >= txs->txs_firstdesc &&
 1613                             txlast <= txs->txs_lastdesc)
 1614                                 break;
 1615                 } else if (txlast >= txs->txs_firstdesc ||
 1616                            txlast <= txs->txs_lastdesc)
 1617                         break;
 1618 
 1619                 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
 1620                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1621 
 1622 #ifdef GEM_DEBUG        /* XXX DMA synchronization? */
 1623                 if (ifp->if_flags & IFF_DEBUG) {
 1624                         printf("    txsoft %p transmit chain:\n", txs);
 1625                         gem_txsoft_print(sc, txs->txs_firstdesc,
 1626                             txs->txs_lastdesc);
 1627                 }
 1628 #endif
 1629 
 1630 
 1631                 DPRINTF(sc, ("gem_tint: releasing a desc\n"));
 1632                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
 1633 
 1634                 sc->sc_txfree += txs->txs_ndescs;
 1635 
 1636                 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
 1637                     0, txs->txs_dmamap->dm_mapsize,
 1638                     BUS_DMASYNC_POSTWRITE);
 1639                 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
 1640                 if (txs->txs_mbuf != NULL) {
 1641                         m_freem(txs->txs_mbuf);
 1642                         txs->txs_mbuf = NULL;
 1643                 }
 1644 
 1645                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
 1646 
 1647                 ifp->if_opackets++;
 1648                 progress = 1;
 1649         }
 1650 
 1651 #if 0
 1652         DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
 1653                 "GEM_TX_DATA_PTR %llx "
 1654                 "GEM_TX_COMPLETION %x\n",
 1655                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE),
 1656                 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h1,
 1657                         GEM_TX_DATA_PTR_HI) << 32) |
 1658                              bus_space_read_4(sc->sc_bustag, sc->sc_h1,
 1659                         GEM_TX_DATA_PTR_LO),
 1660                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION)));
 1661 #endif
 1662 
 1663         if (progress) {
                      /*
                       * Ring fully drained: restart the window counter
                       * gem_start() uses to decide when to request a
                       * transmit-complete interrupt (GEM_TD_INTERRUPT_ME).
                       */
 1664                 if (sc->sc_txfree == GEM_NTXDESC - 1)
 1665                         sc->sc_txwin = 0;
 1666 
 1667                 /* Freed some descriptors, so reset IFF_OACTIVE and restart. */
 1668                 ifp->if_flags &= ~IFF_OACTIVE;
 1669                 sc->sc_if_flags = ifp->if_flags;
 1670                 ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
 1671                 gem_start(ifp);
 1672         }
 1673         DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
 1674                 sc->sc_dev.dv_xname, ifp->if_timer));
 1675 
 1676         return (1);
 1677 }
 1678 
 1679 /*
 1680  * Receive interrupt.
 1681  */
 /*
  * gem_rint:
  *
  *	Receive interrupt service routine.  Walks the Rx descriptor ring
  *	from sc_rxptr up to the hardware completion index (read once),
  *	passes each good frame up the stack -- applying hardware TCPv4
  *	receive checksum assistance when enabled -- recycles the buffer
  *	on CRC error or mbuf allocation failure, then advances sc_rxptr,
  *	kicks the chip, and harvests the MAC receive error counters.
  *	Always returns 1 (interrupt claimed).
  */
 1682 int
 1683 gem_rint(sc)
 1684         struct gem_softc *sc;
 1685 {
 1686         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1687         bus_space_tag_t t = sc->sc_bustag;
 1688         bus_space_handle_t h = sc->sc_h1;
 1689         struct gem_rxsoft *rxs;
 1690         struct mbuf *m;
 1691         u_int64_t rxstat;
 1692         u_int32_t rxcomp;
 1693         int i, len, progress = 0;
 1694 
 1695         DPRINTF(sc, ("%s: gem_rint\n", sc->sc_dev.dv_xname));
 1696 
 1697         /*
 1698          * Ignore spurious interrupt that sometimes occurs before
 1699          * we are set up when we network boot.
 1700          */
 1701         if (!sc->sc_meminited)
 1702                 return 1;
 1703 
 1704         /*
 1705          * Read the completion register once.  This limits
 1706          * how long the following loop can execute.
 1707          */
 1708         rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
 1709 
 1710         /*
 1711          * XXX Read the lastrx only once at the top for speed.
 1712          */
 1713         DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
 1714                 sc->sc_rxptr, rxcomp));
 1715 
 1716         /*
 1717          * Go into the loop at least once.
 1718          */
              /*
               * The "i == sc->sc_rxptr" disjunct forces a first iteration
               * even when rxptr already equals the completion index; the
               * GEM_RD_OWN test below is what actually stops us.
               */
 1719         for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
 1720              i = GEM_NEXTRX(i)) {
 1721                 rxs = &sc->sc_rxsoft[i];
 1722 
 1723                 GEM_CDRXSYNC(sc, i,
 1724                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1725 
 1726                 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
 1727 
 1728                 if (rxstat & GEM_RD_OWN) {
                              /* Still owned by the chip; hand it back. */
 1729                         GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
 1730                         /*
 1731                          * We have processed all of the receive buffers.
 1732                          */
 1733                         break;
 1734                 }
 1735 
 1736                 progress++;
 1737                 ifp->if_ipackets++;
 1738 
 1739                 if (rxstat & GEM_RD_BAD_CRC) {
 1740                         ifp->if_ierrors++;
 1741                         printf("%s: receive error: CRC error\n",
 1742                                 sc->sc_dev.dv_xname);
 1743                         GEM_INIT_RXDESC(sc, i);
 1744                         continue;
 1745                 }
 1746 
 1747                 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1748                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1749 #ifdef GEM_DEBUG
 1750                 if (ifp->if_flags & IFF_DEBUG) {
 1751                         printf("    rxsoft %p descriptor %d: ", rxs, i);
 1752                         printf("gd_flags: 0x%016llx\t", (long long)
 1753                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
 1754                         printf("gd_addr: 0x%016llx\n", (long long)
 1755                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
 1756                 }
 1757 #endif
 1758 
 1759                 /* No errors; receive the packet. */
 1760                 len = GEM_RD_BUFLEN(rxstat);
 1761 
 1762                 /*
 1763                  * Allocate a new mbuf cluster.  If that fails, we are
 1764                  * out of memory, and must drop the packet and recycle
 1765                  * the buffer that's already attached to this descriptor.
 1766                  */
 1767                 m = rxs->rxs_mbuf;
 1768                 if (gem_add_rxbuf(sc, i) != 0) {
 1769                         GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
 1770                         ifp->if_ierrors++;
 1771                         GEM_INIT_RXDESC(sc, i);
 1772                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1773                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1774                         continue;
 1775                 }
 1776                 m->m_data += 2; /* We're already off by two */
 1777 
 1778                 m->m_pkthdr.rcvif = ifp;
 1779                 m->m_pkthdr.len = m->m_len = len;
 1780 
 1781 #if NBPFILTER > 0
 1782                 /*
 1783                  * Pass this up to any BPF listeners, but only
 1784                  * pass it up the stack if it's for us.
 1785                  */
 1786                 if (ifp->if_bpf)
 1787                         bpf_mtap(ifp->if_bpf, m);
 1788 #endif /* NBPFILTER > 0 */
 1789 
 1790 #ifdef INET
 1791                 /* hardware checksum */
 1792                 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
 1793                         struct ether_header *eh;
 1794                         struct ip *ip;
 1795                         int32_t hlen, pktlen;
 1796 
 1797                         if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
 1798                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
 1799                                          ETHER_VLAN_ENCAP_LEN;
 1800                                 eh = (struct ether_header *) (mtod(m, char *) +
 1801                                         ETHER_VLAN_ENCAP_LEN);
 1802                         } else {
 1803                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
 1804                                 eh = mtod(m, struct ether_header *);
 1805                         }
 1806                         if (ntohs(eh->ether_type) != ETHERTYPE_IP)
 1807                                 goto swcsum;
 1808                         ip = (struct ip *) ((caddr_t)eh + ETHER_HDR_LEN);
 1809 
 1810                         /* IPv4 only */
 1811                         if (ip->ip_v != IPVERSION)
 1812                                 goto swcsum;
 1813 
 1814                         hlen = ip->ip_hl << 2;
 1815                         if (hlen < sizeof(struct ip))
 1816                                 goto swcsum;
 1817 
 1818                         /*
 1819                          * bail if too short, has random trailing garbage,
 1820                          * truncated, fragment, or has ethernet pad.
 1821                          */
 1822                         if ((ntohs(ip->ip_len) < hlen) ||
 1823                             (ntohs(ip->ip_len) != pktlen) ||
 1824                             (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
 1825                                 goto swcsum;
 1826 
 1827                         switch (ip->ip_p) {
 1828                         case IPPROTO_TCP:
 1829                                 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
 1830                                         goto swcsum;
 1831                                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1832                                         goto swcsum;
 1833                                 m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
 1834                                 break;
 1835                         case IPPROTO_UDP:
 1836                                 /* FALLTHROUGH */
 1837                         default:
 1838                                 goto swcsum;
 1839                         }
 1840 
 1841                         /* the uncomplemented sum is expected */
 1842                         m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
 1843 
 1844                         /* if the pkt had ip options, we have to deduct them */
 1845                         if (hlen > sizeof(struct ip)) {
 1846                                 uint16_t *opts;
 1847                                 uint32_t optsum, temp;
 1848 
 1849                                 optsum = 0;
 1850                                 temp = hlen - sizeof(struct ip);
 1851                                 opts = (uint16_t *) ((caddr_t) ip +
 1852                                         sizeof(struct ip));
 1853 
                                      /* ones-complement sum of the option bytes */
 1854                                 while (temp > 1) {
 1855                                         optsum += ntohs(*opts++);
 1856                                         temp -= 2;
 1857                                 }
 1858                                 while (optsum >> 16)
 1859                                         optsum = (optsum >> 16) +
 1860                                                  (optsum & 0xffff);
 1861 
 1862                                 /* Deduct ip opts sum from hwsum (rfc 1624). */
 1863                                 m->m_pkthdr.csum_data =
 1864                                         ~((~m->m_pkthdr.csum_data) - ~optsum);
 1865 
 1866                                 while (m->m_pkthdr.csum_data >> 16)
 1867                                         m->m_pkthdr.csum_data =
 1868                                                 (m->m_pkthdr.csum_data >> 16) +
 1869                                                 (m->m_pkthdr.csum_data &
 1870                                                  0xffff);
 1871                         }
 1872 
 1873                         m->m_pkthdr.csum_flags |= M_CSUM_DATA |
 1874                                                   M_CSUM_NO_PSEUDOHDR;
 1875                 } else
 1876 swcsum:
 1877                         m->m_pkthdr.csum_flags = 0;
 1878 #endif
 1879                 /* Pass it on. */
 1880                 (*ifp->if_input)(ifp, m);
 1881         }
 1882 
 1883         if (progress) {
 1884                 /* Update the receive pointer. */
 1885                 if (i == sc->sc_rxptr) {
 1886                         GEM_COUNTER_INCR(sc, sc_ev_rxfull);
 1887 #ifdef GEM_DEBUG
 1888                         if (ifp->if_flags & IFF_DEBUG)
 1889                                 printf("%s: rint: ring wrap\n",
 1890                                     sc->sc_dev.dv_xname);
 1891 #endif
 1892                 }
 1893                 sc->sc_rxptr = i;
                      /*
                       * NOTE(review): kick with the previous index --
                       * presumably the kick register wants the last valid
                       * descriptor rather than the next free one; confirm
                       * against the GEM chip documentation.
                       */
 1894                 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
 1895         }
 1896 #ifdef GEM_COUNTERS
          /* Histogram of packets handled per interrupt: 0..4, <16, <32, <64, >=64. */
 1897         if (progress <= 4) {
 1898                 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
 1899         } else if (progress < 32) {
 1900                 if (progress < 16)
 1901                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
 1902                 else
 1903                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
 1904 
 1905         } else {
 1906                 if (progress < 64)
 1907                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
 1908                 else
 1909                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
 1910         }
 1911 #endif
 1912 
 1913         DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
 1914                 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
 1915 
 1916         /* Read error counters ... */
 1917         ifp->if_ierrors +=
 1918             bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) +
 1919             bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) +
 1920             bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) +
 1921             bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL);
 1922 
 1923         /* ... then clear the hardware counters. */
 1924         bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
 1925         bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
 1926         bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
 1927         bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
 1928 
 1929         return (1);
 1930 }
 1931 
 1932 
 1933 /*
 1934  * gem_add_rxbuf:
 1935  *
 1936  *      Add a receive buffer to the indicated descriptor.
 1937  */
 1938 int
 1939 gem_add_rxbuf(struct gem_softc *sc, int idx)
 1940 {
 1941         struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
 1942         struct mbuf *m;
 1943         int error;
 1944 
          /* Allocate an mbuf header plus a cluster to hold the frame. */
 1945         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1946         if (m == NULL)
 1947                 return (ENOBUFS);
 1948 
 1949         MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
 1950         MCLGET(m, M_DONTWAIT);
 1951         if ((m->m_flags & M_EXT) == 0) {
                      /* No cluster available: release the header too. */
 1952                 m_freem(m);
 1953                 return (ENOBUFS);
 1954         }
 1955 
 1956 #ifdef GEM_DEBUG
 1957 /* bzero the packet to check DMA */
 1958         memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
 1959 #endif
 1960 
          /* Drop the old buffer's mapping before installing the new one. */
 1961         if (rxs->rxs_mbuf != NULL)
 1962                 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
 1963 
 1964         rxs->rxs_mbuf = m;
 1965 
          /* Map the whole cluster for device writes (device -> memory). */
 1966         error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
 1967             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 1968             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1969         if (error) {
 1970                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1971                     sc->sc_dev.dv_xname, idx, error);
                      /*
                       * NOTE(review): loading a fixed-size cluster is not
                       * expected to fail here, hence the panic; a graceful
                       * recovery path would be preferable.
                       */
 1972                 panic("gem_add_rxbuf"); /* XXX */
 1973         }
 1974 
 1975         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1976             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1977 
          /* Re-arm the descriptor so the chip can fill the new buffer. */
 1978         GEM_INIT_RXDESC(sc, idx);
 1979 
 1980         return (0);
 1981 }
 1982 
 1983 
 /*
  * gem_eint:
  *
  *	Handle "error" interrupts: MIF (link status change, currently just
  *	logged), Rx tag error (recovered by resetting the receive DMA
  *	engine), and bus error (logged).  Any other status is printed
  *	symbolically.  Always returns 1 (interrupt claimed).
  */
 1984 int
 1985 gem_eint(struct gem_softc *sc, u_int status)
 1986 {
 1987         char bits[128];
 1988         u_int32_t v;
 1989 
 1990         if ((status & GEM_INTR_MIF) != 0) {
 1991                 printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
 1992                 return (1);
 1993         }
 1994 
 1995         if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
 1996                 gem_reset_rxdma(sc);
 1997                 return (1);
 1998         }
 1999 
 2000         if (status & GEM_INTR_BERR) {
                      /*
                       * Read GEM_ERROR_STATUS twice and report the second
                       * value -- presumably the register is latched like
                       * GEM_MII_INTERRUP_STATUS; TODO confirm against the
                       * chip documentation.
                       */
 2001                 bus_space_read_4(sc->sc_bustag, sc->sc_h2, GEM_ERROR_STATUS);
 2002                 v = bus_space_read_4(sc->sc_bustag, sc->sc_h2,
 2003                     GEM_ERROR_STATUS);
 2004                 printf("%s: bus error interrupt: 0x%02x\n",
 2005                     sc->sc_dev.dv_xname, v);
 2006                 return (1);
 2007         }
 2008 
          /* Unrecognized status: dump it with symbolic bit names. */
 2009         printf("%s: status=%s\n", sc->sc_dev.dv_xname,
 2010                 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits)));
 2011         return (1);
 2012 }
 2013 
 2014 
 2015 /*
 2016  * PCS interrupts.
 2017  * We should receive these when the link status changes, but sometimes
 2018  * we don't receive them for link up.  We compensate for this in the
 2019  * gem_tick() callout.
 2020  */
 2021 int
 2022 gem_pint(struct gem_softc *sc)
 2023 {
 2024         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2025         bus_space_tag_t t = sc->sc_bustag;
 2026         bus_space_handle_t h = sc->sc_h1;
 2027         u_int32_t v, v2;
 2028 
 2029         /*
 2030          * Clear the PCS interrupt from GEM_STATUS.  The PCS register is
 2031          * latched, so we have to read it twice.  There is only one bit in
 2032          * use, so the value is meaningless.
 2033          */
 2034         bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
 2035         bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
 2036 
          /* Nothing to do unless we are up and using the PCS (SERDES/serial). */
 2037         if ((ifp->if_flags & IFF_UP) == 0)
 2038                 return 1;
 2039 
 2040         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
 2041                 return 1;
 2042 
 2043         v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2044         /* If we see remote fault, our link partner is probably going away */
 2045         if ((v & GEM_MII_STATUS_REM_FLT) != 0) {
 2046                 gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0);
 2047                 v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2048         /* Otherwise, we may need to wait after auto-negotiation completes */
 2049         } else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) ==
 2050             GEM_MII_STATUS_ANEG_CPT) {
 2051                 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS);
 2052                 v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2053         }
 2054         if ((v & GEM_MII_STATUS_LINK_STS) != 0) {
                      /* Link is up; nothing to do if we already knew that. */
 2055                 if (sc->sc_flags & GEM_LINK) {
 2056                         return 1;
 2057                 }
 2058                 callout_stop(&sc->sc_tick_ch);
                      /*
                       * Derive the duplex setting from the abilities common
                       * to our advertisement (ANAR) and the link partner's
                       * (ANLPAR).
                       */
 2059                 v = bus_space_read_4(t, h, GEM_MII_ANAR);
 2060                 v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR);
 2061                 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX;
 2062                 sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE;
 2063                 v &= v2;
 2064                 if (v & GEM_MII_ANEG_FUL_DUPLX) {
 2065                         sc->sc_mii.mii_media_active |= IFM_FDX;
 2066 #ifdef GEM_DEBUG
 2067                         aprint_debug("%s: link up: full duplex\n",
 2068                             sc->sc_dev.dv_xname);
 2069 #endif
 2070                 } else if (v & GEM_MII_ANEG_HLF_DUPLX) {
 2071                         sc->sc_mii.mii_media_active |= IFM_HDX;
 2072 #ifdef GEM_DEBUG
 2073                         aprint_debug("%s: link up: half duplex\n",
 2074                             sc->sc_dev.dv_xname);
 2075 #endif
 2076                 } else {
 2077 #ifdef GEM_DEBUG
 2078                         aprint_debug("%s: duplex mismatch\n",
 2079                             sc->sc_dev.dv_xname);
 2080 #endif
 2081                 }
 2082                 gem_statuschange(sc);
 2083         } else {
                      /* Link is down; nothing to do if we already knew that. */
 2084                 if ((sc->sc_flags & GEM_LINK) == 0) {
 2085                         return 1;
 2086                 }
 2087                 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
 2088                 sc->sc_mii.mii_media_status = IFM_AVALID;
 2089 #ifdef GEM_DEBUG
 2090                         aprint_debug("%s: link down\n",
 2091                             sc->sc_dev.dv_xname);
 2092 #endif
 2093                 gem_statuschange(sc);
 2094 
 2095                 /* Start the 10 second timer */
 2096                 callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
 2097         }
 2098         return 1;
 2099 }
 2100 
 2101 
 2102 
 /*
  * gem_intr:
  *
  *	Main interrupt handler.  Reads GEM_STATUS once and dispatches to
  *	the error (gem_eint), transmit (gem_tint), receive (gem_rint) and
  *	PCS (gem_pint) handlers as indicated; also reports MAC-level
  *	Tx/Rx faults, reinitializing the chip on fatal Tx errors and
  *	resetting the Rx DMA engine on Rx overflow.  Returns nonzero if
  *	any handler claimed the interrupt.
  */
 2103 int
 2104 gem_intr(v)
 2105         void *v;
 2106 {
 2107         struct gem_softc *sc = (struct gem_softc *)v;
 2108         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2109         bus_space_tag_t t = sc->sc_bustag;
 2110         bus_space_handle_t h = sc->sc_h1;
 2111         u_int32_t status;
 2112         int r = 0;
 2113 #ifdef GEM_DEBUG
 2114         char bits[128];
 2115 #endif
 2116 
 2117         /* XXX We should probably mask out interrupts until we're done */
 2118 
 2119         sc->sc_ev_intr.ev_count++;
 2120 
 2121         status = bus_space_read_4(t, h, GEM_STATUS);
 2122         DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
 2123                 sc->sc_dev.dv_xname, (status >> 19),
 2124                 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits))));
 2125 
 2126         if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
 2127                 r |= gem_eint(sc, status);
 2128 
 2129         /* We don't bother with GEM_INTR_TX_DONE */
 2130         if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
 2131                 GEM_COUNTER_INCR(sc, sc_ev_txint);
 2132                 r |= gem_tint(sc);
 2133         }
 2134 
 2135         if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
 2136                 GEM_COUNTER_INCR(sc, sc_ev_rxint);
 2137                 r |= gem_rint(sc);
 2138         }
 2139 
 2140         /* We should eventually do more than just print out error stats. */
 2141         if (status & GEM_INTR_TX_MAC) {
 2142                 int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS);
 2143                 if (txstat & ~GEM_MAC_TX_XMIT_DONE)
 2144                         printf("%s: MAC tx fault, status %x\n",
 2145                             sc->sc_dev.dv_xname, txstat);
                      /* Underrun/too-long are fatal: reinitialize the chip. */
 2146                 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
 2147                         gem_init(ifp);
 2148         }
 2149         if (status & GEM_INTR_RX_MAC) {
 2150                 int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS);
 2151                 /*
 2152                  * At least with GEM_SUN_GEM and some GEM_SUN_ERI
 2153                  * revisions GEM_MAC_RX_OVERFLOW happen often due to a
 2154                  * silicon bug so handle them silently. Moreover, it's
 2155                  * likely that the receiver has hung so we reset it.
 2156                  */
 2157                 if (rxstat & GEM_MAC_RX_OVERFLOW) {
 2158                         ifp->if_ierrors++;
 2159                         gem_reset_rxdma(sc);
 2160                 } else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
 2161                         printf("%s: MAC rx fault, status 0x%02x\n",
 2162                             sc->sc_dev.dv_xname, rxstat);
 2163         }
 2164         if (status & GEM_INTR_PCS) {
 2165                 r |= gem_pint(sc);
 2166         }
 2167 
 2168 /* Do we need to do anything with these?
 2169         if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
 2170                 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
 2171                 if ((status2 & GEM_MAC_PAUSED) != 0)
 2172                         aprintf_debug("%s: PAUSE received (%d slots)\n",
 2173                             GEM_MAC_PAUSE_TIME(status2), sc->sc_dev.dv_xname);
 2174                 if ((status2 & GEM_MAC_PAUSE) != 0)
 2175                         aprintf_debug("%s: transited to PAUSE state\n",
 2176                             sc->sc_dev.dv_xname);
 2177                 if ((status2 & GEM_MAC_RESUME) != 0)
 2178                         aprintf_debug("%s: transited to non-PAUSE state\n",
 2179                             sc->sc_dev.dv_xname);
 2180         }
 2181         if ((status & GEM_INTR_MIF) != 0)
 2182                 aprintf_debug("%s: MIF interrupt\n", sc->sc_dev.dv_xname);
 2183 */
 2184 #if NRND > 0
          /* Feed the interrupt status into the entropy pool. */
 2185         rnd_add_uint32(&sc->rnd_source, status);
 2186 #endif
 2187         return (r);
 2188 }
 2189 
 2190 
 2191 void
 2192 gem_watchdog(ifp)
 2193         struct ifnet *ifp;
 2194 {
 2195         struct gem_softc *sc = ifp->if_softc;
 2196 
 2197         DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
 2198                 "GEM_MAC_RX_CONFIG %x\n",
 2199                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
 2200                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
 2201                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
 2202 
 2203         log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
 2204         ++ifp->if_oerrors;
 2205 
 2206         /* Try to get more packets going. */
 2207         gem_start(ifp);
 2208 }
 2209 
 2210 /*
 2211  * Initialize the MII Management Interface
 2212  */
 2213 void
 2214 gem_mifinit(sc)
 2215         struct gem_softc *sc;
 2216 {
 2217         bus_space_tag_t t = sc->sc_bustag;
 2218         bus_space_handle_t mif = sc->sc_h1;
 2219 
 2220         /* Configure the MIF in frame mode */
 2221         sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 2222         sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
 2223         bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
 2224 }
 2225 
 2226 /*
 2227  * MII interface
 2228  *
 2229  * The GEM MII interface supports at least three different operating modes:
 2230  *
 2231  * Bitbang mode is implemented using data, clock and output enable registers.
 2232  *
 2233  * Frame mode is implemented by loading a complete frame into the frame
 2234  * register and polling the valid bit for completion.
 2235  *
 2236  * Polling mode uses the frame register but completion is indicated by
 2237  * an interrupt.
 2238  *
 2239  */
 2240 static int
 2241 gem_mii_readreg(self, phy, reg)
 2242         struct device *self;
 2243         int phy, reg;
 2244 {
 2245         struct gem_softc *sc = (void *)self;
 2246         bus_space_tag_t t = sc->sc_bustag;
 2247         bus_space_handle_t mif = sc->sc_h1;
 2248         int n;
 2249         u_int32_t v;
 2250 
 2251 #ifdef GEM_DEBUG1
 2252         if (sc->sc_debug)
 2253                 printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg);
 2254 #endif
 2255 
 2256         /* Construct the frame command */
 2257         v = (reg << GEM_MIF_REG_SHIFT)  | (phy << GEM_MIF_PHY_SHIFT) |
 2258                 GEM_MIF_FRAME_READ;
 2259 
 2260         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 2261         for (n = 0; n < 100; n++) {
 2262                 DELAY(1);
 2263                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 2264                 if (v & GEM_MIF_FRAME_TA0)
 2265                         return (v & GEM_MIF_FRAME_DATA);
 2266         }
 2267 
 2268         printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
 2269         return (0);
 2270 }
 2271 
 2272 static void
 2273 gem_mii_writereg(self, phy, reg, val)
 2274         struct device *self;
 2275         int phy, reg, val;
 2276 {
 2277         struct gem_softc *sc = (void *)self;
 2278         bus_space_tag_t t = sc->sc_bustag;
 2279         bus_space_handle_t mif = sc->sc_h1;
 2280         int n;
 2281         u_int32_t v;
 2282 
 2283 #ifdef GEM_DEBUG1
 2284         if (sc->sc_debug)
 2285                 printf("gem_mii_writereg: PHY %d reg %d val %x\n",
 2286                         phy, reg, val);
 2287 #endif
 2288 
 2289         /* Construct the frame command */
 2290         v = GEM_MIF_FRAME_WRITE                 |
 2291             (phy << GEM_MIF_PHY_SHIFT)          |
 2292             (reg << GEM_MIF_REG_SHIFT)          |
 2293             (val & GEM_MIF_FRAME_DATA);
 2294 
 2295         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 2296         for (n = 0; n < 100; n++) {
 2297                 DELAY(1);
 2298                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 2299                 if (v & GEM_MIF_FRAME_TA0)
 2300                         return;
 2301         }
 2302 
 2303         printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
 2304 }
 2305 
 2306 static void
 2307 gem_mii_statchg(dev)
 2308         struct device *dev;
 2309 {
 2310         struct gem_softc *sc = (void *)dev;
 2311 #ifdef GEM_DEBUG
 2312         int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
 2313 #endif
 2314 
 2315 #ifdef GEM_DEBUG
 2316         if (sc->sc_debug)
 2317                 printf("gem_mii_statchg: status change: phy = %d\n",
 2318                         sc->sc_phys[instance]);
 2319 #endif
 2320         gem_statuschange(sc);
 2321 }
 2322 
 2323 /*
 2324  * Common status change for gem_mii_statchg() and gem_pint()
 2325  */
 2326 void
 2327 gem_statuschange(struct gem_softc* sc)
 2328 {
 2329         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2330         bus_space_tag_t t = sc->sc_bustag;
 2331         bus_space_handle_t mac = sc->sc_h1;
 2332         int gigabit;
 2333         u_int32_t rxcfg, txcfg, v;
 2334 
 2335         if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 &&
 2336             IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE)
 2337                 sc->sc_flags |= GEM_LINK;
 2338         else
 2339                 sc->sc_flags &= ~GEM_LINK;
 2340 
 2341         if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
 2342                 gigabit = 1;
 2343         else
 2344                 gigabit = 0;
 2345 
 2346         /*
 2347          * The configuration done here corresponds to the steps F) and
 2348          * G) and as far as enabling of RX and TX MAC goes also step H)
 2349          * of the initialization sequence outlined in section 3.2.1 of
 2350          * the GEM Gigabit Ethernet ASIC Specification.
 2351          */
 2352 
 2353         rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG);
 2354         rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
 2355         txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
 2356         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 2357                 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
 2358         else if (gigabit) {
 2359                 rxcfg |= GEM_MAC_RX_CARR_EXTEND;
 2360                 txcfg |= GEM_MAC_RX_CARR_EXTEND;
 2361         }
 2362         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
 2363         bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4,
 2364             BUS_SPACE_BARRIER_WRITE);
 2365         if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
 2366                 aprint_normal("%s: cannot disable TX MAC\n",
 2367                     sc->sc_dev.dv_xname);
 2368         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg);
 2369         bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0);
 2370         bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4,
 2371             BUS_SPACE_BARRIER_WRITE);
 2372         if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
 2373                 aprint_normal("%s: cannot disable RX MAC\n",
 2374                     sc->sc_dev.dv_xname);
 2375         bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg);
 2376 
 2377         v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) &
 2378             ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
 2379         bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
 2380 
 2381         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 &&
 2382             gigabit != 0)
 2383                 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
 2384                     GEM_MAC_SLOT_TIME_CARR_EXTEND);
 2385         else
 2386                 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
 2387                     GEM_MAC_SLOT_TIME_NORMAL);
 2388 
 2389         /* XIF Configuration */
 2390         if (sc->sc_flags & GEM_LINK)
 2391                 v = GEM_MAC_XIF_LINK_LED;
 2392         else
 2393                 v = 0;
 2394         v |= GEM_MAC_XIF_TX_MII_ENA;
 2395 
 2396         /* If an external transceiver is connected, enable its MII drivers */
 2397         sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
 2398         if ((sc->sc_flags &(GEM_SERDES | GEM_SERIAL)) == 0) {
 2399                 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
 2400                         /* External MII needs echo disable if half duplex. */
 2401                         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) &
 2402                             IFM_FDX) != 0)
 2403                                 /* turn on full duplex LED */
 2404                                 v |= GEM_MAC_XIF_FDPLX_LED;
 2405                         else
 2406                                 /* half duplex -- disable echo */
 2407                                 v |= GEM_MAC_XIF_ECHO_DISABL;
 2408                         if (gigabit)
 2409                                 v |= GEM_MAC_XIF_GMII_MODE;
 2410                         else
 2411                                 v &= ~GEM_MAC_XIF_GMII_MODE;
 2412                 } else
 2413                         /* Internal MII needs buf enable */
 2414                         v |= GEM_MAC_XIF_MII_BUF_ENA;
 2415         } else {
 2416                 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 2417                         v |= GEM_MAC_XIF_FDPLX_LED;
 2418                 v |= GEM_MAC_XIF_GMII_MODE;
 2419         }
 2420         bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
 2421 
 2422         if ((ifp->if_flags & IFF_RUNNING) != 0 &&
 2423             (sc->sc_flags & GEM_LINK) != 0) {
 2424                 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG,
 2425                     txcfg | GEM_MAC_TX_ENABLE);
 2426                 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG,
 2427                     rxcfg | GEM_MAC_RX_ENABLE);
 2428         }
 2429 }
 2430 
 2431 int
 2432 gem_mediachange(ifp)
 2433         struct ifnet *ifp;
 2434 {
 2435         struct gem_softc *sc = ifp->if_softc;
 2436         u_int s, t;
 2437 
 2438         if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
 2439                 return EINVAL;
 2440 
 2441         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) {
 2442                 s = IFM_SUBTYPE(sc->sc_media.ifm_media);
 2443                 if (s == IFM_AUTO) {
 2444                         if (sc->sc_mii_media != s) {
 2445 #ifdef GEM_DEBUG
 2446                                 aprint_debug("%s: setting media to auto\n",
 2447                                     sc->sc_dev.dv_xname);
 2448 #endif
 2449                                 sc->sc_mii_media = s;
 2450                                 if (ifp->if_flags & IFF_UP) {
 2451                                         gem_pcs_stop(sc, 0);
 2452                                         gem_pcs_start(sc);
 2453                                 }
 2454                         }
 2455                         return 0;
 2456                 }
 2457                 if (s == IFM_1000_SX) {
 2458                         t = IFM_OPTIONS(sc->sc_media.ifm_media);
 2459                         if (t == IFM_FDX || t == IFM_HDX) {
 2460                                 if (sc->sc_mii_media != t) {
 2461                                         sc->sc_mii_media = t;
 2462 #ifdef GEM_DEBUG
 2463                                         aprint_debug("%s:"
 2464                                             " setting media to 1000baseSX-%s\n",
 2465                                             sc->sc_dev.dv_xname,
 2466                                             t == IFM_FDX ? "FDX" : "HDX");
 2467 #endif
 2468                                         if (ifp->if_flags & IFF_UP) {
 2469                                                 gem_pcs_stop(sc, 0);
 2470                                                 gem_pcs_start(sc);
 2471                                         }
 2472                                 }
 2473                                 return 0;
 2474                         }
 2475                 }
 2476                 return EINVAL;
 2477         } else
 2478                 return (mii_mediachg(&sc->sc_mii));
 2479 }
 2480 
 2481 void
 2482 gem_mediastatus(ifp, ifmr)
 2483         struct ifnet *ifp;
 2484         struct ifmediareq *ifmr;
 2485 {
 2486         struct gem_softc *sc = ifp->if_softc;
 2487 
 2488         if ((ifp->if_flags & IFF_UP) == 0)
 2489                 return;
 2490 
 2491         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
 2492                 mii_pollstat(&sc->sc_mii);
 2493         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 2494         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 2495 }
 2496 
 2497 /*
 2498  * Process an ioctl request.
 2499  */
 2500 int
 2501 gem_ioctl(ifp, cmd, data)
 2502         struct ifnet *ifp;
 2503         u_long cmd;
 2504         caddr_t data;
 2505 {
 2506         struct gem_softc *sc = ifp->if_softc;
 2507         struct ifreq *ifr = (struct ifreq *)data;
 2508         int s, error = 0;
 2509 
 2510         s = splnet();
 2511 
 2512         switch (cmd) {
 2513         case SIOCGIFMEDIA:
 2514         case SIOCSIFMEDIA:
 2515                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
 2516                 break;
 2517         case SIOCSIFFLAGS:
 2518 #define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
 2519                 if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
 2520                     == (IFF_UP|IFF_RUNNING))
 2521                     && ((ifp->if_flags & (~RESETIGN))
 2522                     == (sc->sc_if_flags & (~RESETIGN)))) {
 2523                         gem_setladrf(sc);
 2524                         break;
 2525                 }
 2526 #undef RESETIGN
 2527                 /*FALLTHROUGH*/
 2528         default:
 2529                 error = ether_ioctl(ifp, cmd, data);
 2530                 if (error == ENETRESET) {
 2531                         /*
 2532                          * Multicast list has changed; set the hardware filter
 2533                          * accordingly.
 2534                          */
 2535                         if (ifp->if_flags & IFF_RUNNING)
 2536                                 gem_setladrf(sc);
 2537                         error = 0;
 2538                 }
 2539                 break;
 2540         }
 2541 
 2542         /* Try to get things going again */
 2543         if (ifp->if_flags & IFF_UP)
 2544                 gem_start(ifp);
 2545         splx(s);
 2546         return (error);
 2547 }
 2548 
 2549 
 2550 void
 2551 gem_shutdown(arg)
 2552         void *arg;
 2553 {
 2554         struct gem_softc *sc = (struct gem_softc *)arg;
 2555         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2556 
 2557         gem_stop(ifp, 1);
 2558 }
 2559 
 2560 /*
 2561  * Set up the logical address filter.
 2562  */
 2563 void
 2564 gem_setladrf(sc)
 2565         struct gem_softc *sc;
 2566 {
 2567         struct ethercom *ec = &sc->sc_ethercom;
 2568         struct ifnet *ifp = &ec->ec_if;
 2569         struct ether_multi *enm;
 2570         struct ether_multistep step;
 2571         bus_space_tag_t t = sc->sc_bustag;
 2572         bus_space_handle_t h = sc->sc_h1;
 2573         u_int32_t crc;
 2574         u_int32_t hash[16];
 2575         u_int32_t v;
 2576         int i;
 2577 
 2578         /* Get current RX configuration */
 2579         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
 2580 
 2581         /*
 2582          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 2583          * and hash filter.  Depending on the case, the right bit will be
 2584          * enabled.
 2585          */
 2586         v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
 2587             GEM_MAC_RX_PROMISC_GRP);
 2588 
 2589         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 2590                 /* Turn on promiscuous mode */
 2591                 v |= GEM_MAC_RX_PROMISCUOUS;
 2592                 ifp->if_flags |= IFF_ALLMULTI;
 2593                 goto chipit;
 2594         }
 2595 
 2596         /*
 2597          * Set up multicast address filter by passing all multicast addresses
 2598          * through a crc generator, and then using the high order 8 bits as an
 2599          * index into the 256 bit logical address filter.  The high order 4
 2600          * bits selects the word, while the other 4 bits select the bit within
 2601          * the word (where bit 0 is the MSB).
 2602          */
 2603 
 2604         /* Clear hash table */
 2605         memset(hash, 0, sizeof(hash));
 2606 
 2607         ETHER_FIRST_MULTI(step, ec, enm);
 2608         while (enm != NULL) {
 2609                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 2610                         /*
 2611                          * We must listen to a range of multicast addresses.
 2612                          * For now, just accept all multicasts, rather than
 2613                          * trying to set only those filter bits needed to match
 2614                          * the range.  (At this time, the only use of address
 2615                          * ranges is for IP multicast routing, for which the
 2616                          * range is big enough to require all bits set.)
 2617                          * XXX should use the address filters for this
 2618                          */
 2619                         ifp->if_flags |= IFF_ALLMULTI;
 2620                         v |= GEM_MAC_RX_PROMISC_GRP;
 2621                         goto chipit;
 2622                 }
 2623 
 2624                 /* Get the LE CRC32 of the address */
 2625                 crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));
 2626 
 2627                 /* Just want the 8 most significant bits. */
 2628                 crc >>= 24;
 2629 
 2630                 /* Set the corresponding bit in the filter. */
 2631                 hash[crc >> 4] |= 1 << (15 - (crc & 15));
 2632 
 2633                 ETHER_NEXT_MULTI(step, enm);
 2634         }
 2635 
 2636         v |= GEM_MAC_RX_HASH_FILTER;
 2637         ifp->if_flags &= ~IFF_ALLMULTI;
 2638 
 2639         /* Now load the hash table into the chip (if we are using it) */
 2640         for (i = 0; i < 16; i++) {
 2641                 bus_space_write_4(t, h,
 2642                     GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
 2643                     hash[i]);
 2644         }
 2645 
 2646 chipit:
 2647         sc->sc_if_flags = ifp->if_flags;
 2648         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 2649 }
 2650 
 2651 #if notyet
 2652 
 2653 /*
 2654  * gem_power:
 2655  *
 2656  *      Power management (suspend/resume) hook.
 2657  */
 2658 void
 2659 gem_power(why, arg)
 2660         int why;
 2661         void *arg;
 2662 {
 2663         struct gem_softc *sc = arg;
 2664         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2665         int s;
 2666 
 2667         s = splnet();
 2668         switch (why) {
 2669         case PWR_SUSPEND:
 2670         case PWR_STANDBY:
 2671                 gem_stop(ifp, 1);
 2672                 if (sc->sc_power != NULL)
 2673                         (*sc->sc_power)(sc, why);
 2674                 break;
 2675         case PWR_RESUME:
 2676                 if (ifp->if_flags & IFF_UP) {
 2677                         if (sc->sc_power != NULL)
 2678                                 (*sc->sc_power)(sc, why);
 2679                         gem_init(ifp);
 2680                 }
 2681                 break;
 2682         case PWR_SOFTSUSPEND:
 2683         case PWR_SOFTSTANDBY:
 2684         case PWR_SOFTRESUME:
 2685                 break;
 2686         }
 2687         splx(s);
 2688 }
 2689 #endif

Cache object: 88cca656b1581eb41cd65502ce9505de


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.