The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/gem.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $OpenBSD: gem.c,v 1.127 2022/07/12 22:08:17 bluhm Exp $ */
    2 /*      $NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */
    3 
    4 /*
    5  *
    6  * Copyright (C) 2001 Eduardo Horvath.
    7  * All rights reserved.
    8  *
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  */
   32 
   33 /*
   34  * Driver for Sun GEM ethernet controllers.
   35  */
   36 
   37 #include "bpfilter.h"
   38 
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/timeout.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/syslog.h>
   44 #include <sys/malloc.h>
   45 #include <sys/kernel.h>
   46 #include <sys/socket.h>
   47 #include <sys/ioctl.h>
   48 #include <sys/errno.h>
   49 #include <sys/device.h>
   50 #include <sys/endian.h>
   51 #include <sys/atomic.h>
   52 
   53 #include <net/if.h>
   54 #include <net/if_media.h>
   55 
   56 #include <netinet/in.h>
   57 #include <netinet/if_ether.h>
   58 
   59 #if NBPFILTER > 0
   60 #include <net/bpf.h>
   61 #endif
   62 
   63 #include <machine/bus.h>
   64 #include <machine/intr.h>
   65 
   66 #include <dev/mii/mii.h>
   67 #include <dev/mii/miivar.h>
   68 
   69 #include <dev/ic/gemreg.h>
   70 #include <dev/ic/gemvar.h>
   71 
   72 #define TRIES   10000
   73 
/* Autoconf glue: device class "gem", attached as a network interface. */
struct cfdriver gem_cd = {
        NULL, "gem", DV_IFNET
};
   77 
   78 void            gem_start(struct ifqueue *);
   79 void            gem_stop(struct ifnet *, int);
   80 int             gem_ioctl(struct ifnet *, u_long, caddr_t);
   81 void            gem_tick(void *);
   82 void            gem_watchdog(struct ifnet *);
   83 int             gem_init(struct ifnet *);
   84 void            gem_init_regs(struct gem_softc *);
   85 int             gem_ringsize(int);
   86 int             gem_meminit(struct gem_softc *);
   87 void            gem_mifinit(struct gem_softc *);
   88 int             gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
   89                     u_int32_t, u_int32_t);
   90 void            gem_reset(struct gem_softc *);
   91 int             gem_reset_rx(struct gem_softc *);
   92 int             gem_reset_tx(struct gem_softc *);
   93 int             gem_disable_rx(struct gem_softc *);
   94 int             gem_disable_tx(struct gem_softc *);
   95 void            gem_rx_watchdog(void *);
   96 void            gem_rxdrain(struct gem_softc *);
   97 void            gem_fill_rx_ring(struct gem_softc *);
   98 int             gem_add_rxbuf(struct gem_softc *, int idx);
   99 int             gem_load_mbuf(struct gem_softc *, struct gem_sxd *,
  100                     struct mbuf *);
  101 void            gem_iff(struct gem_softc *);
  102 
  103 /* MII methods & callbacks */
  104 int             gem_mii_readreg(struct device *, int, int);
  105 void            gem_mii_writereg(struct device *, int, int, int);
  106 void            gem_mii_statchg(struct device *);
  107 int             gem_pcs_readreg(struct device *, int, int);
  108 void            gem_pcs_writereg(struct device *, int, int, int);
  109 
  110 int             gem_mediachange(struct ifnet *);
  111 void            gem_mediastatus(struct ifnet *, struct ifmediareq *);
  112 
  113 int             gem_eint(struct gem_softc *, u_int);
  114 int             gem_rint(struct gem_softc *);
  115 int             gem_tint(struct gem_softc *, u_int32_t);
  116 int             gem_pint(struct gem_softc *);
  117 
  118 #ifdef GEM_DEBUG
  119 #define DPRINTF(sc, x)  if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
  120                                 printf x
  121 #else
  122 #define DPRINTF(sc, x)  /* nothing */
  123 #endif
  124 
/*
 * Attach a Gem interface to the system.
 *
 * Allocates and DMA-loads the control data (the TX/RX descriptor
 * rings), creates the per-descriptor DMA maps, probes for a PHY
 * (external MII first, then internal MII, then PCS/SERDES), and
 * finally attaches the ifnet.  On any allocation failure, everything
 * acquired so far is released through the fall-through fail_* chain
 * at the bottom and the function returns without attaching.
 */
void
gem_config(struct gem_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct mii_data *mii = &sc->sc_mii;
        struct mii_softc *child;
        int i, error, mii_flags, phyad;
        struct ifmedia_entry *ifm;

        /* Make sure the chip is stopped. */
        ifp->if_softc = sc;
        gem_reset(sc);

        /*
         * Allocate the control data structures, and create and load the
         * DMA map for it.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmatag,
            sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
            1, &sc->sc_cdnseg, 0)) != 0) {
                printf("\n%s: unable to allocate control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_0;
        }

        /* XXX should map this in with correct endianness */
        if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
            sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
            BUS_DMA_COHERENT)) != 0) {
                printf("\n%s: unable to map control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_1;
        }

        if ((error = bus_dmamap_create(sc->sc_dmatag,
            sizeof(struct gem_control_data), 1,
            sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
                printf("\n%s: unable to create control data DMA map, "
                    "error = %d\n", sc->sc_dev.dv_xname, error);
                goto fail_2;
        }

        if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
            sc->sc_control_data, sizeof(struct gem_control_data), NULL,
            0)) != 0) {
                printf("\n%s: unable to load control data DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_3;
        }

        /*
         * Create the receive buffer DMA maps.
         */
        for (i = 0; i < GEM_NRXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
                    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
                        printf("\n%s: unable to create rx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_5;
                }
                sc->sc_rxsoft[i].rxs_mbuf = NULL;
        }
        /*
         * Create the transmit buffer DMA maps.
         */
        for (i = 0; i < GEM_NTXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
                    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_txd[i].sd_map)) != 0) {
                        printf("\n%s: unable to create tx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_6;
                }
                sc->sc_txd[i].sd_mbuf = NULL;
        }

        /*
         * From this point forward, the attachment cannot fail.  A failure
         * before this point releases all resources that may have been
         * allocated.
         */

        /* Announce ourselves. */
        printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

        /* Get RX FIFO size (register holds it in units of 64 bytes). */
        sc->sc_rxfifosize = 64 *
            bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

        /* Initialize ifnet structure. */
        strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_xflags = IFXF_MPSAFE;
        ifp->if_qstart = gem_start;
        ifp->if_ioctl = gem_ioctl;
        ifp->if_watchdog = gem_watchdog;
        /* Keep one descriptor spare so prod == cons means "empty". */
        ifq_set_maxlen(&ifp->if_snd, GEM_NTXDESC - 1);

        ifp->if_capabilities = IFCAP_VLAN_MTU;

        /* Initialize ifmedia structures and MII info */
        mii->mii_ifp = ifp;
        mii->mii_readreg = gem_mii_readreg;
        mii->mii_writereg = gem_mii_writereg;
        mii->mii_statchg = gem_mii_statchg;

        ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

        /* Bad things will happen if we touch this register on ERI. */
        if (sc->sc_variant != GEM_SUN_ERI)
                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                    GEM_MII_DATAPATH_MODE, 0);

        gem_mifinit(sc);

        mii_flags = MIIF_DOPAUSE;

        /* 
         * Look for an external PHY.
         */
        if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
                sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                    GEM_MIF_CONFIG, sc->sc_mif_config);

                switch (sc->sc_variant) {
                case GEM_SUN_ERI:
                        phyad = GEM_PHYAD_EXTERNAL;
                        break;
                default:
                        phyad = MII_PHY_ANY;
                        break;
                }

                mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
                    MII_OFFSET_ANY, mii_flags);
        }

        /*
         * Fall back on an internal PHY if no external PHY was found.
         * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
         * trusted when the firmware has powered down the chip
         */
        child = LIST_FIRST(&mii->mii_phys);
        if (child == NULL &&
            (sc->sc_mif_config & GEM_MIF_CONFIG_MDI0 || GEM_IS_APPLE(sc))) {
                sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                    GEM_MIF_CONFIG, sc->sc_mif_config);

                switch (sc->sc_variant) {
                case GEM_SUN_ERI:
                case GEM_APPLE_K2_GMAC:
                        phyad = GEM_PHYAD_INTERNAL;
                        break;
                case GEM_APPLE_GMAC:
                        phyad = GEM_PHYAD_EXTERNAL;
                        break;
                default:
                        phyad = MII_PHY_ANY;
                        break;
                }

                mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
                    MII_OFFSET_ANY, mii_flags);
        }

        /* 
         * Try the external PCS SERDES if we didn't find any MII
         * devices.
         */
        child = LIST_FIRST(&mii->mii_phys);
        if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                    GEM_MII_SLINK_CONTROL,
                    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

                bus_space_write_4(sc->sc_bustag, sc->sc_h1,
                     GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

                /* From here on, PHY register access goes via the PCS. */
                mii->mii_readreg = gem_pcs_readreg;
                mii->mii_writereg = gem_pcs_writereg;

                mii_flags |= MIIF_NOISOLATE;

                mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
                    MII_OFFSET_ANY, mii_flags);
        }

        child = LIST_FIRST(&mii->mii_phys);
        if (child == NULL) {
                /* No PHY attached */
                ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
                ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
        } else {
                /*
                 * XXX - we can really do the following ONLY if the
                 * phy indeed has the auto negotiation capability!!
                 */
                ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
        }

        /* Check if we support GigE media. */
        mtx_enter(&ifmedia_mtx);
        TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
                if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
                        sc->sc_flags |= GEM_GIGABIT;
                        break;
                }
        }
        mtx_leave(&ifmedia_mtx);

        /* Attach the interface. */
        if_attach(ifp);
        ether_ifattach(ifp);

        timeout_set(&sc->sc_tick_ch, gem_tick, sc);
        timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
        return;

        /*
         * Free any resources we've allocated during the failed attach
         * attempt.  Do this in reverse order and fall through.
         */
 fail_6:
        for (i = 0; i < GEM_NTXDESC; i++) {
                if (sd_map was created, tear it down */
                if (sc->sc_txd[i].sd_map != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_txd[i].sd_map);
        }
 fail_5:
        for (i = 0; i < GEM_NRXDESC; i++) {
                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_rxsoft[i].rxs_dmamap);
        }
        /* (no fail_4 label: this unload pairs with the successful load) */
        bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
        bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
        bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
            sizeof(struct gem_control_data));
 fail_1:
        bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
        return;
}
  382 
/*
 * Detach a Gem interface: stop the chip, then release everything
 * gem_config() acquired, in reverse order of acquisition, and finally
 * detach the ifnet from the network stack.
 */
void
gem_unconfig(struct gem_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        int i;

        /* Full stop: cancels timeouts, resets RX/TX, frees queued mbufs. */
        gem_stop(ifp, 1);

        for (i = 0; i < GEM_NTXDESC; i++) {
                if (sc->sc_txd[i].sd_map != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_txd[i].sd_map);
        }
        for (i = 0; i < GEM_NRXDESC; i++) {
                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmatag,
                            sc->sc_rxsoft[i].rxs_dmamap);
        }
        /* Tear down the control-data DMA resources (unload/destroy/unmap/free). */
        bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
        bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
        bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
            sizeof(struct gem_control_data));
        bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);

        /* Detach all PHYs */
        mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

        /* Delete all remaining media. */
        ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

        ether_ifdetach(ifp);
        if_detach(ifp);
}
  416 
  417 
/*
 * Once-per-second timeout: fold the chip's hardware collision and RX
 * error counters into the ifnet statistics, refill the RX ring if it
 * ran completely dry (there is no interrupt for that condition), tick
 * the MII, and rearm itself.
 */
void
gem_tick(void *arg)
{
        struct gem_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mac = sc->sc_h1;
        int s;
        u_int32_t v;

        s = splnet();
        /* unload collisions counters */
        v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
            bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
        /* excess/late collisions count as both collisions and errors */
        ifp->if_collisions += v +
            bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
            bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
        ifp->if_oerrors += v;

        /* read error counters */
        ifp->if_ierrors +=
            bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
            bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
            bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
            bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

        /* clear the hardware counters so the next read starts from zero */
        bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
        bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
        bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

        /*
         * If buffer allocation fails, the receive ring may become
         * empty. There is no receive interrupt to recover from that.
         */
        if (if_rxr_inuse(&sc->sc_rx_ring) == 0) {
                gem_fill_rx_ring(sc);
                /* tell the chip where the new producer index is */
                bus_space_write_4(t, mac, GEM_RX_KICK, sc->sc_rx_prod);
        }

        mii_tick(&sc->sc_mii);
        splx(s);

        /* rearm: run again in one second */
        timeout_add_sec(&sc->sc_tick_ch, 1);
}
  468 
  469 int
  470 gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
  471    u_int32_t clr, u_int32_t set)
  472 {
  473         int i;
  474         u_int32_t reg;
  475 
  476         for (i = TRIES; i--; DELAY(100)) {
  477                 reg = bus_space_read_4(sc->sc_bustag, h, r);
  478                 if ((reg & clr) == 0 && (reg & set) == set)
  479                         return (1);
  480         }
  481 
  482         return (0);
  483 }
  484 
/*
 * Perform a full chip reset: stop RX and TX DMA first (resetting while
 * DMA is active can hang the bus), then assert the global software
 * reset and wait for the chip to clear the reset bits.
 */
void
gem_reset(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h2;
        int s;

        s = splnet();
        DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
        /* quiesce both DMA engines before the global reset */
        gem_reset_rx(sc);
        gem_reset_tx(sc);

        /* Do a full reset */
        bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
        /* hardware clears the bits when the reset has completed */
        if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
                printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
        splx(s);
}
  503 
  504 
  505 /*
  506  * Drain the receive queue.
  507  */
  508 void
  509 gem_rxdrain(struct gem_softc *sc)
  510 {
  511         struct gem_rxsoft *rxs;
  512         int i;
  513 
  514         for (i = 0; i < GEM_NRXDESC; i++) {
  515                 rxs = &sc->sc_rxsoft[i];
  516                 if (rxs->rxs_mbuf != NULL) {
  517                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
  518                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  519                         bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
  520                         m_freem(rxs->rxs_mbuf);
  521                         rxs->rxs_mbuf = NULL;
  522                 }
  523         }
  524         sc->sc_rx_prod = sc->sc_rx_cons = 0;
  525 }
  526 
/*
 * Reset the whole thing.
 *
 * Bring the interface down: cancel the tick timeout, mark the
 * interface not-running, optionally reset the hardware, wait out any
 * in-flight interrupt/transmit activity, and free all queued buffers.
 * With `softonly' set, the hardware itself is left untouched (used
 * from gem_unconfig() after the device is already going away).
 */
void
gem_stop(struct ifnet *ifp, int softonly)
{
        struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
        struct gem_sxd *sd;
        u_int32_t i;

        DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

        timeout_del(&sc->sc_tick_ch);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~IFF_RUNNING;
        ifq_clr_oactive(&ifp->if_snd);
        ifp->if_timer = 0;

        if (!softonly) {
                mii_down(&sc->sc_mii);

                gem_reset_rx(sc);
                gem_reset_tx(sc);
        }

        /*
         * Wait for any interrupt handler and any concurrent if_qstart
         * to finish before touching the TX bookkeeping below.
         */
        intr_barrier(sc->sc_ih);
        ifq_barrier(&ifp->if_snd);

        KASSERT((ifp->if_flags & IFF_RUNNING) == 0);

        /*
         * Release any queued transmit buffers.
         */
        for (i = 0; i < GEM_NTXDESC; i++) {
                sd = &sc->sc_txd[i];
                if (sd->sd_mbuf != NULL) {
                        bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
                            sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
                        m_freem(sd->sd_mbuf);
                        sd->sd_mbuf = NULL;
                }
        }
        /* TX ring is empty again */
        sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

        gem_rxdrain(sc);
}
  577 
  578 
/*
 * Reset the receiver.
 * Returns 0 on success, 1 if the ERX did not come out of reset.
 */
int
gem_reset_rx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        gem_disable_rx(sc);
        bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
        /* Wait till it finishes (bit 0 is the RX DMA enable) */
        if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
                printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
        /* Wait 5ms extra. */
        delay(5000);

        /* Finally, reset the ERX */
        bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
        /* Wait till it finishes (chip clears the bit when done) */
        if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
                printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
                return (1);
        }
        return (0);
}
  609 
  610 
/*
 * Reset the transmitter.
 * Returns 0 on success, 1 if the ETX did not come out of reset.
 */
int
gem_reset_tx(struct gem_softc *sc)
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        gem_disable_tx(sc);
        bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
        /* Wait till it finishes (bit 0 is the TX DMA enable) */
        if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
                printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
        /* Wait 5ms extra. */
        delay(5000);

        /* Finally, reset the ETX */
        bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
        /* Wait till it finishes (chip clears the bit when done) */
        if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
                printf("%s: cannot reset transmitter\n",
                        sc->sc_dev.dv_xname);
                return (1);
        }
        return (0);
}
  642 
  643 /*
  644  * Disable receiver.
  645  */
  646 int
  647 gem_disable_rx(struct gem_softc *sc)
  648 {
  649         bus_space_tag_t t = sc->sc_bustag;
  650         bus_space_handle_t h = sc->sc_h1;
  651         u_int32_t cfg;
  652 
  653         /* Flip the enable bit */
  654         cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
  655         cfg &= ~GEM_MAC_RX_ENABLE;
  656         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
  657 
  658         /* Wait for it to finish */
  659         return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
  660 }
  661 
  662 /*
  663  * Disable transmitter.
  664  */
  665 int
  666 gem_disable_tx(struct gem_softc *sc)
  667 {
  668         bus_space_tag_t t = sc->sc_bustag;
  669         bus_space_handle_t h = sc->sc_h1;
  670         u_int32_t cfg;
  671 
  672         /* Flip the enable bit */
  673         cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
  674         cfg &= ~GEM_MAC_TX_ENABLE;
  675         bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
  676 
  677         /* Wait for it to finish */
  678         return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
  679 }
  680 
  681 /*
  682  * Initialize interface.
  683  */
  684 int
  685 gem_meminit(struct gem_softc *sc)
  686 {
  687         int i;
  688 
  689         /*
  690          * Initialize the transmit descriptor ring.
  691          */
  692         for (i = 0; i < GEM_NTXDESC; i++) {
  693                 sc->sc_txdescs[i].gd_flags = 0;
  694                 sc->sc_txdescs[i].gd_addr = 0;
  695         }
  696         GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
  697             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  698 
  699         /*
  700          * Initialize the receive descriptor and receive job
  701          * descriptor rings.
  702          */
  703         for (i = 0; i < GEM_NRXDESC; i++) {
  704                 sc->sc_rxdescs[i].gd_flags = 0;
  705                 sc->sc_rxdescs[i].gd_addr = 0;
  706         }
  707         /* Hardware reads RX descriptors in multiples of four. */
  708         if_rxr_init(&sc->sc_rx_ring, 4, GEM_NRXDESC - 4);
  709         gem_fill_rx_ring(sc);
  710 
  711         return (0);
  712 }
  713 
  714 int
  715 gem_ringsize(int sz)
  716 {
  717         switch (sz) {
  718         case 32:
  719                 return GEM_RING_SZ_32;
  720         case 64:
  721                 return GEM_RING_SZ_64;
  722         case 128:
  723                 return GEM_RING_SZ_128;
  724         case 256:
  725                 return GEM_RING_SZ_256;
  726         case 512:
  727                 return GEM_RING_SZ_512;
  728         case 1024:
  729                 return GEM_RING_SZ_1024;
  730         case 2048:
  731                 return GEM_RING_SZ_2048;
  732         case 4096:
  733                 return GEM_RING_SZ_4096;
  734         case 8192:
  735                 return GEM_RING_SZ_8192;
  736         default:
  737                 printf("gem: invalid Receive Descriptor ring size %d\n", sz);
  738                 return GEM_RING_SZ_32;
  739         }
  740 }
  741 
  742 /*
  743  * Initialization of interface; set up initialization block
  744  * and transmit/receive descriptor rings.
  745  */
  746 int
  747 gem_init(struct ifnet *ifp)
  748 {
  749 
  750         struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
  751         bus_space_tag_t t = sc->sc_bustag;
  752         bus_space_handle_t h = sc->sc_h1;
  753         int s;
  754         u_int32_t v;
  755 
  756         s = splnet();
  757 
  758         DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
  759         /*
  760          * Initialization sequence. The numbered steps below correspond
  761          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  762          * Channel Engine manual (part of the PCIO manual).
  763          * See also the STP2002-STQ document from Sun Microsystems.
  764          */
  765 
  766         /* step 1 & 2. Reset the Ethernet Channel */
  767         gem_stop(ifp, 0);
  768         gem_reset(sc);
  769         DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));
  770 
  771         /* Re-initialize the MIF */
  772         gem_mifinit(sc);
  773 
  774         /* Call MI reset function if any */
  775         if (sc->sc_hwreset)
  776                 (*sc->sc_hwreset)(sc);
  777 
  778         /* step 3. Setup data structures in host memory */
  779         gem_meminit(sc);
  780 
  781         /* step 4. TX MAC registers & counters */
  782         gem_init_regs(sc);
  783 
  784         /* step 5. RX MAC registers & counters */
  785         gem_iff(sc);
  786 
  787         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  788         bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 
  789             (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
  790         bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
  791 
  792         bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 
  793             (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
  794         bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
  795 
  796         /* step 8. Global Configuration & Interrupt Mask */
  797         bus_space_write_4(t, h, GEM_INTMASK,
  798                       ~(GEM_INTR_TX_INTME|
  799                         GEM_INTR_TX_EMPTY|
  800                         GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
  801                         GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
  802                         GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
  803                         GEM_INTR_BERR));
  804         bus_space_write_4(t, h, GEM_MAC_RX_MASK,
  805             GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
  806         bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
  807         bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */
  808 
  809         /* step 9. ETX Configuration: use mostly default values */
  810 
  811         /* Enable DMA */
  812         v = gem_ringsize(GEM_NTXDESC /*XXX*/);
  813         v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
  814             GEM_TX_CONFIG_TXFIFO_TH;
  815         bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
  816         bus_space_write_4(t, h, GEM_TX_KICK, 0);
  817 
  818         /* step 10. ERX Configuration */
  819 
  820         /* Encode Receive Descriptor ring size: four possible values */
  821         v = gem_ringsize(GEM_NRXDESC /*XXX*/);
  822         /* Enable DMA */
  823         bus_space_write_4(t, h, GEM_RX_CONFIG, 
  824                 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
  825                 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
  826                 (0<<GEM_RX_CONFIG_CXM_START_SHFT));
  827         /*
  828          * The following value is for an OFF Threshold of about 3/4 full
  829          * and an ON Threshold of 1/4 full.
  830          */
  831         bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
  832             (3 * sc->sc_rxfifosize / 256) |
  833             ((sc->sc_rxfifosize / 256) << 12));
  834         bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);
  835 
  836         /* step 11. Configure Media */
  837         mii_mediachg(&sc->sc_mii);
  838 
  839         /* step 12. RX_MAC Configuration Register */
  840         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
  841         v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
  842         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
  843 
  844         /* step 14. Issue Transmit Pending command */
  845 
  846         /* Call MI initialization function if any */
  847         if (sc->sc_hwinit)
  848                 (*sc->sc_hwinit)(sc);
  849 
  850         /* step 15.  Give the receiver a swift kick */
  851         bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);
  852 
  853         /* Start the one second timer. */
  854         timeout_add_sec(&sc->sc_tick_ch, 1);
  855 
  856         ifp->if_flags |= IFF_RUNNING;
  857         ifq_clr_oactive(&ifp->if_snd);
  858 
  859         splx(s);
  860 
  861         return (0);
  862 }
  863 
  864 void
  865 gem_init_regs(struct gem_softc *sc)
  866 {
  867         bus_space_tag_t t = sc->sc_bustag;
  868         bus_space_handle_t h = sc->sc_h1;
  869         u_int32_t v;
  870 
  871         /* These regs are not cleared on reset */
  872         sc->sc_inited = 0;
  873         if (!sc->sc_inited) {
  874                 /* Load recommended values */
  875                 bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
  876                 bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
  877                 bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);
  878 
  879                 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
  880                 /* Max frame and max burst size */
  881                 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
  882                     (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
  883 
  884                 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
  885                 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
  886                 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
  887                 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
  888                 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
  889                     ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);
  890 
  891                 /* Secondary MAC addr set to 0:0:0:0:0:0 */
  892                 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
  893                 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
  894                 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
  895 
  896                 /* MAC control addr set to 0:1:c2:0:1:80 */
  897                 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
  898                 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
  899                 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
  900 
  901                 /* MAC filter addr set to 0:0:0:0:0:0 */
  902                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
  903                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
  904                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
  905 
  906                 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
  907                 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
  908 
  909                 sc->sc_inited = 1;
  910         }
  911 
  912         /* Counters need to be zeroed */
  913         bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
  914         bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
  915         bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
  916         bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
  917         bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
  918         bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
  919         bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
  920         bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
  921         bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
  922         bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
  923         bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
  924 
  925         /* Set XOFF PAUSE time */
  926         bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0);
  927 
  928         /*
  929          * Set the internal arbitration to "infinite" bursts of the
  930          * maximum length of 31 * 64 bytes so DMA transfers aren't
  931          * split up in cache line size chunks. This greatly improves
  932          * especially RX performance.
  933          * Enable silicon bug workarounds for the Apple variants.
  934          */
  935         v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
  936         if (sc->sc_pci)
  937                 v |= GEM_CONFIG_BURST_INF;
  938         else
  939                 v |= GEM_CONFIG_BURST_64;
  940         if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
  941                 v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
  942         bus_space_write_4(t, h, GEM_CONFIG, v);
  943 
  944         /*
  945          * Set the station address.
  946          */
  947         bus_space_write_4(t, h, GEM_MAC_ADDR0, 
  948                 (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
  949         bus_space_write_4(t, h, GEM_MAC_ADDR1, 
  950                 (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
  951         bus_space_write_4(t, h, GEM_MAC_ADDR2, 
  952                 (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
  953 }
  954 
  955 /*
  956  * Receive interrupt.
  957  */
  958 int
  959 gem_rint(struct gem_softc *sc)
  960 {
  961         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  962         bus_space_tag_t t = sc->sc_bustag;
  963         bus_space_handle_t h = sc->sc_h1;
  964         struct gem_rxsoft *rxs;
  965         struct mbuf_list ml = MBUF_LIST_INITIALIZER();
  966         struct mbuf *m;
  967         u_int64_t rxstat;
  968         int i, len;
  969 
  970         if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
  971                 return (0);
  972 
  973         for (i = sc->sc_rx_cons; if_rxr_inuse(&sc->sc_rx_ring) > 0;
  974             i = GEM_NEXTRX(i)) {
  975                 rxs = &sc->sc_rxsoft[i];
  976 
  977                 GEM_CDRXSYNC(sc, i,
  978                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  979 
  980                 rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags);
  981 
  982                 if (rxstat & GEM_RD_OWN) {
  983                         /* We have processed all of the receive buffers. */
  984                         break;
  985                 }
  986 
  987                 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
  988                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  989                 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
  990 
  991                 m = rxs->rxs_mbuf;
  992                 rxs->rxs_mbuf = NULL;
  993 
  994                 if_rxr_put(&sc->sc_rx_ring, 1);
  995 
  996                 if (rxstat & GEM_RD_BAD_CRC) {
  997                         ifp->if_ierrors++;
  998 #ifdef GEM_DEBUG
  999                         printf("%s: receive error: CRC error\n",
 1000                                 sc->sc_dev.dv_xname);
 1001 #endif
 1002                         m_freem(m);
 1003                         continue;
 1004                 }
 1005 
 1006 #ifdef GEM_DEBUG
 1007                 if (ifp->if_flags & IFF_DEBUG) {
 1008                         printf("    rxsoft %p descriptor %d: ", rxs, i);
 1009                         printf("gd_flags: 0x%016llx\t", (long long)
 1010                                 GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags));
 1011                         printf("gd_addr: 0x%016llx\n", (long long)
 1012                                 GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr));
 1013                 }
 1014 #endif
 1015 
 1016                 /* No errors; receive the packet. */
 1017                 len = GEM_RD_BUFLEN(rxstat);
 1018 
 1019                 m->m_data += 2; /* We're already off by two */
 1020                 m->m_pkthdr.len = m->m_len = len;
 1021 
 1022                 ml_enqueue(&ml, m);
 1023         }
 1024 
 1025         if (ifiq_input(&ifp->if_rcv, &ml))
 1026                 if_rxr_livelocked(&sc->sc_rx_ring);
 1027 
 1028         /* Update the receive pointer. */
 1029         sc->sc_rx_cons = i;
 1030         gem_fill_rx_ring(sc);
 1031         bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);
 1032 
 1033         DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
 1034                 sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
 1035 
 1036         return (1);
 1037 }
 1038 
 1039 void
 1040 gem_fill_rx_ring(struct gem_softc *sc)
 1041 {
 1042         u_int slots;
 1043 
 1044         for (slots = if_rxr_get(&sc->sc_rx_ring, GEM_NRXDESC - 4);
 1045             slots > 0; slots--) {
 1046                 if (gem_add_rxbuf(sc, sc->sc_rx_prod))
 1047                         break;
 1048         }
 1049         if_rxr_put(&sc->sc_rx_ring, slots);
 1050 }
 1051 
 1052 /*
 1053  * Add a receive buffer to the indicated descriptor.
 1054  */
 1055 int
 1056 gem_add_rxbuf(struct gem_softc *sc, int idx)
 1057 {
 1058         struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
 1059         struct mbuf *m;
 1060         int error;
 1061 
 1062         m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
 1063         if (!m)
 1064                 return (ENOBUFS);
 1065         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1066 
 1067 #ifdef GEM_DEBUG
 1068 /* bzero the packet to check dma */
 1069         memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
 1070 #endif
 1071 
 1072         rxs->rxs_mbuf = m;
 1073 
 1074         error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
 1075             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1076         if (error) {
 1077                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1078                     sc->sc_dev.dv_xname, idx, error);
 1079                 panic("gem_add_rxbuf"); /* XXX */
 1080         }
 1081 
 1082         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1083             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1084 
 1085         GEM_INIT_RXDESC(sc, idx);
 1086 
 1087         sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
 1088 
 1089         return (0);
 1090 }
 1091 
 1092 int
 1093 gem_eint(struct gem_softc *sc, u_int status)
 1094 {
 1095         if ((status & GEM_INTR_MIF) != 0) {
 1096 #ifdef GEM_DEBUG
 1097                 printf("%s: link status changed\n", sc->sc_dev.dv_xname);
 1098 #endif
 1099                 return (1);
 1100         }
 1101 
 1102         printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
 1103         return (1);
 1104 }
 1105 
 1106 int
 1107 gem_pint(struct gem_softc *sc)
 1108 {
 1109         bus_space_tag_t t = sc->sc_bustag;
 1110         bus_space_handle_t seb = sc->sc_h1;
 1111         u_int32_t status;
 1112 
 1113         status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
 1114         status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
 1115 #ifdef GEM_DEBUG
 1116         if (status)
 1117                 printf("%s: link status changed\n", sc->sc_dev.dv_xname);
 1118 #endif
 1119         return (1);
 1120 }
 1121 
/*
 * Main interrupt handler.
 *
 * Reads GEM_STATUS once and dispatches each pending source to its
 * sub-handler (PCS, error, TX, RX).  MAC-level TX/RX faults are
 * handled inline: a TX underrun or too-long packet forces a full
 * reinit, and an RX FIFO overflow arms the RX watchdog timer.
 * Returns nonzero if any source was serviced.
 */
int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n",
		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	/* All-ones likely means the device is gone; don't claim it. */
	if (status == 0xffffffff)
		return (0);

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		/* These faults wedge the transmitter; restart the chip. */
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) {
			KERNEL_LOCK();
			gem_init(ifp);
			KERNEL_UNLOCK();
		}
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;

			/*
			 * Apparently a silicon bug causes ERI to hang
			 * from time to time.  So if we detect an RX
			 * FIFO overflow, we fire off a timer, and
			 * check whether we're still making progress
			 * by looking at the RX FIFO write and read
			 * pointers.
			 */
			sc->sc_rx_fifo_wr_ptr =
				bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
				bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}
 1197 
/*
 * RX watchdog timeout, armed by gem_intr() after an RX FIFO overflow.
 *
 * If the MAC is still in the overflow state and the FIFO read/write
 * pointers have met or have not moved since the overflow was seen,
 * the receiver is considered hung and the chip is reinitialized.
 * Otherwise the recorded pointers are updated and the timer re-armed.
 */
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	/* Interface was brought down in the meantime; nothing to do. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW) {
		if ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
		     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
		      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))) {
			/*
			 * The RX state machine is still in overflow state and
			 * the RX FIFO write and read pointers seem to be
			 * stuck.  Whack the chip over the head to get things
			 * going again.
			 */
			gem_init(ifp);
		} else {
			/*
			 * We made some progress, but it is not certain that
			 * the overflow condition has been resolved.  Check
			 * again.
			 */
			sc->sc_rx_fifo_wr_ptr = rx_fifo_wr_ptr;
			sc->sc_rx_fifo_rd_ptr = rx_fifo_rd_ptr;
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
	}
}
 1237 
 1238 void
 1239 gem_watchdog(struct ifnet *ifp)
 1240 {
 1241         struct gem_softc *sc = ifp->if_softc;
 1242 
 1243         DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
 1244                 "GEM_MAC_RX_CONFIG %x\n",
 1245                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
 1246                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
 1247                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
 1248 
 1249         log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
 1250         ++ifp->if_oerrors;
 1251 
 1252         /* Try to get more packets going. */
 1253         gem_init(ifp);
 1254 }
 1255 
 1256 /*
 1257  * Initialize the MII Management Interface
 1258  */
 1259 void
 1260 gem_mifinit(struct gem_softc *sc)
 1261 {
 1262         bus_space_tag_t t = sc->sc_bustag;
 1263         bus_space_handle_t mif = sc->sc_h1;
 1264 
 1265         /* Configure the MIF in frame mode */
 1266         sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 1267         sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
 1268         bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
 1269 }
 1270 
 1271 /*
 1272  * MII interface
 1273  *
 1274  * The GEM MII interface supports at least three different operating modes:
 1275  *
 1276  * Bitbang mode is implemented using data, clock and output enable registers.
 1277  *
 1278  * Frame mode is implemented by loading a complete frame into the frame
 1279  * register and polling the valid bit for completion.
 1280  *
 1281  * Polling mode uses the frame register but completion is indicated by
 1282  * an interrupt.
 1283  *
 1284  */
 1285 int
 1286 gem_mii_readreg(struct device *self, int phy, int reg)
 1287 {
 1288         struct gem_softc *sc = (void *)self;
 1289         bus_space_tag_t t = sc->sc_bustag;
 1290         bus_space_handle_t mif = sc->sc_h1;
 1291         int n;
 1292         u_int32_t v;
 1293 
 1294 #ifdef GEM_DEBUG
 1295         if (sc->sc_debug)
 1296                 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
 1297 #endif
 1298 
 1299         /* Construct the frame command */
 1300         v = (reg << GEM_MIF_REG_SHIFT)  | (phy << GEM_MIF_PHY_SHIFT) |
 1301                 GEM_MIF_FRAME_READ;
 1302 
 1303         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 1304         for (n = 0; n < 100; n++) {
 1305                 DELAY(1);
 1306                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 1307                 if (v & GEM_MIF_FRAME_TA0)
 1308                         return (v & GEM_MIF_FRAME_DATA);
 1309         }
 1310 
 1311         printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
 1312         return (0);
 1313 }
 1314 
 1315 void
 1316 gem_mii_writereg(struct device *self, int phy, int reg, int val)
 1317 {
 1318         struct gem_softc *sc = (void *)self;
 1319         bus_space_tag_t t = sc->sc_bustag;
 1320         bus_space_handle_t mif = sc->sc_h1;
 1321         int n;
 1322         u_int32_t v;
 1323 
 1324 #ifdef GEM_DEBUG
 1325         if (sc->sc_debug)
 1326                 printf("gem_mii_writereg: phy %d reg %d val %x\n",
 1327                         phy, reg, val);
 1328 #endif
 1329 
 1330         /* Construct the frame command */
 1331         v = GEM_MIF_FRAME_WRITE                 |
 1332             (phy << GEM_MIF_PHY_SHIFT)          |
 1333             (reg << GEM_MIF_REG_SHIFT)          |
 1334             (val & GEM_MIF_FRAME_DATA);
 1335 
 1336         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 1337         for (n = 0; n < 100; n++) {
 1338                 DELAY(1);
 1339                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 1340                 if (v & GEM_MIF_FRAME_TA0)
 1341                         return;
 1342         }
 1343 
 1344         printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
 1345 }
 1346 
/*
 * MII status-change callback.
 *
 * Reprograms the TX MAC, XIF and flow-control registers to match the
 * duplex, speed and pause options of the currently active media.
 */
void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %lld\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: ignore carrier sense and collisions. */
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	/* Select GMII mode only for gigabit media types. */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);

	/*
	 * 802.3x flow control: enable the RX/TX pause bits negotiated
	 * for the active media, preserving the rest of the register.
	 */
	v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG);
	v &= ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
}
 1406 
 1407 int
 1408 gem_pcs_readreg(struct device *self, int phy, int reg)
 1409 {
 1410         struct gem_softc *sc = (void *)self;
 1411         bus_space_tag_t t = sc->sc_bustag;
 1412         bus_space_handle_t pcs = sc->sc_h1;
 1413 
 1414 #ifdef GEM_DEBUG
 1415         if (sc->sc_debug)
 1416                 printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
 1417 #endif
 1418 
 1419         if (phy != GEM_PHYAD_EXTERNAL)
 1420                 return (0);
 1421 
 1422         switch (reg) {
 1423         case MII_BMCR:
 1424                 reg = GEM_MII_CONTROL;
 1425                 break;
 1426         case MII_BMSR:
 1427                 reg = GEM_MII_STATUS;
 1428                 break;
 1429         case MII_ANAR:
 1430                 reg = GEM_MII_ANAR;
 1431                 break;
 1432         case MII_ANLPAR:
 1433                 reg = GEM_MII_ANLPAR;
 1434                 break;
 1435         case MII_EXTSR:
 1436                 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
 1437         default:
 1438                 return (0);
 1439         }
 1440 
 1441         return bus_space_read_4(t, pcs, reg);
 1442 }
 1443 
/*
 * Write a PCS "MII" register.
 *
 * Generic MII register numbers are translated to their GEM PCS
 * equivalents.  Writing the advertisement register first disables the
 * PCS; after an ANAR write or a completed control-register reset, the
 * serial link and the PCS are re-enabled.
 */
void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
			phy, reg, val);
#endif

	/* Only the pseudo external PHY address is serviced. */
	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	/* Disable the PCS before changing the advertisement. */
	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		/* Remember whether the caller requested a PCS reset. */
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		/* Unsupported registers are silently ignored. */
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* Wait for a requested reset to self-clear. */
	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	/* `reg' was remapped above, so this matches an ANAR write. */
	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}
 1494 
 1495 int
 1496 gem_mediachange(struct ifnet *ifp)
 1497 {
 1498         struct gem_softc *sc = ifp->if_softc;
 1499         struct mii_data *mii = &sc->sc_mii;
 1500 
 1501         if (mii->mii_instance) {
 1502                 struct mii_softc *miisc;
 1503                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 1504                         mii_phy_reset(miisc);
 1505         }
 1506 
 1507         return (mii_mediachg(&sc->sc_mii));
 1508 }
 1509 
 1510 void
 1511 gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1512 {
 1513         struct gem_softc *sc = ifp->if_softc;
 1514 
 1515         mii_pollstat(&sc->sc_mii);
 1516         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 1517         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 1518 }
 1519 
 1520 /*
 1521  * Process an ioctl request.
 1522  */
 1523 int
 1524 gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1525 {
 1526         struct gem_softc *sc = ifp->if_softc;
 1527         struct ifreq *ifr = (struct ifreq *)data;
 1528         int s, error = 0;
 1529 
 1530         s = splnet();
 1531 
 1532         switch (cmd) {
 1533         case SIOCSIFADDR:
 1534                 ifp->if_flags |= IFF_UP;
 1535                 if ((ifp->if_flags & IFF_RUNNING) == 0)
 1536                         gem_init(ifp);
 1537                 break;
 1538 
 1539         case SIOCSIFFLAGS:
 1540                 if (ifp->if_flags & IFF_UP) {
 1541                         if (ifp->if_flags & IFF_RUNNING)
 1542                                 error = ENETRESET;
 1543                         else
 1544                                 gem_init(ifp);
 1545                 } else {
 1546                         if (ifp->if_flags & IFF_RUNNING)
 1547                                 gem_stop(ifp, 0);
 1548                 }
 1549 #ifdef GEM_DEBUG
 1550                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1551 #endif
 1552                 break;
 1553 
 1554         case SIOCGIFMEDIA:
 1555         case SIOCSIFMEDIA:
 1556                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
 1557                 break;
 1558 
 1559         case SIOCGIFRXR:
 1560                 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
 1561                     NULL, MCLBYTES, &sc->sc_rx_ring);
 1562                 break;
 1563 
 1564         default:
 1565                 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
 1566         }
 1567 
 1568         if (error == ENETRESET) {
 1569                 if (ifp->if_flags & IFF_RUNNING)
 1570                         gem_iff(sc);
 1571                 error = 0;
 1572         }
 1573 
 1574         splx(s);
 1575         return (error);
 1576 }
 1577 
/*
 * Program the RX MAC packet filter from the interface state:
 * promiscuous mode, promiscuous-group (all multicast), or a 256-bit
 * hash filter built from the multicast address list.
 */
void
gem_iff(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	/* Start from the current config with all filter modes cleared. */
	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
	    GEM_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Address ranges can't be hashed, so fall back to all-multicast. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= GEM_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
}
 1641 
 1642 /*
 1643  * Transmit interrupt.
 1644  */
 1645 int
 1646 gem_tint(struct gem_softc *sc, u_int32_t status)
 1647 {
 1648         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1649         struct gem_sxd *sd;
 1650         u_int32_t cons, prod;
 1651         int free = 0;
 1652 
 1653         prod = status >> 19;
 1654         cons = sc->sc_tx_cons;
 1655         while (cons != prod) {
 1656                 sd = &sc->sc_txd[cons];
 1657                 if (sd->sd_mbuf != NULL) {
 1658                         bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
 1659                             sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 1660                         bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
 1661                         m_freem(sd->sd_mbuf);
 1662                         sd->sd_mbuf = NULL;
 1663                 }
 1664 
 1665                 free = 1;
 1666 
 1667                 cons++;
 1668                 cons &= GEM_NTXDESC - 1;
 1669         }
 1670 
 1671         if (free == 0)
 1672                 return (0);
 1673 
 1674         sc->sc_tx_cons = cons;
 1675 
 1676         if (sc->sc_tx_prod == cons)
 1677                 ifp->if_timer = 0;
 1678 
 1679         if (ifq_is_oactive(&ifp->if_snd))
 1680                 ifq_restart(&ifp->if_snd);
 1681 
 1682         return (1);
 1683 }
 1684 
 1685 int
 1686 gem_load_mbuf(struct gem_softc *sc, struct gem_sxd *sd, struct mbuf *m)
 1687 {
 1688         int error;
 1689 
 1690         error = bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
 1691             BUS_DMA_NOWAIT);
 1692         switch (error) {
 1693         case 0:
 1694                 break;
 1695 
 1696         case EFBIG: /* mbuf chain is too fragmented */
 1697                 if (m_defrag(m, M_DONTWAIT) == 0 &&
 1698                     bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
 1699                     BUS_DMA_NOWAIT) == 0)
 1700                         break;
 1701                 /* FALLTHROUGH */
 1702         default:
 1703                 return (1);
 1704         }
 1705 
 1706         return (0);
 1707 }
 1708 
/*
 * ifq start routine: drain the send queue into the TX descriptor ring
 * and kick the chip.  Stops early (and marks the queue oactive) when
 * the ring may not have room for another maximally-fragmented packet.
 */
void
gem_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct gem_softc *sc = ifp->if_softc;
	struct gem_sxd *sd;
	struct mbuf *m;
	uint64_t flags, nflags;
	bus_dmamap_t map;
	uint32_t prod;
	uint32_t free, used = 0;
	uint32_t first, last;
	int i;

	prod = sc->sc_tx_prod;

	/*
	 * figure out space: number of free descriptors between the
	 * producer and consumer indices, modulo the ring size.
	 * (prod == cons is treated as an empty ring.)
	 */
	free = sc->sc_tx_cons;
	if (free <= prod)
		free += GEM_NTXDESC;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
	    0, sizeof(struct gem_desc) * GEM_NTXDESC,
	    BUS_DMASYNC_PREWRITE);

	for (;;) {
		/*
		 * Reserve room for a worst-case packet (GEM_NTXSEGS
		 * segments) plus one slack slot before dequeueing;
		 * otherwise stall the queue until gem_tint() restarts it.
		 */
		if (used + GEM_NTXSEGS + 1 > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		first = prod;
		sd = &sc->sc_txd[first];
		map = sd->sd_map;

		/* Drop the packet if it can't be DMA-loaded. */
		if (gem_load_mbuf(sc, sd, m)) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Fill one descriptor per DMA segment.  Only the first
		 * descriptor carries START_OF_PACKET (via nflags); the
		 * last one is patched with END_OF_PACKET below.
		 */
		nflags = GEM_TD_START_OF_PACKET;
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = nflags |
			    (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE);

			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_addr,
			    map->dm_segs[i].ds_addr);
			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_flags,
			    flags);

			last = prod;
			prod++;
			prod &= GEM_NTXDESC - 1;

			nflags = 0;
		}
		GEM_DMA_WRITE(sc, &sc->sc_txdescs[last].gd_flags,
		    GEM_TD_END_OF_PACKET | flags);

		used += map->dm_nsegs;
		/*
		 * Record the mbuf in the LAST slot and swap the dmamaps
		 * of the first and last slots: gem_tint() frees/unloads
		 * at the slot where sd_mbuf is set, so the loaded map
		 * must live alongside the mbuf it maps.
		 */
		sc->sc_txd[last].sd_mbuf = m;
		sc->sc_txd[first].sd_map = sc->sc_txd[last].sd_map;
		sc->sc_txd[last].sd_map = map;
	}

	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
	    0, sizeof(struct gem_desc) * GEM_NTXDESC,
	    BUS_DMASYNC_POSTWRITE);

	if (used == 0)
		return;

	/* Commit. */
	sc->sc_tx_prod = prod;

	/* Transmit. */
	bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, prod);

	/* Set timeout in case hardware has problems transmitting. */
	ifp->if_timer = 5;
}

Cache object: b25b04b40dee8d55579439b05a2b3859


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.