FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/gem.c

    1 /*      $NetBSD: gem.c,v 1.135 2022/09/25 18:43:32 thorpej Exp $ */
    2 
    3 /*
    4  *
    5  * Copyright (C) 2001 Eduardo Horvath.
    6  * Copyright (c) 2001-2003 Thomas Moestl
    7  * All rights reserved.
    8  *
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  */
   32 
   33 /*
   34  * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
   35  * See `GEM Gigabit Ethernet ASIC Specification'
   36  *   http://www.sun.com/processors/manuals/ge.pdf
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.135 2022/09/25 18:43:32 thorpej Exp $");
   41 
   42 #include "opt_inet.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/callout.h>
   47 #include <sys/mbuf.h>
   48 #include <sys/syslog.h>
   49 #include <sys/kernel.h>
   50 #include <sys/socket.h>
   51 #include <sys/ioctl.h>
   52 #include <sys/errno.h>
   53 #include <sys/device.h>
   54 
   55 #include <machine/endian.h>
   56 
   57 #include <net/if.h>
   58 #include <net/if_dl.h>
   59 #include <net/if_media.h>
   60 #include <net/if_ether.h>
   61 
   62 #ifdef INET
   63 #include <netinet/in.h>
   64 #include <netinet/in_systm.h>
   65 #include <netinet/in_var.h>
   66 #include <netinet/ip.h>
   67 #include <netinet/tcp.h>
   68 #include <netinet/udp.h>
   69 #endif
   70 
   71 #include <net/bpf.h>
   72 
   73 #include <sys/bus.h>
   74 #include <sys/intr.h>
   75 
   76 #include <dev/mii/mii.h>
   77 #include <dev/mii/miivar.h>
   78 #include <dev/mii/mii_bitbang.h>
   79 
   80 #include <dev/ic/gemreg.h>
   81 #include <dev/ic/gemvar.h>
   82 
   83 #define TRIES   10000
   84 
   85 static void     gem_inten(struct gem_softc *);
   86 static void     gem_start(struct ifnet *);
   87 static void     gem_stop(struct ifnet *, int);
   88 int             gem_ioctl(struct ifnet *, u_long, void *);
   89 void            gem_tick(void *);
   90 void            gem_watchdog(struct ifnet *);
   91 void            gem_rx_watchdog(void *);
   92 void            gem_pcs_start(struct gem_softc *sc);
   93 void            gem_pcs_stop(struct gem_softc *sc, int);
   94 int             gem_init(struct ifnet *);
   95 void            gem_init_regs(struct gem_softc *sc);
   96 static int      gem_ringsize(int sz);
   97 static int      gem_meminit(struct gem_softc *);
   98 void            gem_mifinit(struct gem_softc *);
   99 static int      gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int,
  100                     uint32_t, uint32_t);
  101 void            gem_reset(struct gem_softc *);
  102 int             gem_reset_rx(struct gem_softc *sc);
  103 static void     gem_reset_rxdma(struct gem_softc *sc);
  104 static void     gem_rx_common(struct gem_softc *sc);
  105 int             gem_reset_tx(struct gem_softc *sc);
  106 int             gem_disable_rx(struct gem_softc *sc);
  107 int             gem_disable_tx(struct gem_softc *sc);
  108 static void     gem_rxdrain(struct gem_softc *sc);
  109 int             gem_add_rxbuf(struct gem_softc *sc, int idx);
  110 void            gem_setladrf(struct gem_softc *);
  111 
  112 /* MII methods & callbacks */
  113 static int      gem_mii_readreg(device_t, int, int, uint16_t *);
  114 static int      gem_mii_writereg(device_t, int, int, uint16_t);
  115 static void     gem_mii_statchg(struct ifnet *);
  116 
  117 static int      gem_ifflags_cb(struct ethercom *);
  118 
  119 void            gem_statuschange(struct gem_softc *);
  120 
  121 int             gem_ser_mediachange(struct ifnet *);
  122 void            gem_ser_mediastatus(struct ifnet *, struct ifmediareq *);
  123 
  124 static void     gem_partial_detach(struct gem_softc *, enum gem_attach_stage);
  125 
  126 struct mbuf     *gem_get(struct gem_softc *, int, int);
  127 int             gem_put(struct gem_softc *, int, struct mbuf *);
  128 void            gem_read(struct gem_softc *, int, int);
  129 int             gem_pint(struct gem_softc *);
  130 int             gem_eint(struct gem_softc *, u_int);
  131 int             gem_rint(struct gem_softc *);
  132 int             gem_tint(struct gem_softc *);
  133 void            gem_power(int, void *);
  134 
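       /*
        * DPRINTF is compiled in only under GEM_DEBUG and is further
        * gated at run time on IFF_DEBUG, so a debug kernel stays quiet
        * until the flag is set (e.g. with something like
        * `ifconfig gem0 debug').  Note the double parentheses at call
        * sites, as in DPRINTF(sc, ("%s: gem_reset\n", ...)).
        */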
  135 #ifdef GEM_DEBUG
  136 static void gem_txsoft_print(const struct gem_softc *, int, int);
  137 #define DPRINTF(sc, x)  if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
  138                                 printf x
  139 #else
  140 #define DPRINTF(sc, x)  /* nothing */
  141 #endif
  142 
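       /*
        * Smallest frame the hardware should put on the wire: the
        * minimum payload plus the Ethernet header (the CRC is appended
        * by the MAC).  A zeroed buffer of this size is mapped in
        * gem_attach() and used to pad short packets.
        */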
  143 #define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
  144 
  145 int
  146 gem_detach(struct gem_softc *sc, int flags)
  147 {
  148         int i;
  149         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  150         bus_space_tag_t t = sc->sc_bustag;
  151         bus_space_handle_t h = sc->sc_h1;
  152 
  153         /*
  154          * Free any resources we've allocated during the attach.
  155          * Do this in reverse order and fall through.
  156          */
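               /*
                * Each case below releases what the corresponding attach
                * stage allocated and then falls through, so entering
                * the switch at sc_att_stage unwinds everything that was
                * set up before the attach stopped.
                */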
  157         switch (sc->sc_att_stage) {
  158         case GEM_ATT_BACKEND_2:
  159         case GEM_ATT_BACKEND_1:
  160         case GEM_ATT_FINISHED:
  161                 bus_space_write_4(t, h, GEM_INTMASK, ~(uint32_t)0);
  162                 gem_stop(&sc->sc_ethercom.ec_if, 1);
  163 
  164 #ifdef GEM_COUNTERS
  165                 for (i = __arraycount(sc->sc_ev_rxhist); --i >= 0; )
  166                         evcnt_detach(&sc->sc_ev_rxhist[i]);
  167                 evcnt_detach(&sc->sc_ev_rxnobuf);
  168                 evcnt_detach(&sc->sc_ev_rxfull);
  169                 evcnt_detach(&sc->sc_ev_rxint);
  170                 evcnt_detach(&sc->sc_ev_txint);
  171                 evcnt_detach(&sc->sc_ev_rxoverflow);
  172 #endif
  173                 evcnt_detach(&sc->sc_ev_intr);
  174 
  175                 rnd_detach_source(&sc->rnd_source);
  176                 ether_ifdetach(ifp);
  177                 if_detach(ifp);
  178 
  179                 callout_destroy(&sc->sc_tick_ch);
  180                 callout_destroy(&sc->sc_rx_watchdog);
  181 
  182                 /*FALLTHROUGH*/
  183         case GEM_ATT_MII:
  184                 sc->sc_att_stage = GEM_ATT_MII;
  185                 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
  186                 ifmedia_fini(&sc->sc_mii.mii_media);
  187 
  188                 /*FALLTHROUGH*/
  189         case GEM_ATT_7:
  190                 for (i = 0; i < GEM_NRXDESC; i++) {
  191                         if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
  192                                 bus_dmamap_destroy(sc->sc_dmatag,
  193                                     sc->sc_rxsoft[i].rxs_dmamap);
  194                 }
  195                 /*FALLTHROUGH*/
  196         case GEM_ATT_6:
  197                 for (i = 0; i < GEM_TXQUEUELEN; i++) {
  198                         if (sc->sc_txsoft[i].txs_dmamap != NULL)
  199                                 bus_dmamap_destroy(sc->sc_dmatag,
  200                                     sc->sc_txsoft[i].txs_dmamap);
  201                 }
  202                 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
  203                 /*FALLTHROUGH*/
  204         case GEM_ATT_5:
  205                 bus_dmamap_unload(sc->sc_dmatag, sc->sc_nulldmamap);
  206                 /*FALLTHROUGH*/
  207         case GEM_ATT_4:
  208                 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
  209                 /*FALLTHROUGH*/
  210         case GEM_ATT_3:
  211                 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
  212                 /*FALLTHROUGH*/
  213         case GEM_ATT_2:
  214                 bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
  215                     sizeof(struct gem_control_data));
  216                 /*FALLTHROUGH*/
  217         case GEM_ATT_1:
  218                 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
  219                 /*FALLTHROUGH*/
  220         case GEM_ATT_0:
  221                 sc->sc_att_stage = GEM_ATT_0;
  222                 /*FALLTHROUGH*/
  223         case GEM_ATT_BACKEND_0:
  224                 break;
  225         }
  226         return 0;
  227 }
  228 
  229 static void
  230 gem_partial_detach(struct gem_softc *sc, enum gem_attach_stage stage)
  231 {
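               /*
                * Record how far the attach got, then invoke the bus
                * front end's detach routine, which is expected to call
                * back into gem_detach() and unwind the stages recorded
                * in sc_att_stage.
                */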
  232         cfattach_t ca = device_cfattach(sc->sc_dev);
  233 
  234         sc->sc_att_stage = stage;
  235         (*ca->ca_detach)(sc->sc_dev, 0);
  236 }
  237 
  238 /*
  239  * gem_attach:
  240  *
  241  *      Attach a Gem interface to the system.
  242  */
  243 void
  244 gem_attach(struct gem_softc *sc, const uint8_t *enaddr)
  245 {
  246         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  247         struct mii_data *mii = &sc->sc_mii;
  248         bus_space_tag_t t = sc->sc_bustag;
  249         bus_space_handle_t h = sc->sc_h1;
  250         struct ifmedia_entry *ife;
  251         int i, error, phyaddr;
  252         uint32_t v;
  253         char *nullbuf;
  254 
  255         /* Make sure the chip is stopped. */
  256         ifp->if_softc = sc;
  257         gem_reset(sc);
  258 
  259         /*
  260          * Allocate the control data structures, and create and load the
   261          * DMA map for it.  gem_control_data is 9216 bytes; we have space
   262          * for the padding buffer in the bus_dmamem_alloc()'d memory.
  263          */
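               /*
                * The single allocation is laid out as
                *
                *   [ struct gem_control_data | ETHER_MIN_TX pad bytes ]
                *
                * with nullbuf (set up below) pointing just past the
                * control data.
                */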
  264         if ((error = bus_dmamem_alloc(sc->sc_dmatag,
  265             sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
  266             0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
  267                 aprint_error_dev(sc->sc_dev,
  268                    "unable to allocate control data, error = %d\n",
  269                     error);
  270                 gem_partial_detach(sc, GEM_ATT_0);
  271                 return;
  272         }
  273 
  274         /* XXX should map this in with correct endianness */
  275         if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
  276             sizeof(struct gem_control_data), (void **)&sc->sc_control_data,
  277             BUS_DMA_COHERENT)) != 0) {
  278                 aprint_error_dev(sc->sc_dev,
  279                     "unable to map control data, error = %d\n", error);
  280                 gem_partial_detach(sc, GEM_ATT_1);
  281                 return;
  282         }
  283 
  284         nullbuf =
  285             (char *)sc->sc_control_data + sizeof(struct gem_control_data);
  286 
  287         if ((error = bus_dmamap_create(sc->sc_dmatag,
  288             sizeof(struct gem_control_data), 1,
  289             sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
  290                 aprint_error_dev(sc->sc_dev,
  291                     "unable to create control data DMA map, error = %d\n",
  292                     error);
  293                 gem_partial_detach(sc, GEM_ATT_2);
  294                 return;
  295         }
  296 
  297         if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
  298             sc->sc_control_data, sizeof(struct gem_control_data), NULL,
  299             0)) != 0) {
  300                 aprint_error_dev(sc->sc_dev,
  301                     "unable to load control data DMA map, error = %d\n",
  302                     error);
  303                 gem_partial_detach(sc, GEM_ATT_3);
  304                 return;
  305         }
  306 
  307         memset(nullbuf, 0, ETHER_MIN_TX);
  308         if ((error = bus_dmamap_create(sc->sc_dmatag,
  309             ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
  310                 aprint_error_dev(sc->sc_dev,
  311                     "unable to create padding DMA map, error = %d\n", error);
  312                 gem_partial_detach(sc, GEM_ATT_4);
  313                 return;
  314         }
  315 
  316         if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
  317             nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
  318                 aprint_error_dev(sc->sc_dev,
  319                     "unable to load padding DMA map, error = %d\n", error);
  320                 gem_partial_detach(sc, GEM_ATT_5);
  321                 return;
  322         }
  323 
  324         bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
  325             BUS_DMASYNC_PREWRITE);
  326 
  327         /*
  328          * Initialize the transmit job descriptors.
  329          */
  330         SIMPLEQ_INIT(&sc->sc_txfreeq);
  331         SIMPLEQ_INIT(&sc->sc_txdirtyq);
  332 
  333         /*
  334          * Create the transmit buffer DMA maps.
  335          */
  336         for (i = 0; i < GEM_TXQUEUELEN; i++) {
  337                 struct gem_txsoft *txs;
  338 
  339                 txs = &sc->sc_txsoft[i];
  340                 txs->txs_mbuf = NULL;
  341                 if ((error = bus_dmamap_create(sc->sc_dmatag,
  342                     ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
  343                     ETHER_MAX_LEN_JUMBO, 0, 0,
  344                     &txs->txs_dmamap)) != 0) {
  345                         aprint_error_dev(sc->sc_dev,
  346                             "unable to create tx DMA map %d, error = %d\n",
  347                             i, error);
  348                         gem_partial_detach(sc, GEM_ATT_6);
  349                         return;
  350                 }
  351                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
  352         }
  353 
  354         /*
  355          * Create the receive buffer DMA maps.
  356          */
  357         for (i = 0; i < GEM_NRXDESC; i++) {
  358                 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
  359                     MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  360                         aprint_error_dev(sc->sc_dev,
  361                             "unable to create rx DMA map %d, error = %d\n",
  362                             i, error);
  363                         gem_partial_detach(sc, GEM_ATT_7);
  364                         return;
  365                 }
  366                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
  367         }
  368 
  369         /* Initialize ifmedia structures and MII info */
  370         mii->mii_ifp = ifp;
  371         mii->mii_readreg = gem_mii_readreg;
  372         mii->mii_writereg = gem_mii_writereg;
  373         mii->mii_statchg = gem_mii_statchg;
  374 
  375         sc->sc_ethercom.ec_mii = mii;
  376 
  377         /*
   378          * Initialization based on `GEM Gigabit Ethernet ASIC Specification'
   379          * Section 3.2.1 `Initialization Sequence'.
   380          * However, we can't infer SERDES or Serialink from
   381          * GEM_MIF_CONFIG_MDI0 and GEM_MIF_CONFIG_MDI1 alone, as both
   382          * are set on the Sun X1141A (with SERDES).  So we rely on our
   383          * bus attachment setting GEM_SERDES or GEM_SERIAL.
   384          * Also, for variants that report 2 PHYs, we prefer the external
   385          * PHY over the internal one, so we look for that first.
  386          */
  387         gem_mifinit(sc);
  388 
  389         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
  390                 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
  391                     ether_mediastatus);
  392                 /* Look for external PHY */
  393                 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
  394                         sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
  395                         bus_space_write_4(t, h, GEM_MIF_CONFIG,
  396                             sc->sc_mif_config);
  397                         switch (sc->sc_variant) {
  398                         case GEM_SUN_ERI:
  399                                 phyaddr = GEM_PHYAD_EXTERNAL;
  400                                 break;
  401                         default:
  402                                 phyaddr = MII_PHY_ANY;
  403                                 break;
  404                         }
  405                         mii_attach(sc->sc_dev, mii, 0xffffffff, phyaddr,
  406                             MII_OFFSET_ANY, MIIF_FORCEANEG);
  407                 }
  408 #ifdef GEM_DEBUG
   409                 if (!LIST_EMPTY(&mii->mii_phys))
   410                         aprint_debug_dev(sc->sc_dev, "using external PHY\n");
  411 #endif
  412                 /* Look for internal PHY if no external PHY was found */
  413                 if (LIST_EMPTY(&mii->mii_phys) &&
  414                     ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) ||
  415                      (sc->sc_variant == GEM_APPLE_K2_GMAC))) {
  416                         sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
  417                         bus_space_write_4(t, h, GEM_MIF_CONFIG,
  418                             sc->sc_mif_config);
  419                         switch (sc->sc_variant) {
  420                         case GEM_SUN_ERI:
  421                         case GEM_APPLE_K2_GMAC:
  422                                 phyaddr = GEM_PHYAD_INTERNAL;
  423                                 break;
  424                         case GEM_APPLE_GMAC:
  425                                 phyaddr = GEM_PHYAD_EXTERNAL;
  426                                 break;
  427                         default:
  428                                 phyaddr = MII_PHY_ANY;
  429                                 break;
  430                         }
  431                         mii_attach(sc->sc_dev, mii, 0xffffffff, phyaddr,
  432                             MII_OFFSET_ANY, MIIF_FORCEANEG);
  433 #ifdef GEM_DEBUG
  434                         if (!LIST_EMPTY(&mii->mii_phys))
  435                                 aprint_debug_dev(sc->sc_dev,
  436                                     "using internal PHY\n");
  437 #endif
  438                 }
  439                 if (LIST_EMPTY(&mii->mii_phys)) {
   440                         /* No PHY attached */
   441                         aprint_error_dev(sc->sc_dev,
   442                             "PHY probe failed\n");
   443                         gem_partial_detach(sc, GEM_ATT_MII);
   444                         return;
  445                 } else {
  446                         struct mii_softc *child;
  447 
  448                         /*
  449                          * Walk along the list of attached MII devices and
  450                          * establish an `MII instance' to `PHY number'
  451                          * mapping.
  452                          */
  453                         LIST_FOREACH(child, &mii->mii_phys, mii_list) {
  454                                 /*
  455                                  * Note: we support just one PHY: the internal
  456                                  * or external MII is already selected for us
   457                                  * by the GEM_MIF_CONFIG register.
  458                                  */
  459                                 if (child->mii_phy > 1 || child->mii_inst > 0) {
  460                                         aprint_error_dev(sc->sc_dev,
  461                                             "cannot accommodate MII device"
  462                                             " %s at PHY %d, instance %d\n",
  463                                                device_xname(child->mii_dev),
  464                                                child->mii_phy, child->mii_inst);
  465                                         continue;
  466                                 }
  467                                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  468                         }
  469 
  470                         if (sc->sc_variant != GEM_SUN_ERI)
  471                                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  472                                     GEM_MII_DATAPATH_MII);
  473 
  474                         /*
  475                          * XXX - we can really do the following ONLY if the
  476                          * PHY indeed has the auto negotiation capability!!
  477                          */
  478                         ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
  479                 }
  480         } else {
  481                 ifmedia_init(&mii->mii_media, IFM_IMASK, gem_ser_mediachange,
  482                     gem_ser_mediastatus);
  483                 /* SERDES or Serialink */
  484                 if (sc->sc_flags & GEM_SERDES) {
  485                         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  486                             GEM_MII_DATAPATH_SERDES);
  487                 } else {
  488                         sc->sc_flags |= GEM_SERIAL;
  489                         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
  490                             GEM_MII_DATAPATH_SERIAL);
  491                 }
  492 
  493                 aprint_normal_dev(sc->sc_dev, "using external PCS %s: ",
  494                     sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink");
  495 
  496                 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
  497                 /* Check for FDX and HDX capabilities */
  498                 sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR);
  499                 if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) {
  500                         ifmedia_add(&mii->mii_media, IFM_ETHER |
  501                             IFM_1000_SX | IFM_MANUAL | IFM_FDX, 0, NULL);
  502                         aprint_normal("1000baseSX-FDX, ");
  503                 }
  504                 if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) {
  505                         ifmedia_add(&mii->mii_media, IFM_ETHER |
  506                             IFM_1000_SX | IFM_MANUAL | IFM_HDX, 0, NULL);
  507                         aprint_normal("1000baseSX-HDX, ");
  508                 }
  509                 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
  510                 sc->sc_mii_media = IFM_AUTO;
  511                 aprint_normal("auto\n");
  512 
  513                 gem_pcs_stop(sc, 1);
  514         }
  515 
  516         /*
  517          * From this point forward, the attachment cannot fail.  A failure
  518          * before this point releases all resources that may have been
  519          * allocated.
  520          */
  521 
  522         /* Announce ourselves. */
  523         aprint_normal_dev(sc->sc_dev, "Ethernet address %s",
  524             ether_sprintf(enaddr));
  525 
  526         /* Get RX FIFO size */
  527         sc->sc_rxfifosize = 64 *
  528             bus_space_read_4(t, h, GEM_RX_FIFO_SIZE);
  529         aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);
  530 
  531         /* Get TX FIFO size */
  532         v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE);
  533         aprint_normal(", %uKB TX fifo\n", v / 16);
  534 
  535         /* Initialize ifnet structure. */
  536         strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
  537         ifp->if_softc = sc;
  538         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  539         sc->sc_if_flags = ifp->if_flags;
  540 #if 0
  541         /*
  542          * The GEM hardware supports basic TCP checksum offloading only.
  543          * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80)
  544          * have bugs in the receive checksum, so don't enable it for now.
  545          */
  546         if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) ||
  547             (GEM_IS_APPLE(sc) &&
  548             (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80)))
  549                 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
  550 #endif
  551         ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
  552         ifp->if_start = gem_start;
  553         ifp->if_ioctl = gem_ioctl;
  554         ifp->if_watchdog = gem_watchdog;
  555         ifp->if_stop = gem_stop;
  556         ifp->if_init = gem_init;
  557         IFQ_SET_READY(&ifp->if_snd);
  558 
  559         /*
  560          * If we support GigE media, we support jumbo frames too.
  561          * Unless we are Apple.
  562          */
  563         TAILQ_FOREACH(ife, &mii->mii_media.ifm_list, ifm_list) {
  564                 if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T ||
  565                     IFM_SUBTYPE(ife->ifm_media) == IFM_1000_SX ||
  566                     IFM_SUBTYPE(ife->ifm_media) == IFM_1000_LX ||
  567                     IFM_SUBTYPE(ife->ifm_media) == IFM_1000_CX) {
  568                         if (!GEM_IS_APPLE(sc))
  569                                 sc->sc_ethercom.ec_capabilities
  570                                     |= ETHERCAP_JUMBO_MTU;
  571                         sc->sc_flags |= GEM_GIGABIT;
  572                         break;
  573                 }
  574         }
  575 
  576         /* claim 802.1q capability */
  577         sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
  578 
  579         /* Attach the interface. */
  580         if_attach(ifp);
  581         if_deferred_start_init(ifp, NULL);
  582         ether_ifattach(ifp, enaddr);
  583         ether_set_ifflags_cb(&sc->sc_ethercom, gem_ifflags_cb);
  584 
  585         rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
  586                           RND_TYPE_NET, RND_FLAG_DEFAULT);
  587 
  588         evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
  589             NULL, device_xname(sc->sc_dev), "interrupts");
  590 #ifdef GEM_COUNTERS
  591         evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
  592             &sc->sc_ev_intr, device_xname(sc->sc_dev), "tx interrupts");
  593         evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
  594             &sc->sc_ev_intr, device_xname(sc->sc_dev), "rx interrupts");
  595         evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
  596             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx ring full");
  597         evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
  598             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx malloc failure");
  599         evcnt_attach_dynamic(&sc->sc_ev_rxoverflow, EVCNT_TYPE_INTR,
  600             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx overflow");
  601         evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
  602             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 0desc");
  603         evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
  604             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 1desc");
  605         evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
  606             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 2desc");
  607         evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
  608             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 3desc");
  609         evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
  610             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >3desc");
  611         evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
  612             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >7desc");
  613         evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
  614             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >15desc");
  615         evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
  616             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >31desc");
  617         evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
  618             &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >63desc");
  619 #endif
  620 
  621         callout_init(&sc->sc_tick_ch, 0);
  622         callout_setfunc(&sc->sc_tick_ch, gem_tick, sc);
  623 
  624         callout_init(&sc->sc_rx_watchdog, 0);
  625         callout_setfunc(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
  626 
  627         sc->sc_att_stage = GEM_ATT_FINISHED;
  628 
  629         return;
  630 }
  631 
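       /*
        * Periodic timer callout.  With an MII PHY this just polls the
        * PHY once a second and re-arms itself; in SERDES/Serialink mode
        * it acts as a watchdog that gem_pcs_start() arms for ten
        * seconds, so if it ever fires it means no PCS interrupt arrived
        * and the chip is reinitialized.
        */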
  632 void
  633 gem_tick(void *arg)
  634 {
  635         struct gem_softc *sc = arg;
  636         int s;
  637 
  638         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) {
  639                 /*
  640                  * We have to reset everything if we failed to get a
  641                  * PCS interrupt.  Restarting the callout is handled
  642                  * in gem_pcs_start().
  643                  */
  644                 gem_init(&sc->sc_ethercom.ec_if);
  645         } else {
  646                 s = splnet();
  647                 mii_tick(&sc->sc_mii);
  648                 splx(s);
  649                 callout_schedule(&sc->sc_tick_ch, hz);
  650         }
  651 }
  652 
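       /*
        * Poll register `r' until the bits in `clr' read back as zero
        * and the bits in `set' read back as one, or the wait times out.
        * With TRIES == 10000 and a 100 us DELAY per iteration the worst
        * case is roughly one second.  Returns nonzero on success, zero
        * on timeout.
        */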
  653 static int
  654 gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, uint32_t clr,
  655     uint32_t set)
  656 {
  657         int i;
  658         uint32_t reg;
  659 
  660         for (i = TRIES; i--; DELAY(100)) {
  661                 reg = bus_space_read_4(sc->sc_bustag, h, r);
  662                 if ((reg & clr) == 0 && (reg & set) == set)
  663                         return (1);
  664         }
  665         return (0);
  666 }
  667 
  668 void
  669 gem_reset(struct gem_softc *sc)
  670 {
  671         bus_space_tag_t t = sc->sc_bustag;
  672         bus_space_handle_t h = sc->sc_h2;
  673         int s;
  674 
  675         s = splnet();
  676         DPRINTF(sc, ("%s: gem_reset\n", device_xname(sc->sc_dev)));
  677         gem_reset_rx(sc);
  678         gem_reset_tx(sc);
  679 
  680         /* Do a full reset */
  681         bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
  682         if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
  683                 aprint_error_dev(sc->sc_dev, "cannot reset device\n");
  684         splx(s);
  685 }
  686 
  687 
  688 /*
  689  * gem_rxdrain:
  690  *
  691  *      Drain the receive queue.
  692  */
  693 static void
  694 gem_rxdrain(struct gem_softc *sc)
  695 {
  696         struct gem_rxsoft *rxs;
  697         int i;
  698 
  699         for (i = 0; i < GEM_NRXDESC; i++) {
  700                 rxs = &sc->sc_rxsoft[i];
  701                 if (rxs->rxs_mbuf != NULL) {
  702                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
  703                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  704                         bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
  705                         m_freem(rxs->rxs_mbuf);
  706                         rxs->rxs_mbuf = NULL;
  707                 }
  708         }
  709 }
  710 
  711 /*
  712  * Reset the whole thing.
  713  */
  714 static void
  715 gem_stop(struct ifnet *ifp, int disable)
  716 {
  717         struct gem_softc *sc = ifp->if_softc;
  718         struct gem_txsoft *txs;
  719 
  720         DPRINTF(sc, ("%s: gem_stop\n", device_xname(sc->sc_dev)));
  721 
  722         callout_halt(&sc->sc_tick_ch, NULL);
  723         callout_halt(&sc->sc_rx_watchdog, NULL);
  724         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
  725                 gem_pcs_stop(sc, disable);
  726         else
  727                 mii_down(&sc->sc_mii);
  728 
  729         /* XXX - Should we reset these instead? */
  730         gem_disable_tx(sc);
  731         gem_disable_rx(sc);
  732 
  733         /*
  734          * Release any queued transmit buffers.
  735          */
  736         while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
  737                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
  738                 if (txs->txs_mbuf != NULL) {
  739                         bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0,
  740                             txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
  741                         bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
  742                         m_freem(txs->txs_mbuf);
  743                         txs->txs_mbuf = NULL;
  744                 }
  745                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
  746         }
  747 
  748         /*
  749          * Mark the interface down and cancel the watchdog timer.
  750          */
  751         ifp->if_flags &= ~IFF_RUNNING;
  752         sc->sc_if_flags = ifp->if_flags;
  753         ifp->if_timer = 0;
  754 
  755         if (disable)
  756                 gem_rxdrain(sc);
  757 }
  758 
  759 
  760 /*
  761  * Reset the receiver
  762  */
  763 int
  764 gem_reset_rx(struct gem_softc *sc)
  765 {
  766         bus_space_tag_t t = sc->sc_bustag;
  767         bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
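               /*
                * Note the two register banks: the ERX/ETX/MAC registers
                * are reached through sc_h1, while the global reset and
                * status block (GEM_RESET) is reached through sc_h2.
                */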
  768 
  769         /*
  770          * Resetting while DMA is in progress can cause a bus hang, so we
  771          * disable DMA first.
  772          */
  773         gem_disable_rx(sc);
  774         bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
  775         bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  776         /* Wait till it finishes */
  777         if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
  778                 aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
  779         /* Wait 5ms extra. */
  780         delay(5000);
  781 
  782         /* Finally, reset the ERX */
  783         bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
   784         bus_space_barrier(t, h2, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
  785         /* Wait till it finishes */
  786         if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
  787                 aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
  788                 return (1);
  789         }
  790         return (0);
  791 }
  792 
  793 
  794 /*
  795  * Reset the receiver DMA engine.
  796  *
   797  * Intended to be used on GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW,
   798  * etc., in order to reset only the receiver DMA engine and avoid a full
   799  * reset, which would also take down the link and clear the FIFOs.
  800  */
  801 static void
  802 gem_reset_rxdma(struct gem_softc *sc)
  803 {
  804         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  805         bus_space_tag_t t = sc->sc_bustag;
  806         bus_space_handle_t h = sc->sc_h1;
  807         int i;
  808 
  809         if (gem_reset_rx(sc) != 0) {
  810                 gem_init(ifp);
  811                 return;
  812         }
  813         for (i = 0; i < GEM_NRXDESC; i++)
  814                 if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
  815                         GEM_UPDATE_RXDESC(sc, i);
  816         sc->sc_rxptr = 0;
  817         GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
  818         GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
  819 
  820         /* Reprogram Descriptor Ring Base Addresses */
  821         bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
  822             ((uint64_t)GEM_CDRXADDR(sc, 0)) >> 32);
  823         bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
  824 
  825         /* Redo ERX Configuration */
  826         gem_rx_common(sc);
  827 
  828         /* Give the receiver a swift kick */
  829         bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);
  830 }
  831 
  832 /*
  833  * Common RX configuration for gem_init() and gem_reset_rxdma().
  834  */
  835 static void
  836 gem_rx_common(struct gem_softc *sc)
  837 {
  838         bus_space_tag_t t = sc->sc_bustag;
  839         bus_space_handle_t h = sc->sc_h1;
  840         uint32_t v;
  841 
   842         /* Encode the Receive Descriptor ring size (see gem_ringsize()) */
  843         v = gem_ringsize(GEM_NRXDESC /*XXX*/);
  844 
  845         /* Set receive h/w checksum offset */
  846 #ifdef INET
  847         v |= (ETHER_HDR_LEN + sizeof(struct ip) +
  848             ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
  849             ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
  850 #endif
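               /*
                * That is, checksumming starts 34 bytes into the frame
                * (14-byte Ethernet header plus 20-byte IPv4 header), or
                * 38 bytes when a VLAN tag may be present.
                */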
  851 
  852         /* Enable RX DMA */
  853         bus_space_write_4(t, h, GEM_RX_CONFIG,
  854             v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
  855             (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
  856 
  857         /*
  858          * The following value is for an OFF Threshold of about 3/4 full
  859          * and an ON Threshold of 1/4 full.
  860          */
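               /*
                * The threshold fields appear to be in units of 64 bytes:
                * 3 * size / 256 == (3/4 * size) / 64 for the OFF
                * threshold and size / 256 == (1/4 * size) / 64 for ON.
                */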
  861         bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
  862             (3 * sc->sc_rxfifosize / 256) |
  863             ((sc->sc_rxfifosize / 256) << 12));
  864         bus_space_write_4(t, h, GEM_RX_BLANKING,
  865             (6 << GEM_RX_BLANKING_TIME_SHIFT) | 8);
  866 }
  867 
  868 /*
  869  * Reset the transmitter
  870  */
  871 int
  872 gem_reset_tx(struct gem_softc *sc)
  873 {
  874         bus_space_tag_t t = sc->sc_bustag;
  875         bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
  876 
  877         /*
  878          * Resetting while DMA is in progress can cause a bus hang, so we
  879          * disable DMA first.
  880          */
  881         gem_disable_tx(sc);
  882         bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
  883         bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  884         /* Wait till it finishes */
  885         if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
  886                 aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
  887         /* Wait 5ms extra. */
  888         delay(5000);
  889 
  890         /* Finally, reset the ETX */
  891         bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
   892         bus_space_barrier(t, h2, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
  893         /* Wait till it finishes */
  894         if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
  895                 aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
  896                 return (1);
  897         }
  898         return (0);
  899 }
  900 
  901 /*
   902  * Disable the receiver.
  903  */
  904 int
  905 gem_disable_rx(struct gem_softc *sc)
  906 {
  907         bus_space_tag_t t = sc->sc_bustag;
  908         bus_space_handle_t h = sc->sc_h1;
  909         uint32_t cfg;
  910 
  911         /* Flip the enable bit */
  912         cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
  913         cfg &= ~GEM_MAC_RX_ENABLE;
  914         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
  915         bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  916         /* Wait for it to finish */
  917         return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
  918 }
  919 
  920 /*
   921  * Disable the transmitter.
  922  */
  923 int
  924 gem_disable_tx(struct gem_softc *sc)
  925 {
  926         bus_space_tag_t t = sc->sc_bustag;
  927         bus_space_handle_t h = sc->sc_h1;
  928         uint32_t cfg;
  929 
  930         /* Flip the enable bit */
  931         cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
  932         cfg &= ~GEM_MAC_TX_ENABLE;
  933         bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
  934         bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
  935         /* Wait for it to finish */
  936         return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
  937 }
  938 
  939 /*
   940  * Initialize the descriptor rings and receive buffers.
  941  */
  942 int
  943 gem_meminit(struct gem_softc *sc)
  944 {
  945         struct gem_rxsoft *rxs;
  946         int i, error;
  947 
  948         /*
  949          * Initialize the transmit descriptor ring.
  950          */
  951         memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
  952         for (i = 0; i < GEM_NTXDESC; i++) {
  953                 sc->sc_txdescs[i].gd_flags = 0;
  954                 sc->sc_txdescs[i].gd_addr = 0;
  955         }
  956         GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
  957             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  958         sc->sc_txfree = GEM_NTXDESC-1;
  959         sc->sc_txnext = 0;
  960         sc->sc_txwin = 0;
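               /*
                * Only GEM_NTXDESC - 1 descriptors are advertised as
                * free, presumably so that a completely full ring can be
                * distinguished from an empty one.
                */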
  961 
  962         /*
  963          * Initialize the receive descriptor and receive job
  964          * descriptor rings.
  965          */
  966         for (i = 0; i < GEM_NRXDESC; i++) {
  967                 rxs = &sc->sc_rxsoft[i];
  968                 if (rxs->rxs_mbuf == NULL) {
  969                         if ((error = gem_add_rxbuf(sc, i)) != 0) {
  970                                 aprint_error_dev(sc->sc_dev,
  971                                     "unable to allocate or map rx "
  972                                     "buffer %d, error = %d\n",
  973                                     i, error);
  974                                 /*
  975                                  * XXX Should attempt to run with fewer receive
  976                                  * XXX buffers instead of just failing.
  977                                  */
  978                                 gem_rxdrain(sc);
  979                                 return (1);
  980                         }
  981                 } else
  982                         GEM_INIT_RXDESC(sc, i);
  983         }
  984         sc->sc_rxptr = 0;
  985         sc->sc_meminited = 1;
  986         GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
  987         GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
  988 
  989         return (0);
  990 }
  991 
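       /*
        * Translate a descriptor ring size (a power of two between 32
        * and 8192 entries) into the GEM_RING_SZ_* field encoding used
        * in the RX and TX config registers; gem_init() reuses this for
        * the transmit ring.
        */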
  992 static int
  993 gem_ringsize(int sz)
  994 {
  995         switch (sz) {
  996         case 32:
  997                 return GEM_RING_SZ_32;
  998         case 64:
  999                 return GEM_RING_SZ_64;
 1000         case 128:
 1001                 return GEM_RING_SZ_128;
 1002         case 256:
 1003                 return GEM_RING_SZ_256;
 1004         case 512:
 1005                 return GEM_RING_SZ_512;
 1006         case 1024:
 1007                 return GEM_RING_SZ_1024;
 1008         case 2048:
 1009                 return GEM_RING_SZ_2048;
 1010         case 4096:
 1011                 return GEM_RING_SZ_4096;
 1012         case 8192:
 1013                 return GEM_RING_SZ_8192;
 1014         default:
  1015                 printf("gem: invalid descriptor ring size %d\n", sz);
 1016                 return GEM_RING_SZ_32;
 1017         }
 1018 }
 1019 
 1020 
 1021 /*
 1022  * Start PCS
 1023  */
 1024 void
 1025 gem_pcs_start(struct gem_softc *sc)
 1026 {
 1027         bus_space_tag_t t = sc->sc_bustag;
 1028         bus_space_handle_t h = sc->sc_h1;
 1029         uint32_t v;
 1030 
 1031 #ifdef GEM_DEBUG
 1032         aprint_debug_dev(sc->sc_dev, "gem_pcs_start()\n");
 1033 #endif
 1034 
 1035         /*
 1036          * Set up.  We must disable the MII before modifying the
  1037          * GEM_MII_ANAR register.
 1038          */
 1039         if (sc->sc_flags & GEM_SERDES) {
 1040                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
 1041                     GEM_MII_DATAPATH_SERDES);
 1042                 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
 1043                     GEM_MII_SLINK_LOOPBACK);
 1044         } else {
 1045                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
 1046                     GEM_MII_DATAPATH_SERIAL);
 1047                 bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0);
 1048         }
 1049         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
 1050         v = bus_space_read_4(t, h, GEM_MII_ANAR);
 1051         v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE);
 1052         if (IFM_SUBTYPE(sc->sc_mii_media) == IFM_AUTO)
 1053                 v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX);
 1054         else if ((IFM_OPTIONS(sc->sc_mii_media) & IFM_FDX) != 0) {
 1055                 v |= GEM_MII_ANEG_FUL_DUPLX;
 1056                 v &= ~GEM_MII_ANEG_HLF_DUPLX;
 1057         } else if ((IFM_OPTIONS(sc->sc_mii_media) & IFM_HDX) != 0) {
 1058                 v &= ~GEM_MII_ANEG_FUL_DUPLX;
 1059                 v |= GEM_MII_ANEG_HLF_DUPLX;
 1060         }
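               /*
                * `v' now advertises symmetric/asymmetric pause plus the
                * duplex capabilities chosen above; writing it to
                * GEM_MII_ANAR and restarting autonegotiation below puts
                * the new advertisement on the wire.
                */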
 1061 
 1062         /* Configure link. */
 1063         bus_space_write_4(t, h, GEM_MII_ANAR, v);
 1064         bus_space_write_4(t, h, GEM_MII_CONTROL,
 1065             GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
 1066         bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
 1067         gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT);
 1068 
 1069         /* Start the 10 second timer */
 1070         callout_schedule(&sc->sc_tick_ch, hz * 10);
 1071 }
 1072 
 1073 /*
 1074  * Stop PCS
 1075  */
 1076 void
 1077 gem_pcs_stop(struct gem_softc *sc, int disable)
 1078 {
 1079         bus_space_tag_t t = sc->sc_bustag;
 1080         bus_space_handle_t h = sc->sc_h1;
 1081 
 1082 #ifdef GEM_DEBUG
 1083         aprint_debug_dev(sc->sc_dev, "gem_pcs_stop()\n");
 1084 #endif
 1085 
 1086         /* Tell link partner that we're going away */
 1087         bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF);
 1088 
 1089         /*
 1090          * Disable PCS MII.  The documentation suggests that setting
 1091          * GEM_MII_CONFIG_ENABLE to zero and then restarting auto-
 1092          * negotiation will shut down the link.  However, it appears
 1093          * that we also need to unset the datapath mode.
 1094          */
 1095         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
 1096         bus_space_write_4(t, h, GEM_MII_CONTROL,
 1097             GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
 1098         bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
 1099         bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
 1100 
 1101         if (disable) {
 1102                 if (sc->sc_flags & GEM_SERDES)
 1103                         bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
 1104                                 GEM_MII_SLINK_POWER_OFF);
 1105                 else
 1106                         bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
 1107                             GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF);
 1108         }
 1109 
 1110         sc->sc_flags &= ~GEM_LINK;
 1111         sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
 1112         sc->sc_mii.mii_media_status = IFM_AVALID;
 1113 }
 1114 
 1115 
 1116 /*
 1117  * Initialization of interface; set up initialization block
 1118  * and transmit/receive descriptor rings.
 1119  */
 1120 int
 1121 gem_init(struct ifnet *ifp)
 1122 {
 1123         struct gem_softc *sc = ifp->if_softc;
 1124         bus_space_tag_t t = sc->sc_bustag;
 1125         bus_space_handle_t h = sc->sc_h1;
 1126         int rc = 0, s;
 1127         u_int max_frame_size;
 1128         uint32_t v;
 1129 
 1130         s = splnet();
 1131 
 1132         DPRINTF(sc, ("%s: gem_init: calling stop\n", device_xname(sc->sc_dev)));
 1133         /*
 1134          * Initialization sequence. The numbered steps below correspond
 1135          * to the sequence outlined in section 6.3.5.1 in the Ethernet
 1136          * Channel Engine manual (part of the PCIO manual).
 1137          * See also the STP2002-STQ document from Sun Microsystems.
 1138          */
 1139 
 1140         /* step 1 & 2. Reset the Ethernet Channel */
 1141         gem_stop(ifp, 0);
 1142         gem_reset(sc);
 1143         DPRINTF(sc, ("%s: gem_init: restarting\n", device_xname(sc->sc_dev)));
 1144 
 1145         /* Re-initialize the MIF */
 1146         gem_mifinit(sc);
 1147 
 1148         /* Set up correct datapath for non-SERDES/Serialink */
 1149         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
 1150             sc->sc_variant != GEM_SUN_ERI)
 1151                 bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
 1152                     GEM_MII_DATAPATH_MII);
 1153 
 1154         /* Call MI reset function if any */
 1155         if (sc->sc_hwreset)
 1156                 (*sc->sc_hwreset)(sc);
 1157 
 1158         /* step 3. Setup data structures in host memory */
 1159         if (gem_meminit(sc) != 0) {
 1160                 splx(s);
 1161                 return 1;
 1162         }
 1163 
 1164         /* step 4. TX MAC registers & counters */
 1165         gem_init_regs(sc);
 1166         max_frame_size = uimax(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
 1167         max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
 1168         if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
 1169                 max_frame_size += ETHER_VLAN_ENCAP_LEN;
 1170         bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
 1171             max_frame_size|/* burst size */(0x2000<<16));
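               /*
                * With the default 1500-byte MTU this is 1518 bytes
                * (1522 with a VLAN tag); the value shares the register
                * with the 0x2000 burst size in the upper half.
                */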
 1172 
 1173         /* step 5. RX MAC registers & counters */
 1174         gem_setladrf(sc);
 1175 
 1176         /* step 6 & 7. Program Descriptor Ring Base Addresses */
 1177         bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
 1178             ((uint64_t)GEM_CDTXADDR(sc, 0)) >> 32);
 1179         bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
 1180 
 1181         bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
 1182             ((uint64_t)GEM_CDRXADDR(sc, 0)) >> 32);
 1183         bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
 1184 
 1185         /* step 8. Global Configuration & Interrupt Mask */
 1186         gem_inten(sc);
 1187         bus_space_write_4(t, h, GEM_MAC_RX_MASK,
  1188             GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
 1189         bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */
 1190         bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK,
 1191             GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
 1192 
 1193         /* step 9. ETX Configuration: use mostly default values */
 1194 
 1195         /* Enable TX DMA */
 1196         v = gem_ringsize(GEM_NTXDESC /*XXX*/);
 1197         bus_space_write_4(t, h, GEM_TX_CONFIG,
 1198             v | GEM_TX_CONFIG_TXDMA_EN |
 1199             (((sc->sc_flags & GEM_GIGABIT ? 0x4FF : 0x100) << 10) &
 1200             GEM_TX_CONFIG_TXFIFO_TH));
 1201         bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);
 1202 
 1203         /* step 10. ERX Configuration */
 1204         gem_rx_common(sc);
 1205 
 1206         /* step 11. Configure Media */
 1207         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
 1208             (rc = mii_ifmedia_change(&sc->sc_mii)) != 0)
 1209                 goto out;
 1210 
 1211         /* step 12. RX_MAC Configuration Register */
 1212         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
 1213         v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
 1214         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 1215 
 1216         /* step 14. Issue Transmit Pending command */
 1217 
 1218         /* Call MI initialization function if any */
 1219         if (sc->sc_hwinit)
 1220                 (*sc->sc_hwinit)(sc);
 1221 
 1222         /* step 15.  Give the receiver a swift kick */
 1223         bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);
 1224 
 1225         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
 1226                 /* Configure PCS */
 1227                 gem_pcs_start(sc);
 1228         else
 1229                 /* Start the one second timer. */
 1230                 callout_schedule(&sc->sc_tick_ch, hz);
 1231 
 1232         sc->sc_flags &= ~GEM_LINK;
 1233         ifp->if_flags |= IFF_RUNNING;
 1234         ifp->if_timer = 0;
 1235         sc->sc_if_flags = ifp->if_flags;
 1236 out:
 1237         splx(s);
 1238 
  1239         return rc;
 1240 }
 1241 
 1242 void
 1243 gem_init_regs(struct gem_softc *sc)
 1244 {
 1245         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1246         bus_space_tag_t t = sc->sc_bustag;
 1247         bus_space_handle_t h = sc->sc_h1;
 1248         const u_char *laddr = CLLADDR(ifp->if_sadl);
 1249         uint32_t v;
 1250 
 1251         /* These regs are not cleared on reset */
 1252         if (!sc->sc_inited) {
 1253 
 1254                 /* Load recommended values */
 1255                 bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
 1256                 bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
 1257                 bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);
 1258 
 1259                 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
 1260                 /* Max frame and max burst size */
 1261                 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
 1262                     ETHER_MAX_LEN | (0x2000<<16));
 1263 
 1264                 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
 1265                 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
 1266                 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
 1267                 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
 1268                 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
 1269                     ((laddr[5]<<8)|laddr[4])&0x3ff);
 1270 
 1271                 /* Secondary MAC addr set to 0:0:0:0:0:0 */
 1272                 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
 1273                 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
 1274                 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
 1275 
 1276                 /* MAC control addr set to 01:80:c2:00:00:01 */
 1277                 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
 1278                 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
 1279                 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
 1280 
 1281                 /* MAC filter addr set to 0:0:0:0:0:0 */
 1282                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
 1283                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
 1284                 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
 1285 
 1286                 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
 1287                 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
 1288 
 1289                 sc->sc_inited = 1;
 1290         }
 1291 
 1292         /* Counters need to be zeroed */
 1293         bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
 1294         bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
 1295         bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
 1296         bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
 1297         bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
 1298         bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
 1299         bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
 1300         bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
 1301         bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
 1302         bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
 1303         bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
 1304 
 1305         /* Set XOFF PAUSE time. */
 1306         bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
 1307 
 1308         /*
 1309          * Set the internal arbitration to "infinite" bursts of the
 1310          * maximum length of 31 * 64 bytes (1984 bytes) so DMA
 1311          * transfers aren't split up into cache-line-sized chunks.
 1312          * This greatly improves RX performance in particular.
 1313          * Enable silicon bug workarounds for the Apple variants.
 1314          */
 1315         bus_space_write_4(t, h, GEM_CONFIG,
 1316             GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
 1317             ((sc->sc_flags & GEM_PCI) ?
 1318             GEM_CONFIG_BURST_INF : GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
 1319             GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
 1320 
 1321         /*
 1322          * Set the station address.
 1323          */
 1324         bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
 1325         bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
 1326         bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
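        /*
         * The address is packed big-endian into three 16-bit registers;
         * a hypothetical 00:03:ba:12:34:56 would yield GEM_MAC_ADDR0 =
         * 0x3456, GEM_MAC_ADDR1 = 0xba12 and GEM_MAC_ADDR2 = 0x0003.
         */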
 1327 
 1328         /*
 1329          * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
 1330          */
 1331         sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
 1332         v = GEM_MAC_XIF_TX_MII_ENA;
 1333         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
 1334                 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
 1335                         v |= GEM_MAC_XIF_FDPLX_LED;
 1336                         if (sc->sc_flags & GEM_GIGABIT)
 1337                                 v |= GEM_MAC_XIF_GMII_MODE;
 1338                 }
 1339         } else {
 1340                 v |= GEM_MAC_XIF_GMII_MODE;
 1341         }
 1342         bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
 1343 }
 1344 
 1345 #ifdef GEM_DEBUG
 1346 static void
 1347 gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc)
 1348 {
 1349         int i;
 1350 
 1351         for (i = firstdesc;; i = GEM_NEXTTX(i)) {
 1352                 printf("descriptor %d:\t", i);
 1353                 printf("gd_flags:   0x%016" PRIx64 "\t",
 1354                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
 1355                 printf("gd_addr: 0x%016" PRIx64 "\n",
 1356                         GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
 1357                 if (i == lastdesc)
 1358                         break;
 1359         }
 1360 }
 1361 #endif
 1362 
 1363 static void
 1364 gem_start(struct ifnet *ifp)
 1365 {
 1366         struct gem_softc *sc = ifp->if_softc;
 1367         struct mbuf *m0, *m;
 1368         struct gem_txsoft *txs;
 1369         bus_dmamap_t dmamap;
 1370         int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg;
 1371 #ifdef GEM_DEBUG
 1372         int otxnext;
 1373 #endif
 1374         uint64_t flags = 0;
 1375 
 1376         if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
 1377                 return;
 1378 
 1379         /*
 1380          * Remember the previous number of free descriptors and
 1381          * the first descriptor we'll use.
 1382          */
 1383         ofree = sc->sc_txfree;
 1384 #ifdef GEM_DEBUG
 1385         otxnext = sc->sc_txnext;
 1386 #endif
 1387 
 1388         DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
 1389             device_xname(sc->sc_dev), ofree, otxnext));
 1390 
 1391         /*
 1392          * Loop through the send queue, setting up transmit descriptors
 1393          * until we drain the queue, or use up all available transmit
 1394          * descriptors.
 1395          */
 1396 #ifdef INET
 1397 next:
 1398 #endif
 1399         while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
 1400             sc->sc_txfree != 0) {
 1401                 /*
 1402                  * Grab a packet off the queue.
 1403                  */
 1404                 IFQ_POLL(&ifp->if_snd, m0);
 1405                 if (m0 == NULL)
 1406                         break;
 1407                 m = NULL;
 1408 
 1409                 dmamap = txs->txs_dmamap;
 1410 
 1411                 /*
 1412                  * Load the DMA map.  If this fails, the packet either
 1413                  * didn't fit in the allotted number of segments, or we were
 1414                  * short on resources.  In this case, we'll copy and try
 1415                  * again.
 1416                  */
 1417                 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
 1418                       BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0 ||
 1419                       (m0->m_pkthdr.len < ETHER_MIN_TX &&
 1420                        dmamap->dm_nsegs == GEM_NTXSEGS)) {
 1421                         if (m0->m_pkthdr.len > MCLBYTES) {
 1422                                 aprint_error_dev(sc->sc_dev,
 1423                                     "unable to allocate jumbo Tx cluster\n");
 1424                                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1425                                 m_freem(m0);
 1426                                 continue;
 1427                         }
 1428                         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1429                         if (m == NULL) {
 1430                                 aprint_error_dev(sc->sc_dev,
 1431                                     "unable to allocate Tx mbuf\n");
 1432                                 break;
 1433                         }
 1434                         MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
 1435                         if (m0->m_pkthdr.len > MHLEN) {
 1436                                 MCLGET(m, M_DONTWAIT);
 1437                                 if ((m->m_flags & M_EXT) == 0) {
 1438                                         aprint_error_dev(sc->sc_dev,
 1439                                             "unable to allocate Tx cluster\n");
 1440                                         m_freem(m);
 1441                                         break;
 1442                                 }
 1443                         }
 1444                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
 1445                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
 1446                         error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
 1447                             m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 1448                         if (error) {
 1449                                 aprint_error_dev(sc->sc_dev,
 1450                                     "unable to load Tx buffer, error = %d\n",
 1451                                     error);
 1452                                 break;
 1453                         }
 1454                 }
 1455 
 1456                 /*
 1457                  * Ensure we have enough descriptors free to describe
 1458                  * the packet; a short frame needs one extra for the
 1459                  * padding buffer appended below.
 1460                  */
 1460                 if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
 1461                      (sc->sc_txfree - 1) : sc->sc_txfree)) {
 1462                         /*
 1463                          * Not enough free descriptors to transmit this
 1464                          * packet.
 1465                          */
 1466                         bus_dmamap_unload(sc->sc_dmatag, dmamap);
 1467                         if (m != NULL)
 1468                                 m_freem(m);
 1469                         break;
 1470                 }
 1471 
 1472                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1473                 if (m != NULL) {
 1474                         m_freem(m0);
 1475                         m0 = m;
 1476                 }
 1477 
 1478                 /*
 1479                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
 1480                  */
 1481 
 1482                 /* Sync the DMA map. */
 1483                 bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
 1484                     BUS_DMASYNC_PREWRITE);
 1485 
 1486                 /*
 1487                  * Initialize the transmit descriptors.
 1488                  */
 1489                 firsttx = sc->sc_txnext;
 1490                 for (nexttx = firsttx, seg = 0;
 1491                      seg < dmamap->dm_nsegs;
 1492                      seg++, nexttx = GEM_NEXTTX(nexttx)) {
 1493 
 1494                         /*
 1495                          * If this is the first descriptor we're
 1496                          * enqueueing, set the start of packet flag,
 1497                          * and the checksum stuff if we want the hardware
 1498                          * to do it.
 1499                          */
 1500                         flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
 1501                         if (nexttx == firsttx) {
 1502                                 flags |= GEM_TD_START_OF_PACKET;
 1503 #ifdef INET
 1504                                 /* h/w checksum */
 1505                                 if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 &&
 1506                                     m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
 1507                                         struct ether_header *eh;
 1508                                         uint16_t offset, start;
 1509 
 1510                                         eh = mtod(m0, struct ether_header *);
 1511                                         switch (ntohs(eh->ether_type)) {
 1512                                         case ETHERTYPE_IP:
 1513                                                 start = ETHER_HDR_LEN;
 1514                                                 break;
 1515                                         case ETHERTYPE_VLAN:
 1516                                                 start = ETHER_HDR_LEN +
 1517                                                         ETHER_VLAN_ENCAP_LEN;
 1518                                                 break;
 1519                                         default:
 1520                                                 /* unsupported, drop it */
 1521                                                 bus_dmamap_unload(sc->sc_dmatag,
 1522                                                         dmamap);
 1523                                                 m_freem(m0);
 1524                                                 goto next;
 1525                                         }
 1526                                         start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
 1527                                         offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start;
 1528                                         flags |= (start <<
 1529                                                   GEM_TD_CXSUM_STARTSHFT) |
 1530                                                  (offset <<
 1531                                                   GEM_TD_CXSUM_STUFFSHFT) |
 1532                                                  GEM_TD_CXSUM_ENABLE;
 1533                                 }
 1534 #endif
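                                /*
                                 * For illustration: a TCP/IPv4 frame
                                 * with no IP options has start =
                                 * ETHER_HDR_LEN (14) + 20 = 34 and
                                 * stuff offset = 34 + 16 = 50, the
                                 * position of the TCP checksum field;
                                 * those are the values the shifts
                                 * above encode.
                                 */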
 1535                                 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
 1536                                         sc->sc_txwin = 0;
 1537                                         flags |= GEM_TD_INTERRUPT_ME;
 1538                                 }
 1539                         }
 1540                         sc->sc_txdescs[nexttx].gd_addr =
 1541                             GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
 1542                         if (seg == dmamap->dm_nsegs - 1) {
 1543                                 flags |= GEM_TD_END_OF_PACKET;
 1544                         } else {
 1545                                 /* the last descriptor's flags are set after the loop */
 1546                                 sc->sc_txdescs[nexttx].gd_flags =
 1547                                         GEM_DMA_WRITE(sc, flags);
 1548                         }
 1549                         lasttx = nexttx;
 1550                 }
 1551                 if (m0->m_pkthdr.len < ETHER_MIN_TX) {
 1552                         /* add padding buffer at end of chain */
 1553                         flags &= ~GEM_TD_END_OF_PACKET;
 1554                         sc->sc_txdescs[lasttx].gd_flags =
 1555                             GEM_DMA_WRITE(sc, flags);
 1556 
 1557                         sc->sc_txdescs[nexttx].gd_addr =
 1558                             GEM_DMA_WRITE(sc,
 1559                             sc->sc_nulldmamap->dm_segs[0].ds_addr);
 1560                         flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
 1561                             GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
 1562                         lasttx = nexttx;
 1563                         nexttx = GEM_NEXTTX(nexttx);
 1564                         seg++;
 1565                 }
 1566                 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
 1567 
 1568                 KASSERT(lasttx != -1);
 1569 
 1570                 /*
 1571                  * Store a pointer to the packet so we can free it later,
 1572                  * and remember what txdirty will be once the packet is
 1573                  * done.
 1574                  */
 1575                 txs->txs_mbuf = m0;
 1576                 txs->txs_firstdesc = sc->sc_txnext;
 1577                 txs->txs_lastdesc = lasttx;
 1578                 txs->txs_ndescs = seg;
 1579 
 1580 #ifdef GEM_DEBUG
 1581                 if (ifp->if_flags & IFF_DEBUG) {
 1582                         printf("     gem_start %p transmit chain:\n", txs);
 1583                         gem_txsoft_print(sc, txs->txs_firstdesc,
 1584                             txs->txs_lastdesc);
 1585                 }
 1586 #endif
 1587 
 1588                 /* Sync the descriptors we're using. */
 1589                 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
 1590                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1591 
 1592                 /* Advance the tx pointer. */
 1593                 sc->sc_txfree -= txs->txs_ndescs;
 1594                 sc->sc_txnext = nexttx;
 1595 
 1596                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 1597                 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 1598 
 1599                 /*
 1600                  * Pass the packet to any BPF listeners.
 1601                  */
 1602                 bpf_mtap(ifp, m0, BPF_D_OUT);
 1603         }
 1604 
 1605         if (sc->sc_txfree != ofree) {
 1606                 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
 1607                     device_xname(sc->sc_dev), lasttx, otxnext));
 1608                 /*
 1609                  * The entire packet chain is set up.
 1610                  * Kick the transmitter.
 1611                  */
 1612                 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
 1613                         device_xname(sc->sc_dev), nexttx));
 1614                 bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK,
 1615                         sc->sc_txnext);
 1616 
 1617                 /* Set a watchdog timer in case the chip flakes out. */
 1618                 ifp->if_timer = 5;
 1619                 DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
 1620                         device_xname(sc->sc_dev), ifp->if_timer));
 1621         }
 1622 }
 1623 
 1624 /*
 1625  * Transmit interrupt.
 1626  */
 1627 int
 1628 gem_tint(struct gem_softc *sc)
 1629 {
 1630         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1631         bus_space_tag_t t = sc->sc_bustag;
 1632         bus_space_handle_t mac = sc->sc_h1;
 1633         struct gem_txsoft *txs;
 1634         int txlast;
 1635         int progress = 0;
 1636         uint32_t v;
 1637 
 1638         net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
 1639 
 1640         DPRINTF(sc, ("%s: gem_tint\n", device_xname(sc->sc_dev)));
 1641 
 1642         /* Unload collision counters ... */
 1643         v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
 1644             bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
 1645         if_statadd_ref(nsr, if_collisions, v +
 1646             bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
 1647             bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT));
 1648         if_statadd_ref(nsr, if_oerrors, v);
 1649 
 1650         /* ... then clear the hardware counters. */
 1651         bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
 1652         bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
 1653         bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
 1654         bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
 1655 
 1656         /*
 1657          * Go through our Tx list and free mbufs for those
 1658          * frames that have been transmitted.
 1659          */
 1660         while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
 1661                 /*
 1662                  * In theory, we could harvest some descriptors before
 1663                  * the ring is empty, but that's a bit complicated.
 1664                  *
 1665                  * GEM_TX_COMPLETION points to the last descriptor
 1666                  * processed +1.
 1667                  *
 1668                  * Let's assume that the NIC writes back to the Tx
 1669                  * descriptors before it updates the completion
 1670                  * register.  If the NIC has posted writes to the
 1671                  * Tx descriptors, PCI ordering requires that the
 1672                  * posted writes flush to RAM before the register-read
 1673                  * finishes.  So let's read the completion register,
 1674                  * before syncing the descriptors, so that we
 1675                  * examine Tx descriptors that are at least as
 1676                  * current as the completion register.
 1677                  */
 1678                 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
 1679                 DPRINTF(sc,
 1680                         ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
 1681                                 txs->txs_lastdesc, txlast));
 1682                 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
 1683                         if (txlast >= txs->txs_firstdesc &&
 1684                             txlast <= txs->txs_lastdesc)
 1685                                 break;
 1686                 } else if (txlast >= txs->txs_firstdesc ||
 1687                            txlast <= txs->txs_lastdesc)
 1688                         break;
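                /*
                 * Illustrative example: with a 64-entry ring, a packet
                 * occupying firstdesc = 60 through lastdesc = 2 is still
                 * in flight while txlast >= 60 or txlast <= 2; only a
                 * txlast of 3..59 means the chip has moved past it.
                 */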
 1689 
 1690                 GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
 1691                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1692 
 1693 #ifdef GEM_DEBUG        /* XXX DMA synchronization? */
 1694                 if (ifp->if_flags & IFF_DEBUG) {
 1695                         printf("    txsoft %p transmit chain:\n", txs);
 1696                         gem_txsoft_print(sc, txs->txs_firstdesc,
 1697                             txs->txs_lastdesc);
 1698                 }
 1699 #endif
 1700 
 1701 
 1702                 DPRINTF(sc, ("gem_tint: releasing a desc\n"));
 1703                 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
 1704 
 1705                 sc->sc_txfree += txs->txs_ndescs;
 1706 
 1707                 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
 1708                     0, txs->txs_dmamap->dm_mapsize,
 1709                     BUS_DMASYNC_POSTWRITE);
 1710                 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
 1711                 if (txs->txs_mbuf != NULL) {
 1712                         m_freem(txs->txs_mbuf);
 1713                         txs->txs_mbuf = NULL;
 1714                 }
 1715 
 1716                 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
 1717 
 1718                 if_statinc_ref(nsr, if_opackets);
 1719                 progress = 1;
 1720         }
 1721 
 1722         IF_STAT_PUTREF(ifp);
 1723 
 1724 #if 0
 1725         DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
 1726                 "GEM_TX_DATA_PTR %" PRIx64 " GEM_TX_COMPLETION %" PRIx32 "\n",
 1727                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE),
 1728                 ((uint64_t)bus_space_read_4(sc->sc_bustag, sc->sc_h1,
 1729                         GEM_TX_DATA_PTR_HI) << 32) |
 1730                              bus_space_read_4(sc->sc_bustag, sc->sc_h1,
 1731                         GEM_TX_DATA_PTR_LO),
 1732                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION)));
 1733 #endif
 1734 
 1735         if (progress) {
 1736                 if (sc->sc_txfree == GEM_NTXDESC - 1)
 1737                         sc->sc_txwin = 0;
 1738 
 1739                 ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
 1740                 if_schedule_deferred_start(ifp);
 1741         }
 1742         DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
 1743                 device_xname(sc->sc_dev), ifp->if_timer));
 1744 
 1745         return (1);
 1746 }
 1747 
 1748 /*
 1749  * Receive interrupt.
 1750  */
 1751 int
 1752 gem_rint(struct gem_softc *sc)
 1753 {
 1754         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1755         bus_space_tag_t t = sc->sc_bustag;
 1756         bus_space_handle_t h = sc->sc_h1;
 1757         struct gem_rxsoft *rxs;
 1758         struct mbuf *m;
 1759         uint64_t rxstat;
 1760         uint32_t rxcomp;
 1761         int i, len, progress = 0;
 1762 
 1763         DPRINTF(sc, ("%s: gem_rint\n", device_xname(sc->sc_dev)));
 1764 
 1765         /*
 1766          * Ignore spurious interrupts that sometimes occur before
 1767          * we are set up, e.g. when network booting.
 1768          */
 1769         if (!sc->sc_meminited)
 1770                 return 1;
 1771 
 1772         /*
 1773          * Read the completion register once.  This limits
 1774          * how long the following loop can execute.
 1775          */
 1776         rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
 1777 
 1778         /*
 1779          * XXX Read the lastrx only once at the top for speed.
 1780          */
 1781         DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
 1782                 sc->sc_rxptr, rxcomp));
 1783 
 1784         /*
 1785          * Enter the loop at least once (i == sc->sc_rxptr forces the first pass).
 1786          */
 1787         for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
 1788              i = GEM_NEXTRX(i)) {
 1789                 rxs = &sc->sc_rxsoft[i];
 1790 
 1791                 GEM_CDRXSYNC(sc, i,
 1792                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1793 
 1794                 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
 1795 
 1796                 if (rxstat & GEM_RD_OWN) {
 1797                         GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
 1798                         /*
 1799                          * We have processed all of the receive buffers.
 1800                          */
 1801                         break;
 1802                 }
 1803 
 1804                 progress++;
 1805 
 1806                 if (rxstat & GEM_RD_BAD_CRC) {
 1807                         if_statinc(ifp, if_ierrors);
 1808                         DPRINTF(sc, ("%s: receive error: CRC error\n",
 1809                             device_xname(sc->sc_dev)));
 1810                         GEM_INIT_RXDESC(sc, i);
 1811                         continue;
 1812                 }
 1813 
 1814                 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1815                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1816 #ifdef GEM_DEBUG
 1817                 if (ifp->if_flags & IFF_DEBUG) {
 1818                         printf("    rxsoft %p descriptor %d: ", rxs, i);
 1819                         printf("gd_flags: 0x%016llx\t", (long long)
 1820                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
 1821                         printf("gd_addr: 0x%016llx\n", (long long)
 1822                                 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
 1823                 }
 1824 #endif
 1825 
 1826                 /* No errors; receive the packet. */
 1827                 len = GEM_RD_BUFLEN(rxstat);
 1828 
 1829                 /*
 1830                  * Allocate a new mbuf cluster.  If that fails, we are
 1831                  * out of memory, and must drop the packet and recycle
 1832                  * the buffer that's already attached to this descriptor.
 1833                  */
 1834                 m = rxs->rxs_mbuf;
 1835                 if (gem_add_rxbuf(sc, i) != 0) {
 1836                         GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
 1837                         if_statinc(ifp, if_ierrors);
 1838                         aprint_error_dev(sc->sc_dev,
 1839                             "receive error: RX no buffer space\n");
 1840                         GEM_INIT_RXDESC(sc, i);
 1841                         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 1842                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1843                         continue;
 1844                 }
 1845                 m->m_data += 2; /* We're already off by two */
 1846 
 1847                 m_set_rcvif(m, ifp);
 1848                 m->m_pkthdr.len = m->m_len = len;
 1849 
 1850 #ifdef INET
 1851                 /* hardware checksum */
 1852                 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
 1853                         struct ether_header *eh;
 1854                         struct ip *ip;
 1855                         int32_t hlen, pktlen;
 1856 
 1857                         if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
 1858                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
 1859                                          ETHER_VLAN_ENCAP_LEN;
 1860                                 eh = (struct ether_header *) (mtod(m, char *) +
 1861                                         ETHER_VLAN_ENCAP_LEN);
 1862                         } else {
 1863                                 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
 1864                                 eh = mtod(m, struct ether_header *);
 1865                         }
 1866                         if (ntohs(eh->ether_type) != ETHERTYPE_IP)
 1867                                 goto swcsum;
 1868                         ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN);
 1869 
 1870                         /* IPv4 only */
 1871                         if (ip->ip_v != IPVERSION)
 1872                                 goto swcsum;
 1873 
 1874                         hlen = ip->ip_hl << 2;
 1875                         if (hlen < sizeof(struct ip))
 1876                                 goto swcsum;
 1877 
 1878                         /*
 1879                          * bail if too short, has random trailing garbage,
 1880                          * truncated, fragment, or has ethernet pad.
 1881                          */
 1882                         if ((ntohs(ip->ip_len) < hlen) ||
 1883                             (ntohs(ip->ip_len) != pktlen) ||
 1884                             (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
 1885                                 goto swcsum;
 1886 
 1887                         switch (ip->ip_p) {
 1888                         case IPPROTO_TCP:
 1889                                 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
 1890                                         goto swcsum;
 1891                                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1892                                         goto swcsum;
 1893                                 m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
 1894                                 break;
 1895                         case IPPROTO_UDP:
 1896                                 /* FALLTHROUGH */
 1897                         default:
 1898                                 goto swcsum;
 1899                         }
 1900 
 1901                         /* the uncomplemented sum is expected */
 1902                         m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
 1903 
 1904                         /* if the pkt had ip options, we have to deduct them */
 1905                         if (hlen > sizeof(struct ip)) {
 1906                                 uint16_t *opts;
 1907                                 uint32_t optsum, temp;
 1908 
 1909                                 optsum = 0;
 1910                                 temp = hlen - sizeof(struct ip);
 1911                                 opts = (uint16_t *) ((char *) ip +
 1912                                         sizeof(struct ip));
 1913 
 1914                                 while (temp > 1) {
 1915                                         optsum += ntohs(*opts++);
 1916                                         temp -= 2;
 1917                                 }
 1918                                 while (optsum >> 16)
 1919                                         optsum = (optsum >> 16) +
 1920                                                  (optsum & 0xffff);
 1921 
 1922                                 /* Deduct ip opts sum from hwsum. */
 1923                                 m->m_pkthdr.csum_data += (uint16_t)~optsum;
 1924 
 1925                                 while (m->m_pkthdr.csum_data >> 16)
 1926                                         m->m_pkthdr.csum_data =
 1927                                                 (m->m_pkthdr.csum_data >> 16) +
 1928                                                 (m->m_pkthdr.csum_data &
 1929                                                  0xffff);
 1930                         }
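                        /*
                         * Illustrative arithmetic: a hardware sum of
                         * 0xfff0 plus ~optsum 0xffef gives 0x1ffdf,
                         * which folds to 0xffdf + 1 = 0xffe0, i.e. the
                         * one's-complement subtraction of an options
                         * sum of 0x0010.
                         */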
 1931 
 1932                         m->m_pkthdr.csum_flags |= M_CSUM_DATA |
 1933                                                   M_CSUM_NO_PSEUDOHDR;
 1934                 } else
 1935 swcsum:
 1936                         m->m_pkthdr.csum_flags = 0;
 1937 #endif
 1938                 /* Pass it on. */
 1939                 if_percpuq_enqueue(ifp->if_percpuq, m);
 1940         }
 1941 
 1942         if (progress) {
 1943                 /* Update the receive pointer. */
 1944                 if (i == sc->sc_rxptr) {
 1945                         GEM_COUNTER_INCR(sc, sc_ev_rxfull);
 1946 #ifdef GEM_DEBUG
 1947                         if (ifp->if_flags & IFF_DEBUG)
 1948                                 printf("%s: rint: ring wrap\n",
 1949                                     device_xname(sc->sc_dev));
 1950 #endif
 1951                 }
 1952                 sc->sc_rxptr = i;
 1953                 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
 1954         }
 1955 #ifdef GEM_COUNTERS
 1956         if (progress <= 4) {
 1957                 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
 1958         } else if (progress < 32) {
 1959                 if (progress < 16)
 1960                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
 1961                 else
 1962                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
 1963 
 1964         } else {
 1965                 if (progress < 64)
 1966                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
 1967                 else
 1968                         GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
 1969         }
 1970 #endif
 1971 
 1972         DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
 1973                 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
 1974 
 1975         /* Read error counters ... */
 1976         if_statadd(ifp, if_ierrors,
 1977             bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) +
 1978             bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) +
 1979             bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) +
 1980             bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL));
 1981 
 1982         /* ... then clear the hardware counters. */
 1983         bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
 1984         bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
 1985         bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
 1986         bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
 1987 
 1988         return (1);
 1989 }
 1990 
 1991 
 1992 /*
 1993  * gem_add_rxbuf:
 1994  *
 1995  *      Add a receive buffer to the indicated descriptor.
 1996  */
 1997 int
 1998 gem_add_rxbuf(struct gem_softc *sc, int idx)
 1999 {
 2000         struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
 2001         struct mbuf *m;
 2002         int error;
 2003 
 2004         MGETHDR(m, M_DONTWAIT, MT_DATA);
 2005         if (m == NULL)
 2006                 return (ENOBUFS);
 2007 
 2008         MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
 2009         MCLGET(m, M_DONTWAIT);
 2010         if ((m->m_flags & M_EXT) == 0) {
 2011                 m_freem(m);
 2012                 return (ENOBUFS);
 2013         }
 2014 
 2015 #ifdef GEM_DEBUG
 2016 /* bzero the packet to check DMA */
 2017         memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
 2018 #endif
 2019 
 2020         if (rxs->rxs_mbuf != NULL)
 2021                 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
 2022 
 2023         rxs->rxs_mbuf = m;
 2024 
 2025         error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
 2026             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 2027             BUS_DMA_READ | BUS_DMA_NOWAIT);
 2028         if (error) {
 2029                 aprint_error_dev(sc->sc_dev,
 2030                     "can't load rx DMA map %d, error = %d\n", idx, error);
 2031                 panic("gem_add_rxbuf"); /* XXX */
 2032         }
 2033 
 2034         bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
 2035             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 2036 
 2037         GEM_INIT_RXDESC(sc, idx);
 2038 
 2039         return (0);
 2040 }
 2041 
 2042 
 2043 int
 2044 gem_eint(struct gem_softc *sc, u_int status)
 2045 {
 2046         char bits[128];
 2047         uint32_t r, v;
 2048 
 2049         if ((status & GEM_INTR_MIF) != 0) {
 2050                 printf("%s: XXXlink status changed\n", device_xname(sc->sc_dev));
 2051                 return (1);
 2052         }
 2053 
 2054         if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
 2055                 gem_reset_rxdma(sc);
 2056                 return (1);
 2057         }
 2058 
 2059         if (status & GEM_INTR_BERR) {
 2060                 if (sc->sc_flags & GEM_PCI)
 2061                         r = GEM_ERROR_STATUS;
 2062                 else
 2063                         r = GEM_SBUS_ERROR_STATUS;
 2064                 bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
 2065                 v = bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
 2066                 aprint_error_dev(sc->sc_dev, "bus error interrupt: 0x%02x\n",
 2067                     v);
 2068                 return (1);
 2069         }
 2070         snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
 2071         printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
 2072 
 2073         return (1);
 2074 }
 2075 
 2076 
 2077 /*
 2078  * PCS interrupts.
 2079  * We should receive these when the link status changes, but sometimes
 2080  * we don't receive them for link up.  We compensate for this in the
 2081  * gem_tick() callout.
 2082  */
 2083 int
 2084 gem_pint(struct gem_softc *sc)
 2085 {
 2086         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2087         bus_space_tag_t t = sc->sc_bustag;
 2088         bus_space_handle_t h = sc->sc_h1;
 2089         uint32_t v, v2;
 2090 
 2091         /*
 2092          * Clear the PCS interrupt from GEM_STATUS.  The PCS register is
 2093          * latched, so we have to read it twice.  There is only one bit in
 2094          * use, so the value is meaningless.
 2095          */
 2096         bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
 2097         bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
 2098 
 2099         if ((ifp->if_flags & IFF_UP) == 0)
 2100                 return 1;
 2101 
 2102         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
 2103                 return 1;
 2104 
 2105         v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2106         /* If we see remote fault, our link partner is probably going away */
 2107         if ((v & GEM_MII_STATUS_REM_FLT) != 0) {
 2108                 gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0);
 2109                 v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2110         /* Otherwise, we may need to wait after auto-negotiation completes */
 2111         } else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) ==
 2112             GEM_MII_STATUS_ANEG_CPT) {
 2113                 gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS);
 2114                 v = bus_space_read_4(t, h, GEM_MII_STATUS);
 2115         }
 2116         if ((v & GEM_MII_STATUS_LINK_STS) != 0) {
 2117                 if (sc->sc_flags & GEM_LINK) {
 2118                         return 1;
 2119                 }
 2120                 callout_stop(&sc->sc_tick_ch);
 2121                 v = bus_space_read_4(t, h, GEM_MII_ANAR);
 2122                 v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR);
 2123                 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX;
 2124                 sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE;
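                /*
                 * The intersection of our advertised abilities (ANAR)
                 * and the link partner's (ANLPAR) determines the
                 * negotiated duplex below.
                 */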
 2125                 v &= v2;
 2126                 if (v & GEM_MII_ANEG_FUL_DUPLX) {
 2127                         sc->sc_mii.mii_media_active |= IFM_FDX;
 2128 #ifdef GEM_DEBUG
 2129                         aprint_debug_dev(sc->sc_dev, "link up: full duplex\n");
 2130 #endif
 2131                 } else if (v & GEM_MII_ANEG_HLF_DUPLX) {
 2132                         sc->sc_mii.mii_media_active |= IFM_HDX;
 2133 #ifdef GEM_DEBUG
 2134                         aprint_debug_dev(sc->sc_dev, "link up: half duplex\n");
 2135 #endif
 2136                 } else {
 2137 #ifdef GEM_DEBUG
 2138                         aprint_debug_dev(sc->sc_dev, "duplex mismatch\n");
 2139 #endif
 2140                 }
 2141                 gem_statuschange(sc);
 2142         } else {
 2143                 if ((sc->sc_flags & GEM_LINK) == 0) {
 2144                         return 1;
 2145                 }
 2146                 sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
 2147                 sc->sc_mii.mii_media_status = IFM_AVALID;
 2148 #ifdef GEM_DEBUG
 2149                 aprint_debug_dev(sc->sc_dev, "link down\n");
 2150 #endif
 2151                 gem_statuschange(sc);
 2152 
 2153                 /* Start the 10 second timer */
 2154                 callout_schedule(&sc->sc_tick_ch, hz * 10);
 2155         }
 2156         return 1;
 2157 }
 2158 
 2159 
 2160 
 2161 int
 2162 gem_intr(void *v)
 2163 {
 2164         struct gem_softc *sc = v;
 2165         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2166         bus_space_tag_t t = sc->sc_bustag;
 2167         bus_space_handle_t h = sc->sc_h1;
 2168         uint32_t status;
 2169         int r = 0;
 2170 #ifdef GEM_DEBUG
 2171         char bits[128];
 2172 #endif
 2173 
 2174         /* XXX We should probably mask out interrupts until we're done */
 2175 
 2176         sc->sc_ev_intr.ev_count++;
 2177 
 2178         status = bus_space_read_4(t, h, GEM_STATUS);
 2179 #ifdef GEM_DEBUG
 2180         snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
 2181 #endif
 2182         DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
 2183                 device_xname(sc->sc_dev), (status >> 19), bits));
 2184 
 2185         if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
 2186                 r |= gem_eint(sc, status);
 2187 
 2188         /* We don't bother with GEM_INTR_TX_DONE */
 2189         if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
 2190                 GEM_COUNTER_INCR(sc, sc_ev_txint);
 2191                 r |= gem_tint(sc);
 2192         }
 2193 
 2194         if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
 2195                 GEM_COUNTER_INCR(sc, sc_ev_rxint);
 2196                 r |= gem_rint(sc);
 2197         }
 2198 
 2199         /* We should eventually do more than just print out error stats. */
 2200         if (status & GEM_INTR_TX_MAC) {
 2201                 int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS);
 2202                 if (txstat & ~GEM_MAC_TX_XMIT_DONE)
 2203                         printf("%s: MAC tx fault, status %x\n",
 2204                             device_xname(sc->sc_dev), txstat);
 2205                 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
 2206                         gem_init(ifp);
 2207         }
 2208         if (status & GEM_INTR_RX_MAC) {
 2209                 int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS);
 2210                 /*
 2211                  * On GEM_SUN_GEM and some GEM_SUN_ERI revisions,
 2212                  * GEM_MAC_RX_OVERFLOW occurs often due to a silicon
 2213                  * bug, so handle it silently: on an RX FIFO overflow
 2214                  * we record the FIFO pointers and fire off a timer,
 2215                  * then check whether we're still making progress by
 2216                  * re-reading the RX FIFO write and read pointers.
 2217                  */
 2218                 if (rxstat & GEM_MAC_RX_OVERFLOW) {
 2219                         if_statinc(ifp, if_ierrors);
 2220                         GEM_COUNTER_INCR(sc, sc_ev_rxoverflow);
 2221 #ifdef GEM_DEBUG
 2222                         aprint_error_dev(sc->sc_dev,
 2223                             "receive error: RX overflow sc->rxptr %d, complete %d\n", sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
 2224 #endif
 2225                         sc->sc_rx_fifo_wr_ptr =
 2226                                 bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
 2227                         sc->sc_rx_fifo_rd_ptr =
 2228                                 bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
 2229                         callout_schedule(&sc->sc_rx_watchdog, 400);
 2230                 } else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
 2231                         printf("%s: MAC rx fault, status 0x%02x\n",
 2232                             device_xname(sc->sc_dev), rxstat);
 2233         }
 2234         if (status & GEM_INTR_PCS) {
 2235                 r |= gem_pint(sc);
 2236         }
 2237 
 2238 /* Do we need to do anything with these?
 2239         if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
 2240                 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
 2241                 if ((status2 & GEM_MAC_PAUSED) != 0)
 2242                         aprint_debug_dev(sc->sc_dev, "PAUSE received (%d slots)\n",
 2243                             GEM_MAC_PAUSE_TIME(status2));
 2244                 if ((status2 & GEM_MAC_PAUSE) != 0)
 2245                         aprint_debug_dev(sc->sc_dev, "transitioned to PAUSE state\n");
 2246                 if ((status2 & GEM_MAC_RESUME) != 0)
 2247                         aprint_debug_dev(sc->sc_dev, "transitioned to non-PAUSE state\n");
 2248         }
 2249         if ((status & GEM_INTR_MIF) != 0)
 2250                 aprint_debug_dev(sc->sc_dev, "MIF interrupt\n");
 2251 */
 2252         rnd_add_uint32(&sc->rnd_source, status);
 2253         return (r);
 2254 }
 2255 
 2256 void
 2257 gem_rx_watchdog(void *arg)
 2258 {
 2259         struct gem_softc *sc = arg;
 2260         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2261         bus_space_tag_t t = sc->sc_bustag;
 2262         bus_space_handle_t h = sc->sc_h1;
 2263         uint32_t rx_fifo_wr_ptr;
 2264         uint32_t rx_fifo_rd_ptr;
 2265         uint32_t state;
 2266 
 2267         if ((ifp->if_flags & IFF_RUNNING) == 0) {
 2268                 aprint_error_dev(sc->sc_dev, "receiver not running\n");
 2269                 return;
 2270         }
 2271 
 2272         rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
 2273         rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
 2274         state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
 2275         if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
 2276             ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
 2277              ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
 2278               (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))))
 2279         {
 2280                 /*
 2281                  * The RX state machine is still in overflow state and
 2282                  * the RX FIFO write and read pointers seem to be
 2283                  * stuck.  Whack the chip over the head to get things
 2284                  * going again.
 2285                  */
 2286                 aprint_error_dev(sc->sc_dev,
 2287                     "receiver stuck in overflow, resetting\n");
 2288                 gem_init(ifp);
 2289         } else {
 2290                 int needreset = 1;
 2291                 if ((state & GEM_MAC_STATE_OVERFLOW) != GEM_MAC_STATE_OVERFLOW) {
 2292                         DPRINTF(sc,
 2293                             ("%s: rx_watchdog: not in overflow state: 0x%x\n",
 2294                             device_xname(sc->sc_dev), state));
 2295                 }
 2296                 if (rx_fifo_wr_ptr != rx_fifo_rd_ptr) {
 2297                         DPRINTF(sc,
 2298                             ("%s: rx_watchdog: wr & rd ptr different\n",
 2299                             device_xname(sc->sc_dev)));
 2300                         needreset = 0;
 2301                 }
 2302                 if (sc->sc_rx_fifo_wr_ptr != rx_fifo_wr_ptr) {
 2303                         DPRINTF(sc, ("%s: rx_watchdog: wr pointer != saved\n",
 2304                             device_xname(sc->sc_dev)));
 2305                         needreset = 0;
 2306                 }
 2307                 if (sc->sc_rx_fifo_rd_ptr != rx_fifo_rd_ptr) {
 2308                         DPRINTF(sc, ("%s: rx_watchdog: rd pointer != saved\n",
 2309                             device_xname(sc->sc_dev)));
 2310                         needreset = 0;
 2311                 }
 2312                 if (needreset) {
 2313                         aprint_error_dev(sc->sc_dev,
 2314                             "rx_watchdog: resetting anyway\n");
 2315                         gem_init(ifp);
 2316                 }
 2317         }
 2318 }
 2319 
 2320 void
 2321 gem_watchdog(struct ifnet *ifp)
 2322 {
 2323         struct gem_softc *sc = ifp->if_softc;
 2324 
 2325         DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
 2326                 "GEM_MAC_RX_CONFIG %x\n",
 2327                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
 2328                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
 2329                 bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
 2330 
 2331         log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
 2332         if_statinc(ifp, if_oerrors);
 2333 
 2334         /* Try to get more packets going. */
 2335         gem_init(ifp);
 2336         gem_start(ifp);
 2337 }
 2338 
 2339 /*
 2340  * Initialize the MII Management Interface
 2341  */
 2342 void
 2343 gem_mifinit(struct gem_softc *sc)
 2344 {
 2345         bus_space_tag_t t = sc->sc_bustag;
 2346         bus_space_handle_t mif = sc->sc_h1;
 2347 
 2348         /* Configure the MIF in frame mode */
 2349         sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
 2350         sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
 2351         bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
 2352 }
 2353 
 2354 /*
 2355  * MII interface
 2356  *
 2357  * The GEM MII interface supports at least three different operating modes:
 2358  *
 2359  * Bitbang mode is implemented using data, clock and output enable registers.
 2360  *
 2361  * Frame mode is implemented by loading a complete frame into the frame
 2362  * register and polling the valid bit for completion.
 2363  *
 2364  * Polling mode uses the frame register but completion is indicated by
 2365  * an interrupt.
 2366  *
 2367  */
 2368 static int
 2369 gem_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
 2370 {
 2371         struct gem_softc *sc = device_private(self);
 2372         bus_space_tag_t t = sc->sc_bustag;
 2373         bus_space_handle_t mif = sc->sc_h1;
 2374         int n;
 2375         uint32_t v;
 2376 
 2377 #ifdef GEM_DEBUG1
 2378         if (sc->sc_debug)
 2379                 printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg);
 2380 #endif
 2381 
 2382         /* Construct the frame command */
 2383         v = (reg << GEM_MIF_REG_SHIFT)  | (phy << GEM_MIF_PHY_SHIFT) |
 2384                 GEM_MIF_FRAME_READ;
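        /*
         * The shifts place the 5-bit PHY and register addresses into
         * the standard clause 22 management-frame fields, while
         * GEM_MIF_FRAME_READ supplies the start and read-opcode bits;
         * completion is signalled by GEM_MIF_FRAME_TA0, polled below.
         */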
 2385 
 2386         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 2387         for (n = 0; n < 100; n++) {
 2388                 DELAY(1);
 2389                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 2390                 if (v & GEM_MIF_FRAME_TA0) {
 2391                         *val = v & GEM_MIF_FRAME_DATA;
 2392                         return 0;
 2393                 }
 2394         }
 2395 
 2396         printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
 2397         return ETIMEDOUT;
 2398 }
 2399 
 2400 static int
 2401 gem_mii_writereg(device_t self, int phy, int reg, uint16_t val)
 2402 {
 2403         struct gem_softc *sc = device_private(self);
 2404         bus_space_tag_t t = sc->sc_bustag;
 2405         bus_space_handle_t mif = sc->sc_h1;
 2406         int n;
 2407         uint32_t v;
 2408 
 2409 #ifdef GEM_DEBUG1
 2410         if (sc->sc_debug)
 2411                 printf("gem_mii_writereg: PHY %d reg %d val %x\n",
 2412                         phy, reg, val);
 2413 #endif
 2414 
 2415         /* Construct the frame command */
 2416         v = GEM_MIF_FRAME_WRITE                 |
 2417             (phy << GEM_MIF_PHY_SHIFT)          |
 2418             (reg << GEM_MIF_REG_SHIFT)          |
 2419             (val & GEM_MIF_FRAME_DATA);
 2420 
 2421         bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
 2422         for (n = 0; n < 100; n++) {
 2423                 DELAY(1);
 2424                 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
 2425                 if (v & GEM_MIF_FRAME_TA0)
 2426                         return 0;
 2427         }
 2428 
 2429         printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
 2430         return ETIMEDOUT;
 2431 }
 2432 
 2433 static void
 2434 gem_mii_statchg(struct ifnet *ifp)
 2435 {
 2436         struct gem_softc *sc = ifp->if_softc;
 2437 #ifdef GEM_DEBUG
 2438         int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
 2439 #endif
 2440 
 2441 #ifdef GEM_DEBUG
 2442         if (sc->sc_debug)
 2443                 printf("gem_mii_statchg: status change: phy = %d\n",
 2444                         sc->sc_phys[instance]);
 2445 #endif
 2446         gem_statuschange(sc);
 2447 }
 2448 
 2449 /*
 2450  * Common status change for gem_mii_statchg() and gem_pint()
 2451  */
 2452 void
 2453 gem_statuschange(struct gem_softc* sc)
 2454 {
 2455         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2456         bus_space_tag_t t = sc->sc_bustag;
 2457         bus_space_handle_t mac = sc->sc_h1;
 2458         int gigabit;
 2459         uint32_t rxcfg, txcfg, v;
 2460 
 2461         if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 &&
 2462             IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE)
 2463                 sc->sc_flags |= GEM_LINK;
 2464         else
 2465                 sc->sc_flags &= ~GEM_LINK;
 2466 
 2467         if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
 2468                 gigabit = 1;
 2469         else
 2470                 gigabit = 0;
 2471 
 2472         /*
 2473          * The configuration done here corresponds to the steps F) and
 2474          * G) and as far as enabling of RX and TX MAC goes also step H)
 2475          * of the initialization sequence outlined in section 3.2.1 of
 2476          * the GEM Gigabit Ethernet ASIC Specification.
 2477          */
 2478 
 2479         rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG);
 2480         rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
 2481         txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
 2482         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 2483                 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
 2484         else if (gigabit) {
 2485                 rxcfg |= GEM_MAC_RX_CARR_EXTEND;
 2486                 txcfg |= GEM_MAC_TX_CARR_EXTEND;
 2487         }
 2488         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
 2489         bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4,
 2490             BUS_SPACE_BARRIER_WRITE);
 2491         if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
 2492                 aprint_normal_dev(sc->sc_dev, "cannot disable TX MAC\n");
 2493         bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg);
 2494         bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0);
 2495         bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4,
 2496             BUS_SPACE_BARRIER_WRITE);
 2497         if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
 2498                 aprint_normal_dev(sc->sc_dev, "cannot disable RX MAC\n");
 2499         bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg);
 2500 
 2501         v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) &
 2502             ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
 2503         bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
 2504 
 2505         if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 &&
 2506             gigabit != 0)
 2507                 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
 2508                     GEM_MAC_SLOT_TIME_CARR_EXTEND);
 2509         else
 2510                 bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
 2511                     GEM_MAC_SLOT_TIME_NORMAL);
 2512 
 2513         /* XIF Configuration */
 2514         if (sc->sc_flags & GEM_LINK)
 2515                 v = GEM_MAC_XIF_LINK_LED;
 2516         else
 2517                 v = 0;
 2518         v |= GEM_MAC_XIF_TX_MII_ENA;
 2519 
 2520         /* If an external transceiver is connected, enable its MII drivers */
 2521         sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
 2522         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
 2523                 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
 2524                         if (gigabit)
 2525                                 v |= GEM_MAC_XIF_GMII_MODE;
 2526                         else
 2527                                 v &= ~GEM_MAC_XIF_GMII_MODE;
 2528                 } else
 2529                         /* Internal MII needs buf enable */
 2530                         v |= GEM_MAC_XIF_MII_BUF_ENA;
 2531                 /* MII needs echo disable if half duplex. */
 2532                 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 2533                         /* turn on full duplex LED */
 2534                         v |= GEM_MAC_XIF_FDPLX_LED;
 2535                 else
 2536                         /* half duplex -- disable echo */
 2537                         v |= GEM_MAC_XIF_ECHO_DISABL;
 2538         } else {
 2539                 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
 2540                         v |= GEM_MAC_XIF_FDPLX_LED;
 2541                 v |= GEM_MAC_XIF_GMII_MODE;
 2542         }
 2543         bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
 2544 
 2545         if ((ifp->if_flags & IFF_RUNNING) != 0 &&
 2546             (sc->sc_flags & GEM_LINK) != 0) {
 2547                 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG,
 2548                     txcfg | GEM_MAC_TX_ENABLE);
 2549                 bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG,
 2550                     rxcfg | GEM_MAC_RX_ENABLE);
 2551         }
 2552 }
 2553 
 2554 int
 2555 gem_ser_mediachange(struct ifnet *ifp)
 2556 {
 2557         struct gem_softc *sc = ifp->if_softc;
 2558         u_int s, t;
 2559 
 2560         if (IFM_TYPE(sc->sc_mii.mii_media.ifm_media) != IFM_ETHER)
 2561                 return EINVAL;
 2562 
 2563         s = IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media);
 2564         if (s == IFM_AUTO) {
 2565                 if (sc->sc_mii_media != s) {
 2566 #ifdef GEM_DEBUG
 2567                         aprint_debug_dev(sc->sc_dev, "setting media to auto\n");
 2568 #endif
 2569                         sc->sc_mii_media = s;
 2570                         if (ifp->if_flags & IFF_UP) {
 2571                                 gem_pcs_stop(sc, 0);
 2572                                 gem_pcs_start(sc);
 2573                         }
 2574                 }
 2575                 return 0;
 2576         }
 2577         if (s == IFM_1000_SX) {
 2578                 t = IFM_OPTIONS(sc->sc_mii.mii_media.ifm_media)
 2579                     & (IFM_FDX | IFM_HDX);
 2580                 if ((sc->sc_mii_media & (IFM_FDX | IFM_HDX)) != t) {
 2581                         sc->sc_mii_media &= ~(IFM_FDX | IFM_HDX);
 2582                         sc->sc_mii_media |= t;
 2583 #ifdef GEM_DEBUG
 2584                         aprint_debug_dev(sc->sc_dev,
 2585                             "setting media to 1000baseSX-%s\n",
 2586                             t == IFM_FDX ? "FDX" : "HDX");
 2587 #endif
 2588                         if (ifp->if_flags & IFF_UP) {
 2589                                 gem_pcs_stop(sc, 0);
 2590                                 gem_pcs_start(sc);
 2591                         }
 2592                 }
 2593                 return 0;
 2594         }
 2595         return EINVAL;
 2596 }
 2597 
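/*
 * Media status handler for the SERDES/serial-link case; report
 * whatever state the PCS code last recorded in the MII data.
 */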
 2598 void
 2599 gem_ser_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2600 {
 2601         struct gem_softc *sc = ifp->if_softc;
 2602 
 2603         if ((ifp->if_flags & IFF_UP) == 0)
 2604                 return;
 2605         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 2606         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 2607 }
 2608 
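/*
 * ethercom(9) if_flags callback: toggling IFF_PROMISC alone only
 * requires reloading the address filter, while any other flag change
 * forces a full reinitialization via ENETRESET.
 */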
 2609 static int
 2610 gem_ifflags_cb(struct ethercom *ec)
 2611 {
 2612         struct ifnet *ifp = &ec->ec_if;
 2613         struct gem_softc *sc = ifp->if_softc;
 2614         u_short change = ifp->if_flags ^ sc->sc_if_flags;
 2615 
 2616         if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
 2617                 return ENETRESET;
 2618         else if ((change & IFF_PROMISC) != 0)
 2619                 gem_setladrf(sc);
 2620         return 0;
 2621 }
 2622 
 2623 /*
 2624  * Process an ioctl request.
 2625  */
 2626 int
 2627 gem_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
 2628 {
 2629         struct gem_softc *sc = ifp->if_softc;
 2630         int s, error = 0;
 2631 
 2632         s = splnet();
 2633 
 2634         if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
 2635                 error = 0;
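                /*
                 * A multicast list change only requires reloading the
                 * hardware filter, and only while the interface is
                 * running; everything else is picked up at the next
                 * init.
                 */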
 2636                 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
 2637                         ;
 2638                 else if (ifp->if_flags & IFF_RUNNING) {
 2639                         /*
 2640                          * Multicast list has changed; set the hardware filter
 2641                          * accordingly.
 2642                          */
 2643                         gem_setladrf(sc);
 2644                 }
 2645         }
 2646 
 2647         /* Try to get things going again */
 2648         if (ifp->if_flags & IFF_UP)
 2649                 gem_start(ifp);
 2650         splx(s);
 2651         return (error);
 2652 }
 2653 
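/*
 * Unmask the interrupt sources this driver uses.  A bit set in
 * GEM_INTMASK masks the corresponding source, so the complement of the
 * wanted set is written; the PCS interrupt replaces the MIF interrupt
 * on SERDES/serial links.
 */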
 2654 static void
 2655 gem_inten(struct gem_softc *sc)
 2656 {
 2657         bus_space_tag_t t = sc->sc_bustag;
 2658         bus_space_handle_t h = sc->sc_h1;
 2659         uint32_t v;
 2660 
 2661         if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
 2662                 v = GEM_INTR_PCS;
 2663         else
 2664                 v = GEM_INTR_MIF;
 2665         bus_space_write_4(t, h, GEM_INTMASK,
 2666                       ~(GEM_INTR_TX_INTME |
 2667                         GEM_INTR_TX_EMPTY |
 2668                         GEM_INTR_TX_MAC |
 2669                         GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF |
 2670                         GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL |
 2671                         GEM_INTR_BERR | v));
 2672 }
 2673 
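/*
 * pmf(9) resume hook: re-enable the interrupt sources that
 * gem_suspend() masked off.
 */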
 2674 bool
 2675 gem_resume(device_t self, const pmf_qual_t *qual)
 2676 {
 2677         struct gem_softc *sc = device_private(self);
 2678 
 2679         gem_inten(sc);
 2680 
 2681         return true;
 2682 }
 2683 
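/*
 * pmf(9) suspend hook: mask all interrupts so the chip stays quiet
 * while the system is suspended.
 */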
 2684 bool
 2685 gem_suspend(device_t self, const pmf_qual_t *qual)
 2686 {
 2687         struct gem_softc *sc = device_private(self);
 2688         bus_space_tag_t t = sc->sc_bustag;
 2689         bus_space_handle_t h = sc->sc_h1;
 2690 
 2691         bus_space_write_4(t, h, GEM_INTMASK, ~(uint32_t)0);
 2692 
 2693         return true;
 2694 }
 2695 
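/*
 * Shutdown hook: stop and disable the chip so that it performs no
 * further DMA once the system goes down.
 */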
 2696 bool
 2697 gem_shutdown(device_t self, int howto)
 2698 {
 2699         struct gem_softc *sc = device_private(self);
 2700         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2701 
 2702         gem_stop(ifp, 1);
 2703 
 2704         return true;
 2705 }
 2706 
 2707 /*
 2708  * Set up the logical address filter.
 2709  */
 2710 void
 2711 gem_setladrf(struct gem_softc *sc)
 2712 {
 2713         struct ethercom *ec = &sc->sc_ethercom;
 2714         struct ifnet *ifp = &ec->ec_if;
 2715         struct ether_multi *enm;
 2716         struct ether_multistep step;
 2717         bus_space_tag_t t = sc->sc_bustag;
 2718         bus_space_handle_t h = sc->sc_h1;
 2719         uint32_t crc;
 2720         uint32_t hash[16];
 2721         uint32_t v;
 2722         int i;
 2723 
 2724         /* Get current RX configuration */
 2725         v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
 2726 
 2727         /*
 2728          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 2729          * and hash filter.  Depending on the case, the right bit will be
 2730          * enabled.
 2731          */
 2732         v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
 2733             GEM_MAC_RX_PROMISC_GRP);
 2734 
 2735         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 2736                 /* Turn on promiscuous mode */
 2737                 v |= GEM_MAC_RX_PROMISCUOUS;
 2738                 ifp->if_flags |= IFF_ALLMULTI;
 2739                 goto chipit;
 2740         }
 2741 
 2742         /*
 2743          * Set up multicast address filter by passing all multicast addresses
 2744          * through a crc generator, and then using the high order 8 bits as an
 2745          * index into the 256 bit logical address filter.  The high order 4
 2746          * bits selects the word, while the other 4 bits select the bit within
 2747          * the word (where bit 0 is the MSB).
 2748          */
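        /*
         * Worked example (CRC value hypothetical): if the CRC of an
         * address were 0x47xxxxxx, then crc >> 24 == 0x47 == 71, the
         * word index is 71 >> 4 == 4 and the bit is 15 - (71 & 15) == 8,
         * so the loop below would set hash[4] |= 0x100.
         */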
 2749 
 2750         /* Clear hash table */
 2751         memset(hash, 0, sizeof(hash));
 2752 
 2753         ETHER_LOCK(ec);
 2754         ETHER_FIRST_MULTI(step, ec, enm);
 2755         while (enm != NULL) {
 2756                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 2757                         /*
 2758                          * We must listen to a range of multicast addresses.
 2759                          * For now, just accept all multicasts, rather than
 2760                          * trying to set only those filter bits needed to match
 2761                          * the range.  (At this time, the only use of address
 2762                          * ranges is for IP multicast routing, for which the
 2763                          * range is big enough to require all bits set.)
 2764                          * XXX should use the address filters for this
 2765                          */
 2766                         ifp->if_flags |= IFF_ALLMULTI;
 2767                         v |= GEM_MAC_RX_PROMISC_GRP;
 2768                         ETHER_UNLOCK(ec);
 2769                         goto chipit;
 2770                 }
 2771 
 2772                 /* Get the LE CRC32 of the address */
 2773                 crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));
 2774 
 2775                 /* Just want the 8 most significant bits. */
 2776                 crc >>= 24;
 2777 
 2778                 /* Set the corresponding bit in the filter. */
 2779                 hash[crc >> 4] |= 1 << (15 - (crc & 15));
 2780 
 2781                 ETHER_NEXT_MULTI(step, enm);
 2782         }
 2783         ETHER_UNLOCK(ec);
 2784 
 2785         v |= GEM_MAC_RX_HASH_FILTER;
 2786         ifp->if_flags &= ~IFF_ALLMULTI;
 2787 
 2788         /* Now load the hash table into the chip (if we are using it) */
 2789         for (i = 0; i < 16; i++) {
 2790                 bus_space_write_4(t, h,
 2791                     GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
 2792                     hash[i]);
 2793         }
 2794 
 2795 chipit:
 2796         sc->sc_if_flags = ifp->if_flags;
 2797         bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 2798 }
