FreeBSD/Linux Kernel Cross Reference
sys/dev/cadence/if_cgem.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 /*
    30  * A network interface driver for the Cadence GEM Gigabit Ethernet
    31  * controller, such as the one used in the Xilinx Zynq-7000 SoC.
   32  *
   33  * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
   34  * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
   35  * and register definitions are in appendix B.18.
   36  */
   37 
   38 #include <sys/cdefs.h>
   39 __FBSDID("$FreeBSD$");
   40 
   41 #include <sys/param.h>
   42 #include <sys/systm.h>
   43 #include <sys/bus.h>
   44 #include <sys/kernel.h>
   45 #include <sys/malloc.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/module.h>
   48 #include <sys/rman.h>
   49 #include <sys/socket.h>
   50 #include <sys/sockio.h>
   51 #include <sys/sysctl.h>
   52 
   53 #include <machine/bus.h>
   54 
   55 #include <net/ethernet.h>
   56 #include <net/if.h>
   57 #include <net/if_arp.h>
   58 #include <net/if_dl.h>
   59 #include <net/if_media.h>
   60 #include <net/if_mib.h>
   61 #include <net/if_types.h>
   62 
   63 #ifdef INET
   64 #include <netinet/in.h>
   65 #include <netinet/in_systm.h>
   66 #include <netinet/in_var.h>
   67 #include <netinet/ip.h>
   68 #endif
   69 
   70 #include <net/bpf.h>
   71 #include <net/bpfdesc.h>
   72 
   73 #include <dev/fdt/fdt_common.h>
   74 #include <dev/ofw/ofw_bus.h>
   75 #include <dev/ofw/ofw_bus_subr.h>
   76 
   77 #include <dev/mii/mii.h>
   78 #include <dev/mii/miivar.h>
   79 #include <dev/mii/mii_fdt.h>
   80 
   81 #include <dev/extres/clk/clk.h>
   82 
   83 #if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
   84 #define CGEM64
   85 #endif
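       /*
        * With CGEM64 defined, the driver uses the controller's 64-bit
        * descriptor layout (the addrhi fields written under #ifdef CGEM64
        * below) and programs the upper halves of the descriptor ring base
        * addresses through the CGEM_RX_QBAR_HI/CGEM_TX_QBAR_HI registers.
        */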
   86 
   87 #include <dev/cadence/if_cgem_hw.h>
   88 
   89 #include "miibus_if.h"
   90 
   91 #define IF_CGEM_NAME "cgem"
   92 
   93 #define CGEM_NUM_RX_DESCS       512     /* size of receive descriptor ring */
   94 #define CGEM_NUM_TX_DESCS       512     /* size of transmit descriptor ring */
   95 
   96 /* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
   97 #define DEFAULT_NUM_RX_BUFS     256     /* number of receive bufs to queue. */
   98 
   99 #define TX_MAX_DMA_SEGS         8       /* maximum segs in a tx mbuf dma */
  100 
  101 #define CGEM_CKSUM_ASSIST       (CSUM_IP | CSUM_TCP | CSUM_UDP | \
  102                                  CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
  103 
  104 #define HWQUIRK_NONE            0
  105 #define HWQUIRK_NEEDNULLQS      1
  106 #define HWQUIRK_RXHANGWAR       2
  107 #define HWQUIRK_TXCLK           4
  108 #define HWQUIRK_PCLK            8
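       /*
        * Quirk notes: HWQUIRK_NEEDNULLQS marks controllers whose extra
        * priority queues cannot be disabled and must be parked on null
        * descriptor rings (see cgem_null_qs()).  HWQUIRK_RXHANGWAR enables
        * the periodic receiver-reset work-around in cgem_tick().
        * HWQUIRK_TXCLK and HWQUIRK_PCLK indicate which clock input is
        * treated as the reference clock (sc->ref_clk); see cgem_attach().
        */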
  109 
  110 static struct ofw_compat_data compat_data[] = {
  111         { "cdns,zynq-gem",              HWQUIRK_RXHANGWAR | HWQUIRK_TXCLK },
  112         { "cdns,zynqmp-gem",            HWQUIRK_NEEDNULLQS | HWQUIRK_TXCLK },
  113         { "microchip,mpfs-mss-gem",     HWQUIRK_NEEDNULLQS | HWQUIRK_TXCLK },
  114         { "sifive,fu540-c000-gem",      HWQUIRK_PCLK },
  115         { "sifive,fu740-c000-gem",      HWQUIRK_PCLK },
  116         { "cdns,gem",                   HWQUIRK_NONE },
  117         { "cdns,macb",                  HWQUIRK_NONE },
  118         { "cadence,gem",                HWQUIRK_NONE },
  119         { NULL,                         0 }
  120 };
  121 
  122 struct cgem_softc {
  123         if_t                    ifp;
  124         struct mtx              sc_mtx;
  125         device_t                dev;
  126         device_t                miibus;
  127         u_int                   mii_media_active;       /* last active media */
  128         int                     if_old_flags;
  129         struct resource         *mem_res;
  130         struct resource         *irq_res;
  131         void                    *intrhand;
  132         struct callout          tick_ch;
  133         uint32_t                net_ctl_shadow;
  134         uint32_t                net_cfg_shadow;
  135         clk_t                   ref_clk;
  136         int                     neednullqs;
  137         int                     phy_contype;
  138 
  139         bus_dma_tag_t           desc_dma_tag;
  140         bus_dma_tag_t           mbuf_dma_tag;
  141 
  142         /* receive descriptor ring */
  143         struct cgem_rx_desc     *rxring;
  144         bus_addr_t              rxring_physaddr;
  145         struct mbuf             *rxring_m[CGEM_NUM_RX_DESCS];
  146         bus_dmamap_t            rxring_m_dmamap[CGEM_NUM_RX_DESCS];
  147         int                     rxring_hd_ptr;  /* where to put rcv bufs */
  148         int                     rxring_tl_ptr;  /* where to get receives */
  149         int                     rxring_queued;  /* how many rcv bufs queued */
  150         bus_dmamap_t            rxring_dma_map;
  151         int                     rxbufs;         /* tunable number rcv bufs */
  152         int                     rxhangwar;      /* rx hang work-around */
  153         u_int                   rxoverruns;     /* rx overruns */
  154         u_int                   rxnobufs;       /* rx buf ring empty events */
  155         u_int                   rxdmamapfails;  /* rx dmamap failures */
  156         uint32_t                rx_frames_prev;
  157 
  158         /* transmit descriptor ring */
  159         struct cgem_tx_desc     *txring;
  160         bus_addr_t              txring_physaddr;
  161         struct mbuf             *txring_m[CGEM_NUM_TX_DESCS];
  162         bus_dmamap_t            txring_m_dmamap[CGEM_NUM_TX_DESCS];
  163         int                     txring_hd_ptr;  /* where to put next xmits */
  164         int                     txring_tl_ptr;  /* next xmit mbuf to free */
  165         int                     txring_queued;  /* num xmits segs queued */
  166         u_int                   txfull;         /* tx ring full events */
  167         u_int                   txdefrags;      /* tx calls to m_defrag() */
  168         u_int                   txdefragfails;  /* tx m_defrag() failures */
  169         u_int                   txdmamapfails;  /* tx dmamap failures */
  170 
  171         /* null descriptor rings */
  172         void                    *null_qs;
  173         bus_addr_t              null_qs_physaddr;
  174 
  175         /* hardware provided statistics */
  176         struct cgem_hw_stats {
  177                 uint64_t                tx_bytes;
  178                 uint32_t                tx_frames;
  179                 uint32_t                tx_frames_bcast;
  180                 uint32_t                tx_frames_multi;
  181                 uint32_t                tx_frames_pause;
  182                 uint32_t                tx_frames_64b;
  183                 uint32_t                tx_frames_65to127b;
  184                 uint32_t                tx_frames_128to255b;
  185                 uint32_t                tx_frames_256to511b;
  186                 uint32_t                tx_frames_512to1023b;
  187                 uint32_t                tx_frames_1024to1536b;
  188                 uint32_t                tx_under_runs;
  189                 uint32_t                tx_single_collisn;
  190                 uint32_t                tx_multi_collisn;
  191                 uint32_t                tx_excsv_collisn;
  192                 uint32_t                tx_late_collisn;
  193                 uint32_t                tx_deferred_frames;
  194                 uint32_t                tx_carrier_sense_errs;
  195 
  196                 uint64_t                rx_bytes;
  197                 uint32_t                rx_frames;
  198                 uint32_t                rx_frames_bcast;
  199                 uint32_t                rx_frames_multi;
  200                 uint32_t                rx_frames_pause;
  201                 uint32_t                rx_frames_64b;
  202                 uint32_t                rx_frames_65to127b;
  203                 uint32_t                rx_frames_128to255b;
  204                 uint32_t                rx_frames_256to511b;
  205                 uint32_t                rx_frames_512to1023b;
  206                 uint32_t                rx_frames_1024to1536b;
  207                 uint32_t                rx_frames_undersize;
  208                 uint32_t                rx_frames_oversize;
  209                 uint32_t                rx_frames_jabber;
  210                 uint32_t                rx_frames_fcs_errs;
  211                 uint32_t                rx_frames_length_errs;
  212                 uint32_t                rx_symbol_errs;
  213                 uint32_t                rx_align_errs;
  214                 uint32_t                rx_resource_errs;
  215                 uint32_t                rx_overrun_errs;
  216                 uint32_t                rx_ip_hdr_csum_errs;
  217                 uint32_t                rx_tcp_csum_errs;
  218                 uint32_t                rx_udp_csum_errs;
  219         } stats;
  220 };
  221 
  222 #define RD4(sc, off)            (bus_read_4((sc)->mem_res, (off)))
  223 #define WR4(sc, off, val)       (bus_write_4((sc)->mem_res, (off), (val)))
  224 #define BARRIER(sc, off, len, flags) \
   225         (bus_barrier((sc)->mem_res, (off), (len), (flags)))
  226 
  227 #define CGEM_LOCK(sc)           mtx_lock(&(sc)->sc_mtx)
  228 #define CGEM_UNLOCK(sc)         mtx_unlock(&(sc)->sc_mtx)
  229 #define CGEM_LOCK_INIT(sc)      mtx_init(&(sc)->sc_mtx, \
  230             device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
  231 #define CGEM_LOCK_DESTROY(sc)   mtx_destroy(&(sc)->sc_mtx)
  232 #define CGEM_ASSERT_LOCKED(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)
  233 
  234 /* Allow platforms to optionally provide a way to set the reference clock. */
  235 int cgem_set_ref_clk(int unit, int frequency);
  236 
  237 static int cgem_probe(device_t dev);
  238 static int cgem_attach(device_t dev);
  239 static int cgem_detach(device_t dev);
  240 static void cgem_tick(void *);
  241 static void cgem_intr(void *);
  242 
  243 static void cgem_mediachange(struct cgem_softc *, struct mii_data *);
  244 
  245 static void
  246 cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
  247 {
  248         int i;
  249         uint32_t rnd;
  250 
  251         /* See if boot loader gave us a MAC address already. */
  252         for (i = 0; i < 4; i++) {
  253                 uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
  254                 uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
  255                 if (low != 0 || high != 0) {
  256                         eaddr[0] = low & 0xff;
  257                         eaddr[1] = (low >> 8) & 0xff;
  258                         eaddr[2] = (low >> 16) & 0xff;
  259                         eaddr[3] = (low >> 24) & 0xff;
  260                         eaddr[4] = high & 0xff;
  261                         eaddr[5] = (high >> 8) & 0xff;
  262                         break;
  263                 }
  264         }
  265 
  266         /* No MAC from boot loader?  Assign a random one. */
  267         if (i == 4) {
  268                 rnd = arc4random();
  269 
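                       /*
                        * 'b' is 0x62, which has the locally-administered bit
                        * set and the multicast bit clear, so the generated
                        * address is a valid unicast address.
                        */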
  270                 eaddr[0] = 'b';
  271                 eaddr[1] = 's';
  272                 eaddr[2] = 'd';
  273                 eaddr[3] = (rnd >> 16) & 0xff;
  274                 eaddr[4] = (rnd >> 8) & 0xff;
  275                 eaddr[5] = rnd & 0xff;
  276 
  277                 device_printf(sc->dev, "no mac address found, assigning "
  278                     "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
  279                     eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
  280         }
  281 
  282         /* Move address to first slot and zero out the rest. */
  283         WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
  284             (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
  285         WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
  286 
  287         for (i = 1; i < 4; i++) {
  288                 WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
  289                 WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
  290         }
  291 }
  292 
  293 /*
   294  * cgem_mac_hash():  maps a 48-bit address to a 6-bit hash.  The 6-bit hash
  295  * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
  296  * hash register enables reception of all frames with a destination address
  297  * that hashes to that 6-bit value.
  298  *
  299  * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
  300  * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
  301  * every sixth bit in the destination address.
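        * For example, the broadcast address ff:ff:ff:ff:ff:ff hashes to 0:
        * each of the six hash bits is the exclusive-or of eight 1 bits.
        * Address bit 0 here is the least significant bit of eaddr[0].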
  302  */
  303 static int
  304 cgem_mac_hash(u_char eaddr[])
  305 {
  306         int hash;
  307         int i, j;
  308 
  309         hash = 0;
  310         for (i = 0; i < 6; i++)
  311                 for (j = i; j < 48; j += 6)
  312                         if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
  313                                 hash ^= (1 << i);
  314 
  315         return hash;
  316 }
  317 
  318 static u_int
  319 cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  320 {
  321         uint32_t *hashes = arg;
  322         int index;
  323 
  324         index = cgem_mac_hash(LLADDR(sdl));
  325         if (index > 31)
  326                 hashes[0] |= (1U << (index - 32));
  327         else
  328                 hashes[1] |= (1U << index);
  329 
  330         return (1);
  331 }
  332 
  333 /*
  334  * After any change in rx flags or multi-cast addresses, set up hash registers
  335  * and net config register bits.
  336  */
  337 static void
  338 cgem_rx_filter(struct cgem_softc *sc)
  339 {
  340         if_t ifp = sc->ifp;
  341         uint32_t hashes[2] = { 0, 0 };
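               /*
                * hashes[0] accumulates hash bits 63..32 (CGEM_HASH_TOP) and
                * hashes[1] bits 31..0 (CGEM_HASH_BOT); see cgem_hash_maddr().
                */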
  342 
  343         sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
  344             CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);
  345 
  346         if ((if_getflags(ifp) & IFF_PROMISC) != 0)
  347                 sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
  348         else {
  349                 if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
  350                         sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
  351                 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
  352                         hashes[0] = 0xffffffff;
  353                         hashes[1] = 0xffffffff;
  354                 } else
  355                         if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);
  356 
  357                 if (hashes[0] != 0 || hashes[1] != 0)
  358                         sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
  359         }
  360 
  361         WR4(sc, CGEM_HASH_TOP, hashes[0]);
  362         WR4(sc, CGEM_HASH_BOT, hashes[1]);
  363         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
  364 }
  365 
  366 /* For bus_dmamap_load() callback. */
  367 static void
  368 cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  369 {
  370 
  371         if (nsegs != 1 || error != 0)
  372                 return;
  373         *(bus_addr_t *)arg = segs[0].ds_addr;
  374 }
  375 
   376 /* Set up null queues for priority queues that we can't actually disable. */
  377 static void
  378 cgem_null_qs(struct cgem_softc *sc)
  379 {
  380         struct cgem_rx_desc *rx_desc;
  381         struct cgem_tx_desc *tx_desc;
  382         uint32_t queue_mask;
  383         int n;
  384 
  385         /* Read design config register 6 to determine number of queues. */
  386         queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
  387             CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
  388         if (queue_mask == 0)
  389                 return;
  390 
  391         /* Create empty RX queue and empty TX buf queues. */
  392         memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
  393             sizeof(struct cgem_tx_desc));
  394         rx_desc = sc->null_qs;
  395         rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
  396         tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
  397         tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;
  398 
  399         /* Point all valid ring base pointers to the null queues. */
  400         for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
  401                 WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
  402                 WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
  403                     sizeof(struct cgem_rx_desc));
  404         }
  405 }
  406 
  407 /* Create DMA'able descriptor rings. */
  408 static int
  409 cgem_setup_descs(struct cgem_softc *sc)
  410 {
  411         int i, err;
  412         int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
  413             CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
  414 
  415         if (sc->neednullqs)
  416                 desc_rings_size += sizeof(struct cgem_rx_desc) +
  417                     sizeof(struct cgem_tx_desc);
  418 
  419         sc->txring = NULL;
  420         sc->rxring = NULL;
  421 
  422         /* Allocate non-cached DMA space for RX and TX descriptors. */
  423         err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
  424 #ifdef CGEM64
  425             1ULL << 32, /* Do not cross a 4G boundary. */
  426 #else
  427             0,
  428 #endif
  429             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
  430             desc_rings_size, 1, desc_rings_size, 0,
  431             busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
  432         if (err)
  433                 return (err);
  434 
  435         /* Set up a bus_dma_tag for mbufs. */
  436         err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
  437             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  438             TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
  439             &sc->mbuf_dma_tag);
  440         if (err)
  441                 return (err);
  442 
  443         /*
   444          * Allocate DMA memory.  We allocate the transmit, receive, and
   445          * null descriptor queues all at once because the hardware provides
   446          * only one register for the upper 32 bits of the rx and tx
   447          * descriptor queue addresses.
  448          */
  449         err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
  450             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
  451             &sc->rxring_dma_map);
  452         if (err)
  453                 return (err);
  454 
  455         /* Load descriptor DMA memory. */
  456         err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
  457             (void *)sc->rxring, desc_rings_size,
  458             cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
  459         if (err)
  460                 return (err);
  461 
  462         /* Initialize RX descriptors. */
  463         for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
  464                 sc->rxring[i].addr = CGEM_RXDESC_OWN;
  465                 sc->rxring[i].ctl = 0;
  466                 sc->rxring_m[i] = NULL;
  467                 sc->rxring_m_dmamap[i] = NULL;
  468         }
  469         sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
  470 
  471         sc->rxring_hd_ptr = 0;
  472         sc->rxring_tl_ptr = 0;
  473         sc->rxring_queued = 0;
  474 
  475         sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
  476         sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
  477             sizeof(struct cgem_rx_desc);
  478 
  479         /* Initialize TX descriptor ring. */
  480         for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
  481                 sc->txring[i].addr = 0;
  482                 sc->txring[i].ctl = CGEM_TXDESC_USED;
  483                 sc->txring_m[i] = NULL;
  484                 sc->txring_m_dmamap[i] = NULL;
  485         }
  486         sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
  487 
  488         sc->txring_hd_ptr = 0;
  489         sc->txring_tl_ptr = 0;
  490         sc->txring_queued = 0;
  491 
  492         if (sc->neednullqs) {
  493                 sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
  494                 sc->null_qs_physaddr = sc->txring_physaddr +
  495                     CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
  496 
  497                 cgem_null_qs(sc);
  498         }
  499 
  500         return (0);
  501 }
  502 
  503 /* Fill receive descriptor ring with mbufs. */
  504 static void
  505 cgem_fill_rqueue(struct cgem_softc *sc)
  506 {
  507         struct mbuf *m = NULL;
  508         bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
  509         int nsegs;
  510 
  511         CGEM_ASSERT_LOCKED(sc);
  512 
  513         while (sc->rxring_queued < sc->rxbufs) {
  514                 /* Get a cluster mbuf. */
  515                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  516                 if (m == NULL)
  517                         break;
  518 
  519                 m->m_len = MCLBYTES;
  520                 m->m_pkthdr.len = MCLBYTES;
  521                 m->m_pkthdr.rcvif = sc->ifp;
  522 
  523                 /* Load map and plug in physical address. */
  524                 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
  525                     &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
  526                         sc->rxdmamapfails++;
  527                         m_free(m);
  528                         break;
  529                 }
  530                 if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
  531                     sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
  532                     segs, &nsegs, BUS_DMA_NOWAIT)) {
  533                         sc->rxdmamapfails++;
  534                         bus_dmamap_destroy(sc->mbuf_dma_tag,
  535                                    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
  536                         sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
  537                         m_free(m);
  538                         break;
  539                 }
  540                 sc->rxring_m[sc->rxring_hd_ptr] = m;
  541 
  542                 /* Sync cache with receive buffer. */
  543                 bus_dmamap_sync(sc->mbuf_dma_tag,
  544                     sc->rxring_m_dmamap[sc->rxring_hd_ptr],
  545                     BUS_DMASYNC_PREREAD);
  546 
  547                 /* Write rx descriptor and increment head pointer. */
  548                 sc->rxring[sc->rxring_hd_ptr].ctl = 0;
  549 #ifdef CGEM64
  550                 sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
  551 #endif
  552                 if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
  553                         sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
  554                             CGEM_RXDESC_WRAP;
  555                         sc->rxring_hd_ptr = 0;
  556                 } else
  557                         sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
  558 
  559                 sc->rxring_queued++;
  560         }
  561 }
  562 
  563 /* Pull received packets off of receive descriptor ring. */
  564 static void
  565 cgem_recv(struct cgem_softc *sc)
  566 {
  567         if_t ifp = sc->ifp;
  568         struct mbuf *m, *m_hd, **m_tl;
  569         uint32_t ctl;
  570 
  571         CGEM_ASSERT_LOCKED(sc);
  572 
  573         /* Pick up all packets in which the OWN bit is set. */
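               /*
                * The controller sets the OWN bit in a descriptor's address
                * word once it has written a received frame into the attached
                * buffer; cgem_fill_rqueue() hands descriptors back to the
                * controller with the bit clear.
                */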
  574         m_hd = NULL;
  575         m_tl = &m_hd;
  576         while (sc->rxring_queued > 0 &&
  577             (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
  578                 ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
  579 
  580                 /* Grab filled mbuf. */
  581                 m = sc->rxring_m[sc->rxring_tl_ptr];
  582                 sc->rxring_m[sc->rxring_tl_ptr] = NULL;
  583 
  584                 /* Sync cache with receive buffer. */
  585                 bus_dmamap_sync(sc->mbuf_dma_tag,
  586                     sc->rxring_m_dmamap[sc->rxring_tl_ptr],
  587                     BUS_DMASYNC_POSTREAD);
  588 
  589                 /* Unload and destroy dmamap. */
  590                 bus_dmamap_unload(sc->mbuf_dma_tag,
  591                     sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
  592                 bus_dmamap_destroy(sc->mbuf_dma_tag,
  593                     sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
  594                 sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;
  595 
  596                 /* Increment tail pointer. */
  597                 if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
  598                         sc->rxring_tl_ptr = 0;
  599                 sc->rxring_queued--;
  600 
  601                 /*
   602                  * Check the FCS and make sure the entire packet landed in
   603                  * one mbuf cluster (which is much bigger than the largest
   604                  * Ethernet packet).
  605                  */
  606                 if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
  607                     (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
  608                     (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
  609                         /* discard. */
  610                         m_free(m);
  611                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
  612                         continue;
  613                 }
  614 
  615                 /* Ready it to hand off to upper layers. */
  616                 m->m_data += ETHER_ALIGN;
  617                 m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
  618                 m->m_pkthdr.rcvif = ifp;
  619                 m->m_pkthdr.len = m->m_len;
  620 
  621                 /*
  622                  * Are we using hardware checksumming?  Check the status in the
  623                  * receive descriptor.
  624                  */
  625                 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
  626                         /* TCP or UDP checks out, IP checks out too. */
  627                         if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
  628                             CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
  629                             (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
  630                             CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
  631                                 m->m_pkthdr.csum_flags |=
  632                                     CSUM_IP_CHECKED | CSUM_IP_VALID |
  633                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
  634                                 m->m_pkthdr.csum_data = 0xffff;
  635                         } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
  636                             CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
  637                                 /* Only IP checks out. */
  638                                 m->m_pkthdr.csum_flags |=
  639                                     CSUM_IP_CHECKED | CSUM_IP_VALID;
  640                                 m->m_pkthdr.csum_data = 0xffff;
  641                         }
  642                 }
  643 
  644                 /* Queue it up for delivery below. */
  645                 *m_tl = m;
  646                 m_tl = &m->m_next;
  647         }
  648 
  649         /* Replenish receive buffers. */
  650         cgem_fill_rqueue(sc);
  651 
  652         /* Unlock and send up packets. */
  653         CGEM_UNLOCK(sc);
  654         while (m_hd != NULL) {
  655                 m = m_hd;
  656                 m_hd = m_hd->m_next;
  657                 m->m_next = NULL;
  658                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
  659                 if_input(ifp, m);
  660         }
  661         CGEM_LOCK(sc);
  662 }
  663 
  664 /* Find completed transmits and free their mbufs. */
  665 static void
  666 cgem_clean_tx(struct cgem_softc *sc)
  667 {
  668         struct mbuf *m;
  669         uint32_t ctl;
  670 
  671         CGEM_ASSERT_LOCKED(sc);
  672 
   673         /* Free up finished transmits. */
  674         while (sc->txring_queued > 0 &&
  675             ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
  676             CGEM_TXDESC_USED) != 0) {
  677                 /* Sync cache. */
  678                 bus_dmamap_sync(sc->mbuf_dma_tag,
  679                     sc->txring_m_dmamap[sc->txring_tl_ptr],
  680                     BUS_DMASYNC_POSTWRITE);
  681 
  682                 /* Unload and destroy DMA map. */
  683                 bus_dmamap_unload(sc->mbuf_dma_tag,
  684                     sc->txring_m_dmamap[sc->txring_tl_ptr]);
  685                 bus_dmamap_destroy(sc->mbuf_dma_tag,
  686                     sc->txring_m_dmamap[sc->txring_tl_ptr]);
  687                 sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;
  688 
  689                 /* Free up the mbuf. */
  690                 m = sc->txring_m[sc->txring_tl_ptr];
  691                 sc->txring_m[sc->txring_tl_ptr] = NULL;
  692                 m_freem(m);
  693 
  694                 /* Check the status. */
  695                 if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
   696                         /* Serious bus error.  Log to console. */
  697 #ifdef CGEM64
  698                         device_printf(sc->dev,
  699                             "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
  700                             sc->txring[sc->txring_tl_ptr].addrhi,
  701                             sc->txring[sc->txring_tl_ptr].addr);
  702 #else
  703                         device_printf(sc->dev,
  704                             "cgem_clean_tx: AHB error, addr=0x%x\n",
  705                             sc->txring[sc->txring_tl_ptr].addr);
  706 #endif
  707                 } else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
  708                     CGEM_TXDESC_LATE_COLL)) != 0) {
  709                         if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
  710                 } else
  711                         if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
  712 
  713                 /*
  714                  * If the packet spanned more than one tx descriptor, skip
  715                  * descriptors until we find the end so that only
  716                  * start-of-frame descriptors are processed.
  717                  */
  718                 while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
  719                         if ((ctl & CGEM_TXDESC_WRAP) != 0)
  720                                 sc->txring_tl_ptr = 0;
  721                         else
  722                                 sc->txring_tl_ptr++;
  723                         sc->txring_queued--;
  724 
  725                         ctl = sc->txring[sc->txring_tl_ptr].ctl;
  726 
  727                         sc->txring[sc->txring_tl_ptr].ctl =
  728                             ctl | CGEM_TXDESC_USED;
  729                 }
  730 
  731                 /* Next descriptor. */
  732                 if ((ctl & CGEM_TXDESC_WRAP) != 0)
  733                         sc->txring_tl_ptr = 0;
  734                 else
  735                         sc->txring_tl_ptr++;
  736                 sc->txring_queued--;
  737 
  738                 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
  739         }
  740 }
  741 
  742 /* Start transmits. */
  743 static void
  744 cgem_start_locked(if_t ifp)
  745 {
  746         struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
  747         struct mbuf *m;
  748         bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
  749         uint32_t ctl;
  750         int i, nsegs, wrap, err;
  751 
  752         CGEM_ASSERT_LOCKED(sc);
  753 
  754         if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
  755                 return;
  756 
  757         for (;;) {
  758                 /* Check that there is room in the descriptor ring. */
  759                 if (sc->txring_queued >=
  760                     CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
  761                         /* Try to make room. */
  762                         cgem_clean_tx(sc);
  763 
  764                         /* Still no room? */
  765                         if (sc->txring_queued >=
  766                             CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
  767                                 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
  768                                 sc->txfull++;
  769                                 break;
  770                         }
  771                 }
  772 
  773                 /* Grab next transmit packet. */
  774                 m = if_dequeue(ifp);
  775                 if (m == NULL)
  776                         break;
  777 
  778                 /* Create and load DMA map. */
  779                 if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
  780                         &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
  781                         m_freem(m);
  782                         sc->txdmamapfails++;
  783                         continue;
  784                 }
  785                 err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
  786                     sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
  787                     BUS_DMA_NOWAIT);
  788                 if (err == EFBIG) {
   789                         /* Too many segments!  Defrag and try again. */
  790                         struct mbuf *m2 = m_defrag(m, M_NOWAIT);
  791 
  792                         if (m2 == NULL) {
  793                                 sc->txdefragfails++;
  794                                 m_freem(m);
  795                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
  796                                     sc->txring_m_dmamap[sc->txring_hd_ptr]);
  797                                 sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
  798                                 continue;
  799                         }
  800                         m = m2;
  801                         err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
  802                             sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
  803                             &nsegs, BUS_DMA_NOWAIT);
  804                         sc->txdefrags++;
  805                 }
  806                 if (err) {
  807                         /* Give up. */
  808                         m_freem(m);
  809                         bus_dmamap_destroy(sc->mbuf_dma_tag,
  810                             sc->txring_m_dmamap[sc->txring_hd_ptr]);
  811                         sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
  812                         sc->txdmamapfails++;
  813                         continue;
  814                 }
  815                 sc->txring_m[sc->txring_hd_ptr] = m;
  816 
  817                 /* Sync tx buffer with cache. */
  818                 bus_dmamap_sync(sc->mbuf_dma_tag,
  819                     sc->txring_m_dmamap[sc->txring_hd_ptr],
  820                     BUS_DMASYNC_PREWRITE);
  821 
  822                 /* Set wrap flag if next packet might run off end of ring. */
  823                 wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
  824                     CGEM_NUM_TX_DESCS;
  825 
  826                 /*
  827                  * Fill in the TX descriptors back to front so that USED bit in
  828                  * first descriptor is cleared last.
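                        * The controller starts on a frame once it sees the USED
                        * bit cleared in the frame's first descriptor, so this
                        * ordering keeps it from picking up a partially built chain.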
  829                  */
  830                 for (i = nsegs - 1; i >= 0; i--) {
  831                         /* Descriptor address. */
  832                         sc->txring[sc->txring_hd_ptr + i].addr =
  833                             segs[i].ds_addr;
  834 #ifdef CGEM64
  835                         sc->txring[sc->txring_hd_ptr + i].addrhi =
  836                             segs[i].ds_addr >> 32;
  837 #endif
  838                         /* Descriptor control word. */
  839                         ctl = segs[i].ds_len;
  840                         if (i == nsegs - 1) {
  841                                 ctl |= CGEM_TXDESC_LAST_BUF;
  842                                 if (wrap)
  843                                         ctl |= CGEM_TXDESC_WRAP;
  844                         }
  845                         sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
  846 
  847                         if (i != 0)
  848                                 sc->txring_m[sc->txring_hd_ptr + i] = NULL;
  849                 }
  850 
  851                 if (wrap)
  852                         sc->txring_hd_ptr = 0;
  853                 else
  854                         sc->txring_hd_ptr += nsegs;
  855                 sc->txring_queued += nsegs;
  856 
  857                 /* Kick the transmitter. */
  858                 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
  859                     CGEM_NET_CTRL_START_TX);
  860 
   861                 /* If there is a BPF listener, bounce a copy to it. */
  862                 ETHER_BPF_MTAP(ifp, m);
  863         }
  864 }
  865 
  866 static void
  867 cgem_start(if_t ifp)
  868 {
  869         struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
  870 
  871         CGEM_LOCK(sc);
  872         cgem_start_locked(ifp);
  873         CGEM_UNLOCK(sc);
  874 }
  875 
  876 static void
  877 cgem_poll_hw_stats(struct cgem_softc *sc)
  878 {
  879         uint32_t n;
  880 
  881         CGEM_ASSERT_LOCKED(sc);
  882 
  883         sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
  884         sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;
  885 
  886         sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
  887         sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
  888         sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
  889         sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
  890         sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
  891         sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
  892         sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
  893         sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
  894         sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
  895         sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
  896         sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);
  897 
  898         n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
  899         sc->stats.tx_single_collisn += n;
  900         if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
  901         n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
  902         sc->stats.tx_multi_collisn += n;
  903         if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
  904         n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
  905         sc->stats.tx_excsv_collisn += n;
  906         if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
  907         n = RD4(sc, CGEM_LATE_COLL);
  908         sc->stats.tx_late_collisn += n;
  909         if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
  910 
  911         sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
  912         sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);
  913 
  914         sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
  915         sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;
  916 
  917         sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
  918         sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
  919         sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
  920         sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
  921         sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
  922         sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
  923         sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
  924         sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
  925         sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
  926         sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
  927         sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
  928         sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
  929         sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
  930         sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
  931         sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
  932         sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
  933         sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
  934         sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
  935         sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
  936         sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
  937         sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
  938         sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
  939 }
  940 
  941 static void
  942 cgem_tick(void *arg)
  943 {
  944         struct cgem_softc *sc = (struct cgem_softc *)arg;
  945         struct mii_data *mii;
  946 
  947         CGEM_ASSERT_LOCKED(sc);
  948 
  949         /* Poll the phy. */
  950         if (sc->miibus != NULL) {
  951                 mii = device_get_softc(sc->miibus);
  952                 mii_tick(mii);
  953         }
  954 
  955         /* Poll statistics registers. */
  956         cgem_poll_hw_stats(sc);
  957 
  958         /* Check for receiver hang. */
  959         if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
  960                 /*
   961                  * Reset the receiver logic by toggling the RX_EN bit.  A
   962                  * 1 usec delay is necessary, especially when operating at
   963                  * 100 Mb/s or 10 Mb/s.
  964                  */
  965                 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
  966                     ~CGEM_NET_CTRL_RX_EN);
  967                 DELAY(1);
  968                 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
  969         }
  970         sc->rx_frames_prev = sc->stats.rx_frames;
  971 
  972         /* Next callout in one second. */
  973         callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
  974 }
  975 
  976 /* Interrupt handler. */
  977 static void
  978 cgem_intr(void *arg)
  979 {
  980         struct cgem_softc *sc = (struct cgem_softc *)arg;
  981         if_t ifp = sc->ifp;
  982         uint32_t istatus;
  983 
  984         CGEM_LOCK(sc);
  985 
  986         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
  987                 CGEM_UNLOCK(sc);
  988                 return;
  989         }
  990 
  991         /* Read interrupt status and immediately clear the bits. */
  992         istatus = RD4(sc, CGEM_INTR_STAT);
  993         WR4(sc, CGEM_INTR_STAT, istatus);
  994 
  995         /* Packets received. */
  996         if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
  997                 cgem_recv(sc);
  998 
  999         /* Free up any completed transmit buffers. */
 1000         cgem_clean_tx(sc);
 1001 
  1002         /* HRESP not OK.  Something is badly wrong with DMA.  Try to clear. */
 1003         if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
 1004                 device_printf(sc->dev,
 1005                     "cgem_intr: hresp not okay! rx_status=0x%x\n",
 1006                     RD4(sc, CGEM_RX_STAT));
 1007                 WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
 1008         }
 1009 
 1010         /* Receiver overrun. */
 1011         if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
 1012                 /* Clear status bit. */
 1013                 WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
 1014                 sc->rxoverruns++;
 1015         }
 1016 
 1017         /* Receiver ran out of bufs. */
 1018         if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
 1019                 WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
 1020                     CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
 1021                 cgem_fill_rqueue(sc);
 1022                 sc->rxnobufs++;
 1023         }
 1024 
 1025         /* Restart transmitter if needed. */
 1026         if (!if_sendq_empty(ifp))
 1027                 cgem_start_locked(ifp);
 1028 
 1029         CGEM_UNLOCK(sc);
 1030 }
 1031 
 1032 /* Reset hardware. */
 1033 static void
 1034 cgem_reset(struct cgem_softc *sc)
 1035 {
 1036 
 1037         CGEM_ASSERT_LOCKED(sc);
 1038 
 1039         /* Determine data bus width from design configuration register. */
 1040         switch (RD4(sc, CGEM_DESIGN_CFG1) &
 1041             CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
 1042         case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
 1043                 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
 1044                 break;
 1045         case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
 1046                 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
 1047                 break;
 1048         default:
 1049                 sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
 1050         }
 1051 
 1052         WR4(sc, CGEM_NET_CTRL, 0);
 1053         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1054         WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
 1055         WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
 1056         WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
 1057         WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
 1058         WR4(sc, CGEM_HASH_BOT, 0);
 1059         WR4(sc, CGEM_HASH_TOP, 0);
 1060         WR4(sc, CGEM_TX_QBAR, 0);       /* manual says do this. */
 1061         WR4(sc, CGEM_RX_QBAR, 0);
 1062 
 1063         /* Get management port running even if interface is down. */
 1064         sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
 1065         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1066 
 1067         sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
 1068         WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
 1069 }
 1070 
 1071 /* Bring up the hardware. */
 1072 static void
 1073 cgem_config(struct cgem_softc *sc)
 1074 {
 1075         if_t ifp = sc->ifp;
 1076         uint32_t dma_cfg;
 1077         u_char *eaddr = if_getlladdr(ifp);
 1078 
 1079         CGEM_ASSERT_LOCKED(sc);
 1080 
 1081         /* Program Net Config Register. */
 1082         sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
 1083             CGEM_NET_CFG_DBUS_WIDTH_MASK);
 1084         sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
 1085             CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
 1086             CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
 1087             CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);
 1088 
 1089         /* Check connection type, enable SGMII bits if necessary. */
 1090         if (sc->phy_contype == MII_CONTYPE_SGMII) {
 1091                 sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
 1092                 sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
 1093         }
 1094 
 1095         /* Enable receive checksum offloading? */
 1096         if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
 1097                 sc->net_cfg_shadow |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
 1098 
 1099         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1100 
 1101         /* Program DMA Config Register. */
 1102         dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
 1103             CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
 1104             CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
 1105             CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
 1106 #ifdef CGEM64
 1107             CGEM_DMA_CFG_ADDR_BUS_64 |
 1108 #endif
 1109             CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
 1110 
 1111         /* Enable transmit checksum offloading? */
 1112         if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
 1113                 dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
 1114 
 1115         WR4(sc, CGEM_DMA_CFG, dma_cfg);
 1116 
 1117         /* Write the rx and tx descriptor ring addresses to the QBAR regs. */
 1118         WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
 1119         WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
 1120 #ifdef CGEM64
 1121         WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
 1122         WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
 1123 #endif
 1124 
 1125         /* Enable rx and tx. */
 1126         sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
 1127         WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
 1128 
 1129         /* Set receive address in case it changed. */
 1130         WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
 1131             (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
 1132         WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
 1133 
 1134         /* Set up interrupts. */
 1135         WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
 1136             CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
 1137             CGEM_INTR_HRESP_NOT_OK);
 1138 }
 1139 
 1140 /* Turn on interface and load up receive ring with buffers. */
 1141 static void
 1142 cgem_init_locked(struct cgem_softc *sc)
 1143 {
 1144         struct mii_data *mii;
 1145 
 1146         CGEM_ASSERT_LOCKED(sc);
 1147 
 1148         if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
 1149                 return;
 1150 
 1151         cgem_config(sc);
 1152         cgem_fill_rqueue(sc);
 1153 
 1154         if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 1155 
 1156         if (sc->miibus != NULL) {
 1157                 mii = device_get_softc(sc->miibus);
 1158                 mii_mediachg(mii);
 1159         }
 1160 
 1161         callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
 1162 }
 1163 
 1164 static void
 1165 cgem_init(void *arg)
 1166 {
 1167         struct cgem_softc *sc = (struct cgem_softc *)arg;
 1168 
 1169         CGEM_LOCK(sc);
 1170         cgem_init_locked(sc);
 1171         CGEM_UNLOCK(sc);
 1172 }
 1173 
 1174 /* Turn off interface.  Free up any buffers in transmit or receive queues. */
 1175 static void
 1176 cgem_stop(struct cgem_softc *sc)
 1177 {
 1178         int i;
 1179 
 1180         CGEM_ASSERT_LOCKED(sc);
 1181 
 1182         callout_stop(&sc->tick_ch);
 1183 
 1184         /* Shut down hardware. */
 1185         cgem_reset(sc);
 1186 
 1187         /* Clear out transmit queue. */
 1188         memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
 1189         for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
 1190                 sc->txring[i].ctl = CGEM_TXDESC_USED;
 1191                 if (sc->txring_m[i]) {
 1192                         /* Unload and destroy dmamap. */
 1193                         bus_dmamap_unload(sc->mbuf_dma_tag,
 1194                             sc->txring_m_dmamap[i]);
 1195                         bus_dmamap_destroy(sc->mbuf_dma_tag,
 1196                             sc->txring_m_dmamap[i]);
 1197                         sc->txring_m_dmamap[i] = NULL;
 1198                         m_freem(sc->txring_m[i]);
 1199                         sc->txring_m[i] = NULL;
 1200                 }
 1201         }
 1202         sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
 1203 
 1204         sc->txring_hd_ptr = 0;
 1205         sc->txring_tl_ptr = 0;
 1206         sc->txring_queued = 0;
 1207 
 1208         /* Clear out receive queue. */
 1209         memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
 1210         for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
 1211                 sc->rxring[i].addr = CGEM_RXDESC_OWN;
 1212                 if (sc->rxring_m[i]) {
 1213                         /* Unload and destroy dmamap. */
 1214                         bus_dmamap_unload(sc->mbuf_dma_tag,
 1215                             sc->rxring_m_dmamap[i]);
 1216                         bus_dmamap_destroy(sc->mbuf_dma_tag,
 1217                             sc->rxring_m_dmamap[i]);
 1218                         sc->rxring_m_dmamap[i] = NULL;
 1219 
 1220                         m_freem(sc->rxring_m[i]);
 1221                         sc->rxring_m[i] = NULL;
 1222                 }
 1223         }
 1224         sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
 1225 
 1226         sc->rxring_hd_ptr = 0;
 1227         sc->rxring_tl_ptr = 0;
 1228         sc->rxring_queued = 0;
 1229 
 1230         /* Force next statchg or linkchg to program net config register. */
 1231         sc->mii_media_active = 0;
 1232 }
 1233 
 1234 static int
 1235 cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
 1236 {
 1237         struct cgem_softc *sc = if_getsoftc(ifp);
 1238         struct ifreq *ifr = (struct ifreq *)data;
 1239         struct mii_data *mii;
 1240         int error = 0, mask;
 1241 
 1242         switch (cmd) {
 1243         case SIOCSIFFLAGS:
 1244                 CGEM_LOCK(sc);
 1245                 if ((if_getflags(ifp) & IFF_UP) != 0) {
 1246                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 1247                                 if (((if_getflags(ifp) ^ sc->if_old_flags) &
 1248                                     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
 1249                                         cgem_rx_filter(sc);
 1250                                 }
 1251                         } else {
 1252                                 cgem_init_locked(sc);
 1253                         }
 1254                 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 1255                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 1256                         cgem_stop(sc);
 1257                 }
 1258                 sc->if_old_flags = if_getflags(ifp);
 1259                 CGEM_UNLOCK(sc);
 1260                 break;
 1261 
 1262         case SIOCADDMULTI:
 1263         case SIOCDELMULTI:
 1264                 /* Set up multi-cast filters. */
 1265                 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 1266                         CGEM_LOCK(sc);
 1267                         cgem_rx_filter(sc);
 1268                         CGEM_UNLOCK(sc);
 1269                 }
 1270                 break;
 1271 
 1272         case SIOCSIFMEDIA:
 1273         case SIOCGIFMEDIA:
 1274                 if (sc->miibus == NULL)
 1275                         return (ENXIO);
 1276                 mii = device_get_softc(sc->miibus);
 1277                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1278                 break;
 1279 
 1280         case SIOCSIFCAP:
 1281                 CGEM_LOCK(sc);
 1282                 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
 1283 
 1284                 if ((mask & IFCAP_TXCSUM) != 0) {
 1285                         if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
 1286                                 /* Turn on TX checksumming. */
 1287                                 if_setcapenablebit(ifp, IFCAP_TXCSUM |
 1288                                     IFCAP_TXCSUM_IPV6, 0);
 1289                                 if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);
 1290 
 1291                                 WR4(sc, CGEM_DMA_CFG,
 1292                                     RD4(sc, CGEM_DMA_CFG) |
 1293                                     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
 1294                         } else {
 1295                                 /* Turn off TX checksumming. */
 1296                                 if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
 1297                                     IFCAP_TXCSUM_IPV6);
 1298                                 if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);
 1299 
 1300                                 WR4(sc, CGEM_DMA_CFG,
 1301                                     RD4(sc, CGEM_DMA_CFG) &
 1302                                     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
 1303                         }
 1304                 }
 1305                 if ((mask & IFCAP_RXCSUM) != 0) {
 1306                         if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
 1307                                 /* Turn on RX checksumming. */
 1308                                 if_setcapenablebit(ifp, IFCAP_RXCSUM |
 1309                                     IFCAP_RXCSUM_IPV6, 0);
 1310                                 sc->net_cfg_shadow |=
 1311                                     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
 1312                                 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1313                         } else {
 1314                                 /* Turn off RX checksumming. */
 1315                                 if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
 1316                                     IFCAP_RXCSUM_IPV6);
 1317                                 sc->net_cfg_shadow &=
 1318                                     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
 1319                                 WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1320                         }
 1321                 }
 1322                 if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
 1323                     (IFCAP_RXCSUM | IFCAP_TXCSUM))
 1324                         if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
 1325                 else
 1326                         if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
 1327 
 1328                 CGEM_UNLOCK(sc);
 1329                 break;
 1330         default:
 1331                 error = ether_ioctl(ifp, cmd, data);
 1332                 break;
 1333         }
 1334 
 1335         return (error);
 1336 }
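/*
 * Illustration (editorial, not part of the driver): the SIOCSIFCAP case
 * above is normally reached via ifconfig(8), e.g. "ifconfig cgem0 -txcsum".
 * A minimal userland sketch of the same ioctl sequence, assuming the
 * interface is named "cgem0":
 */
#if 0	/* example only -- userland code, never compiled into this file */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
example_disable_txcsum(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) < 0)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "cgem0", sizeof(ifr.ifr_name));

	/* Read current capability enables, then request TXCSUM off. */
	if (ioctl(s, SIOCGIFCAP, &ifr) == 0) {
		ifr.ifr_reqcap = ifr.ifr_curcap & ~IFCAP_TXCSUM;
		(void)ioctl(s, SIOCSIFCAP, &ifr);
	}
	close(s);
	return (0);
}
#endif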
 1337 
 1338 /* MII bus support routines.
 1339  */
 1340 static int
 1341 cgem_ifmedia_upd(if_t ifp)
 1342 {
 1343         struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
 1344         struct mii_data *mii;
 1345         struct mii_softc *miisc;
 1346         int error = 0;
 1347 
 1348         mii = device_get_softc(sc->miibus);
 1349         CGEM_LOCK(sc);
 1350         if ((if_getflags(ifp) & IFF_UP) != 0) {
 1351                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 1352                         PHY_RESET(miisc);
 1353                 error = mii_mediachg(mii);
 1354         }
 1355         CGEM_UNLOCK(sc);
 1356 
 1357         return (error);
 1358 }
 1359 
 1360 static void
 1361 cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
 1362 {
 1363         struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
 1364         struct mii_data *mii;
 1365 
 1366         mii = device_get_softc(sc->miibus);
 1367         CGEM_LOCK(sc);
 1368         mii_pollstat(mii);
 1369         ifmr->ifm_active = mii->mii_media_active;
 1370         ifmr->ifm_status = mii->mii_media_status;
 1371         CGEM_UNLOCK(sc);
 1372 }
 1373 
 1374 static int
 1375 cgem_miibus_readreg(device_t dev, int phy, int reg)
 1376 {
 1377         struct cgem_softc *sc = device_get_softc(dev);
 1378         int tries, val;
 1379 
 1380         WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
 1381             CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
 1382             (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
 1383             (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
 1384 
 1385         /* Wait for completion. */
 1386         tries = 0;
 1387         while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
 1388                 DELAY(5);
 1389                 if (++tries > 200) {
 1390                         device_printf(dev, "phy read timeout: %d\n", reg);
 1391                         return (-1);
 1392                 }
 1393         }
 1394 
 1395         val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
 1396 
 1397         if (reg == MII_EXTSR)
 1398                 /*
 1399                  * MAC does not support half-duplex at gig speeds.
 1400                  * Let mii(4) exclude the capability.
 1401                  */
 1402                 val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);
 1403 
 1404         return (val);
 1405 }
 1406 
 1407 static int
 1408 cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
 1409 {
 1410         struct cgem_softc *sc = device_get_softc(dev);
 1411         int tries;
 1412 
 1413         WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
 1414             CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
 1415             (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
 1416             (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
 1417             (data & CGEM_PHY_MAINT_DATA_MASK));
 1418 
 1419         /* Wait for completion. */
 1420         tries = 0;
 1421         while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
 1422                 DELAY(5);
 1423                 if (++tries > 200) {
 1424                         device_printf(dev, "phy write timeout: %d\n", reg);
 1425                         return (-1);
 1426                 }
 1427         }
 1428 
 1429         return (0);
 1430 }
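/*
 * Editorial note: the completion loops in cgem_miibus_readreg() and
 * cgem_miibus_writereg() above poll CGEM_NET_STAT_PHY_MGMT_IDLE up to 200
 * times with DELAY(5), i.e. they give the PHY management shifter roughly
 * 1 ms to finish before reporting a timeout.
 */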
 1431 
 1432 static void
 1433 cgem_miibus_statchg(device_t dev)
 1434 {
 1435         struct cgem_softc *sc = device_get_softc(dev);
 1436         struct mii_data *mii = device_get_softc(sc->miibus);
 1437 
 1438         CGEM_ASSERT_LOCKED(sc);
 1439 
 1440         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 1441             (IFM_ACTIVE | IFM_AVALID) &&
 1442             sc->mii_media_active != mii->mii_media_active)
 1443                 cgem_mediachange(sc, mii);
 1444 }
 1445 
 1446 static void
 1447 cgem_miibus_linkchg(device_t dev)
 1448 {
 1449         struct cgem_softc *sc = device_get_softc(dev);
 1450         struct mii_data *mii = device_get_softc(sc->miibus);
 1451 
 1452         CGEM_ASSERT_LOCKED(sc);
 1453 
 1454         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 1455             (IFM_ACTIVE | IFM_AVALID) &&
 1456             sc->mii_media_active != mii->mii_media_active)
 1457                 cgem_mediachange(sc, mii);
 1458 }
 1459 
 1460 /*
 1461  * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 1462  * provide a function to set the cgem's reference clock.
 1463  */
 1464 static int __used
 1465 cgem_default_set_ref_clk(int unit, int frequency)
 1466 {
 1467 
 1468         return (0);
 1469 }
 1470 __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
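/*
 * Illustration (editorial): because cgem_set_ref_clk is a weak reference to
 * the stub above, platform code can override it simply by providing a strong
 * definition with the same signature.  A minimal sketch, assuming a
 * hypothetical platform helper myplat_set_gem_refclk():
 */
#if 0	/* example only -- would live in platform-specific code */
int
cgem_set_ref_clk(int unit, int frequency)
{

	/* myplat_set_gem_refclk() is a hypothetical platform clock routine. */
	return (myplat_set_gem_refclk(unit, frequency));
}
#endif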
 1471 
 1472 /* Call to set reference clock and network config bits according to media. */
 1473 static void
 1474 cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
 1475 {
 1476         int ref_clk_freq;
 1477 
 1478         CGEM_ASSERT_LOCKED(sc);
 1479 
 1480         /* Update hardware to reflect media. */
 1481         sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
 1482             CGEM_NET_CFG_FULL_DUPLEX);
 1483 
 1484         switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1485         case IFM_1000_T:
 1486                 sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
 1487                     CGEM_NET_CFG_GIGE_EN);
 1488                 ref_clk_freq = 125000000;
 1489                 break;
 1490         case IFM_100_TX:
 1491                 sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
 1492                 ref_clk_freq = 25000000;
 1493                 break;
 1494         default:
 1495                 ref_clk_freq = 2500000;
 1496         }
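	/*
	 * Editorial note: the frequencies chosen above (125, 25 and 2.5 MHz)
	 * are the standard (R)GMII/MII transmit clock rates for 1000, 100
	 * and 10 Mb/s operation, respectively.
	 */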
 1497 
 1498         if ((mii->mii_media_active & IFM_FDX) != 0)
 1499                 sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;
 1500 
 1501         WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 1502 
 1503         if (sc->ref_clk != NULL) {
 1504                 CGEM_UNLOCK(sc);
 1505                 if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0))
 1506                         device_printf(sc->dev, "could not set ref clk to %d\n",
 1507                             ref_clk_freq);
 1508                 CGEM_LOCK(sc);
 1509         }
 1510 
 1511         sc->mii_media_active = mii->mii_media_active;
 1512 }
 1513 
 1514 static void
 1515 cgem_add_sysctls(device_t dev)
 1516 {
 1517         struct cgem_softc *sc = device_get_softc(dev);
 1518         struct sysctl_ctx_list *ctx;
 1519         struct sysctl_oid_list *child;
 1520         struct sysctl_oid *tree;
 1521 
 1522         ctx = device_get_sysctl_ctx(dev);
 1523         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
 1524 
 1525         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
 1526             &sc->rxbufs, 0, "Number receive buffers to provide");
 1527 
 1528         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
 1529             &sc->rxhangwar, 0, "Enable receive hang work-around");
 1530 
 1531         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
 1532             &sc->rxoverruns, 0, "Receive overrun events");
 1533 
 1534         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
 1535             &sc->rxnobufs, 0, "Receive buf queue empty events");
 1536 
 1537         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
 1538             &sc->rxdmamapfails, 0, "Receive DMA map failures");
 1539 
 1540         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
 1541             &sc->txfull, 0, "Transmit ring full events");
 1542 
 1543         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
 1544             &sc->txdmamapfails, 0, "Transmit DMA map failures");
 1545 
 1546         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
 1547             &sc->txdefrags, 0, "Transmit m_defrag() calls");
 1548 
 1549         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
 1550             &sc->txdefragfails, 0, "Transmit m_defrag() failures");
 1551 
 1552         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
 1553             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
 1554         child = SYSCTL_CHILDREN(tree);
 1555 
 1556         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
 1557             &sc->stats.tx_bytes, "Total bytes transmitted");
 1558 
 1559         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
 1560             &sc->stats.tx_frames, 0, "Total frames transmitted");
 1561 
 1562         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
 1563             &sc->stats.tx_frames_bcast, 0,
 1564             "Number broadcast frames transmitted");
 1565 
 1566         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
 1567             &sc->stats.tx_frames_multi, 0,
 1568             "Number multicast frames transmitted");
 1569 
 1570         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
 1571             CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
 1572             "Number pause frames transmitted");
 1573 
 1574         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
 1575             &sc->stats.tx_frames_64b, 0,
 1576             "Number frames transmitted of size 64 bytes or less");
 1577 
 1578         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
 1579             &sc->stats.tx_frames_65to127b, 0,
 1580             "Number frames transmitted of size 65-127 bytes");
 1581 
 1582         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
 1583             CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
 1584             "Number frames transmitted of size 128-255 bytes");
 1585 
 1586         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
 1587             CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
 1588             "Number frames transmitted of size 256-511 bytes");
 1589 
 1590         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
 1591             CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
 1592             "Number frames transmitted of size 512-1023 bytes");
 1593 
 1594         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
 1595             CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
 1596             "Number frames transmitted of size 1024-1536 bytes");
 1597 
 1598         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
 1599             CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
 1600             "Number transmit under-run events");
 1601 
 1602         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
 1603             CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
 1604             "Number single-collision transmit frames");
 1605 
 1606         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
 1607             CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
 1608             "Number multi-collision transmit frames");
 1609 
 1610         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
 1611             CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
 1612             "Number excessive collision transmit frames");
 1613 
 1614         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
 1615             CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
 1616             "Number late-collision transmit frames");
 1617 
 1618         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
 1619             CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
 1620             "Number deferred transmit frames");
 1621 
 1622         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
 1623             CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
 1624             "Number carrier sense errors on transmit");
 1625 
 1626         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
 1627             &sc->stats.rx_bytes, "Total bytes received");
 1628 
 1629         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
 1630             &sc->stats.rx_frames, 0, "Total frames received");
 1631 
 1632         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
 1633             CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
 1634             "Number broadcast frames received");
 1635 
 1636         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
 1637             CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
 1638             "Number multicast frames received");
 1639 
 1640         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
 1641             CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
 1642             "Number pause frames received");
 1643 
 1644         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
 1645             CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
 1646             "Number frames received of size 64 bytes or less");
 1647 
 1648         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
 1649             CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
 1650             "Number frames received of size 65-127 bytes");
 1651 
 1652         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
 1653             CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
 1654             "Number frames received of size 128-255 bytes");
 1655 
 1656         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
 1657             CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
 1658             "Number frames received of size 256-511 bytes");
 1659 
 1660         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
 1661             CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
 1662             "Number frames received of size 512-1023 bytes");
 1663 
 1664         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
 1665             CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
 1666             "Number frames received of size 1024-1536 bytes");
 1667 
 1668         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
 1669             CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
 1670             "Number undersize frames received");
 1671 
 1672         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
 1673             CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
 1674             "Number oversize frames received");
 1675 
 1676         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
 1677             CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
 1678             "Number jabber frames received");
 1679 
 1680         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
 1681             CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
 1682             "Number frames received with FCS errors");
 1683 
 1684         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
 1685             CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
 1686             "Number frames received with length errors");
 1687 
 1688         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
 1689             CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
 1690             "Number receive symbol errors");
 1691 
 1692         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
 1693             CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
 1694             "Number receive alignment errors");
 1695 
 1696         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
 1697             CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
 1698             "Number frames received when no rx buffer available");
 1699 
 1700         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
 1701             CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
 1702             "Number frames received but not copied due to receive overrun");
 1703 
 1704         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
 1705             CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
 1706             "Number frames received with IP header checksum errors");
 1707 
 1708         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
 1709             CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
 1710             "Number frames received with TCP checksum errors");
 1711 
 1712         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
 1713             CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
 1714             "Number frames received with UDP checksum errors");
 1715 }
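/*
 * Illustration (editorial): the counters registered above show up under the
 * device's sysctl tree, e.g. "dev.cgem.0.stats.rx_bytes" for unit 0 (node
 * names assumed from the OID strings used above).  A userland sketch using
 * sysctlbyname(3):
 */
#if 0	/* example only -- userland code, never compiled into this file */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

static void
example_print_rx_bytes(void)
{
	uint64_t rx_bytes;
	size_t len = sizeof(rx_bytes);

	/* stats.rx_bytes was added with SYSCTL_ADD_UQUAD, i.e. 64 bits. */
	if (sysctlbyname("dev.cgem.0.stats.rx_bytes", &rx_bytes, &len,
	    NULL, 0) == 0)
		printf("cgem0 rx_bytes: %ju\n", (uintmax_t)rx_bytes);
}
#endif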
 1716 
 1717 static int
 1718 cgem_probe(device_t dev)
 1719 {
 1720 
 1721         if (!ofw_bus_status_okay(dev))
 1722                 return (ENXIO);
 1723 
 1724         if (ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL)
 1725                 return (ENXIO);
 1726 
 1727         device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
 1728         return (0);
 1729 }
 1730 
 1731 static int
 1732 cgem_attach(device_t dev)
 1733 {
 1734         struct cgem_softc *sc = device_get_softc(dev);
 1735         if_t ifp = NULL;
 1736         int rid, err;
 1737         u_char eaddr[ETHER_ADDR_LEN];
 1738         int hwquirks;
 1739         phandle_t node;
 1740 
 1741         sc->dev = dev;
 1742         CGEM_LOCK_INIT(sc);
 1743 
 1744         /* Key off of compatible string and set hardware-specific options. */
 1745         hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
 1746         if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0)
 1747                 sc->neednullqs = 1;
 1748         if ((hwquirks & HWQUIRK_RXHANGWAR) != 0)
 1749                 sc->rxhangwar = 1;
 1750         if ((hwquirks & HWQUIRK_TXCLK) != 0) {
 1751                 if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0)
 1752                         device_printf(dev,
 1753                             "could not retrieve reference clock.\n");
 1754                 else if (clk_enable(sc->ref_clk) != 0)
 1755                         device_printf(dev, "could not enable clock.\n");
 1756         }
 1757         if ((hwquirks & HWQUIRK_PCLK) != 0) {
 1758                 if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0)
 1759                         device_printf(dev,
 1760                             "could not retrieve reference clock.\n");
 1761                 else if (clk_enable(sc->ref_clk) != 0)
 1762                         device_printf(dev, "could not enable clock.\n");
 1763         }
 1764 
 1765         node = ofw_bus_get_node(dev);
 1766         sc->phy_contype = mii_fdt_get_contype(node);
 1767 
 1768         /* Get memory resource. */
 1769         rid = 0;
 1770         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1771             RF_ACTIVE);
 1772         if (sc->mem_res == NULL) {
 1773                 device_printf(dev, "could not allocate memory resources.\n");
 1774                 return (ENOMEM);
 1775         }
 1776 
 1777         /* Get IRQ resource. */
 1778         rid = 0;
 1779         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1780             RF_ACTIVE);
 1781         if (sc->irq_res == NULL) {
 1782                 device_printf(dev, "could not allocate interrupt resource.\n");
 1783                 cgem_detach(dev);
 1784                 return (ENOMEM);
 1785         }
 1786 
 1787         /* Set up ifnet structure. */
 1788         ifp = sc->ifp = if_alloc(IFT_ETHER);
 1789         if (ifp == NULL) {
 1790                 device_printf(dev, "could not allocate ifnet structure\n");
 1791                 cgem_detach(dev);
 1792                 return (ENOMEM);
 1793         }
 1794         if_setsoftc(ifp, sc);
 1795         if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
 1796         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
 1797         if_setinitfn(ifp, cgem_init);
 1798         if_setioctlfn(ifp, cgem_ioctl);
 1799         if_setstartfn(ifp, cgem_start);
 1800         if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
 1801             IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
 1802         if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
 1803         if_setsendqready(ifp);
 1804 
 1805         /* Disable hardware checksumming by default. */
 1806         if_sethwassist(ifp, 0);
 1807         if_setcapenable(ifp, if_getcapabilities(ifp) &
 1808             ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));
 1809 
 1810         sc->if_old_flags = if_getflags(ifp);
 1811         sc->rxbufs = DEFAULT_NUM_RX_BUFS;
 1812 
 1813         /* Reset hardware. */
 1814         CGEM_LOCK(sc);
 1815         cgem_reset(sc);
 1816         CGEM_UNLOCK(sc);
 1817 
 1818         /* Attach phy to mii bus. */
 1819         err = mii_attach(dev, &sc->miibus, ifp,
 1820             cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
 1821             MII_PHY_ANY, MII_OFFSET_ANY, 0);
 1822         if (err)
 1823                 device_printf(dev, "warning: attaching PHYs failed\n");
 1824 
 1825         /* Set up TX and RX descriptor area. */
 1826         err = cgem_setup_descs(sc);
 1827         if (err) {
 1828                 device_printf(dev, "could not set up dma mem for descs.\n");
 1829                 cgem_detach(dev);
 1830                 return (ENOMEM);
 1831         }
 1832 
 1833         /* Get a MAC address. */
 1834         cgem_get_mac(sc, eaddr);
 1835 
 1836         /* Start ticks. */
 1837         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
 1838 
 1839         ether_ifattach(ifp, eaddr);
 1840 
 1841         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
 1842             INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
 1843         if (err) {
 1844                 device_printf(dev, "could not set interrupt handler.\n");
 1845                 ether_ifdetach(ifp);
 1846                 cgem_detach(dev);
 1847                 return (err);
 1848         }
 1849 
 1850         cgem_add_sysctls(dev);
 1851 
 1852         return (0);
 1853 }
 1854 
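/*
 * Editorial note: cgem_attach() above calls cgem_detach() on its failure
 * paths, so cgem_detach() is written to tolerate a partially initialized
 * softc -- every resource is NULL-checked before it is released.
 */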
 1855 static int
 1856 cgem_detach(device_t dev)
 1857 {
 1858         struct cgem_softc *sc = device_get_softc(dev);
 1859         int i;
 1860 
 1861         if (sc == NULL)
 1862                 return (ENODEV);
 1863 
 1864         if (device_is_attached(dev)) {
 1865                 CGEM_LOCK(sc);
 1866                 cgem_stop(sc);
 1867                 CGEM_UNLOCK(sc);
 1868                 callout_drain(&sc->tick_ch);
 1869                 if_setflagbits(sc->ifp, 0, IFF_UP);
 1870                 ether_ifdetach(sc->ifp);
 1871         }
 1872 
 1873         if (sc->miibus != NULL) {
 1874                 device_delete_child(dev, sc->miibus);
 1875                 sc->miibus = NULL;
 1876         }
 1877 
 1878         /* Release resources. */
 1879         if (sc->mem_res != NULL) {
 1880                 bus_release_resource(dev, SYS_RES_MEMORY,
 1881                     rman_get_rid(sc->mem_res), sc->mem_res);
 1882                 sc->mem_res = NULL;
 1883         }
 1884         if (sc->irq_res != NULL) {
 1885                 if (sc->intrhand)
 1886                         bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
 1887                 bus_release_resource(dev, SYS_RES_IRQ,
 1888                     rman_get_rid(sc->irq_res), sc->irq_res);
 1889                 sc->irq_res = NULL;
 1890         }
 1891 
 1892         /* Release DMA resources. */
 1893         if (sc->rxring != NULL) {
 1894                 if (sc->rxring_physaddr != 0) {
 1895                         bus_dmamap_unload(sc->desc_dma_tag,
 1896                             sc->rxring_dma_map);
 1897                         sc->rxring_physaddr = 0;
 1898                         sc->txring_physaddr = 0;
 1899                         sc->null_qs_physaddr = 0;
 1900                 }
 1901                 bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
 1902                     sc->rxring_dma_map);
 1903                 sc->rxring = NULL;
 1904                 sc->txring = NULL;
 1905                 sc->null_qs = NULL;
 1906 
 1907                 for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
 1908                         if (sc->rxring_m_dmamap[i] != NULL) {
 1909                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
 1910                                     sc->rxring_m_dmamap[i]);
 1911                                 sc->rxring_m_dmamap[i] = NULL;
 1912                         }
 1913                 for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
 1914                         if (sc->txring_m_dmamap[i] != NULL) {
 1915                                 bus_dmamap_destroy(sc->mbuf_dma_tag,
 1916                                     sc->txring_m_dmamap[i]);
 1917                                 sc->txring_m_dmamap[i] = NULL;
 1918                         }
 1919         }
 1920         if (sc->desc_dma_tag != NULL) {
 1921                 bus_dma_tag_destroy(sc->desc_dma_tag);
 1922                 sc->desc_dma_tag = NULL;
 1923         }
 1924         if (sc->mbuf_dma_tag != NULL) {
 1925                 bus_dma_tag_destroy(sc->mbuf_dma_tag);
 1926                 sc->mbuf_dma_tag = NULL;
 1927         }
 1928 
 1929         if (sc->ref_clk != NULL) {
 1930                 clk_release(sc->ref_clk);
 1931                 sc->ref_clk = NULL;
 1932         }
 1933 
 1934         bus_generic_detach(dev);
 1935 
 1936         CGEM_LOCK_DESTROY(sc);
 1937 
 1938         return (0);
 1939 }
 1940 
 1941 static device_method_t cgem_methods[] = {
 1942         /* Device interface */
 1943         DEVMETHOD(device_probe,         cgem_probe),
 1944         DEVMETHOD(device_attach,        cgem_attach),
 1945         DEVMETHOD(device_detach,        cgem_detach),
 1946 
 1947         /* MII interface */
 1948         DEVMETHOD(miibus_readreg,       cgem_miibus_readreg),
 1949         DEVMETHOD(miibus_writereg,      cgem_miibus_writereg),
 1950         DEVMETHOD(miibus_statchg,       cgem_miibus_statchg),
 1951         DEVMETHOD(miibus_linkchg,       cgem_miibus_linkchg),
 1952 
 1953         DEVMETHOD_END
 1954 };
 1955 
 1956 static driver_t cgem_driver = {
 1957         "cgem",
 1958         cgem_methods,
 1959         sizeof(struct cgem_softc),
 1960 };
 1961 
 1962 DRIVER_MODULE(cgem, simplebus, cgem_driver, NULL, NULL);
 1963 DRIVER_MODULE(miibus, cgem, miibus_driver, NULL, NULL);
 1964 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
 1965 MODULE_DEPEND(cgem, ether, 1, 1, 1);
 1966 SIMPLEBUS_PNP_INFO(compat_data);
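/*
 * Editorial note: the glue above registers the driver as a child of
 * simplebus(4) (FDT), lets miibus(4) attach beneath it for PHY handling,
 * records the module dependencies on miibus and ether, and exports the
 * compat_data table so devmatch(8) can auto-load the module from the
 * device tree "compatible" strings.
 */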
