The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/cas/if_cas.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (C) 2001 Eduardo Horvath.
    3  * Copyright (c) 2001-2003 Thomas Moestl
    4  * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org>
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  *      from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
   29  *      from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/8.3/sys/dev/cas/if_cas.c 230714 2012-01-29 01:22:48Z marius $");
   34 
   35 /*
   36  * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065
   37  * Saturn Gigabit Ethernet controllers
   38  */
   39 
   40 #if 0
   41 #define CAS_DEBUG
   42 #endif
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/bus.h>
   47 #include <sys/callout.h>
   48 #include <sys/endian.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/malloc.h>
   51 #include <sys/kernel.h>
   52 #include <sys/lock.h>
   53 #include <sys/module.h>
   54 #include <sys/mutex.h>
   55 #include <sys/refcount.h>
   56 #include <sys/resource.h>
   57 #include <sys/rman.h>
   58 #include <sys/socket.h>
   59 #include <sys/sockio.h>
   60 #include <sys/taskqueue.h>
   61 
   62 #include <net/bpf.h>
   63 #include <net/ethernet.h>
   64 #include <net/if.h>
   65 #include <net/if_arp.h>
   66 #include <net/if_dl.h>
   67 #include <net/if_media.h>
   68 #include <net/if_types.h>
   69 #include <net/if_vlan_var.h>
   70 
   71 #include <netinet/in.h>
   72 #include <netinet/in_systm.h>
   73 #include <netinet/ip.h>
   74 #include <netinet/tcp.h>
   75 #include <netinet/udp.h>
   76 
   77 #include <machine/bus.h>
   78 #if defined(__powerpc__) || defined(__sparc64__)
   79 #include <dev/ofw/ofw_bus.h>
   80 #include <dev/ofw/openfirm.h>
   81 #include <machine/ofw_machdep.h>
   82 #endif
   83 #include <machine/resource.h>
   84 
   85 #include <dev/mii/mii.h>
   86 #include <dev/mii/miivar.h>
   87 
   88 #include <dev/cas/if_casreg.h>
   89 #include <dev/cas/if_casvar.h>
   90 
   91 #include <dev/pci/pcireg.h>
   92 #include <dev/pci/pcivar.h>
   93 
   94 #include "miibus_if.h"
   95 
/*
 * Compile-time checks: every ring size must be a power of two and lie
 * within the range given by min/max.
 */
#define RINGASSERT(n , min, max)                                        \
        CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max))

RINGASSERT(CAS_NRXCOMP, 128, 32768);
RINGASSERT(CAS_NRXDESC, 32, 8192);
RINGASSERT(CAS_NRXDESC2, 32, 8192);
RINGASSERT(CAS_NTXDESC, 32, 8192);

#undef RINGASSERT

/*
 * Compile-time checks: the rings embedded in the control data block
 * must start at their required alignment.
 */
#define CCDASSERT(m, a)                                                 \
        CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0)

CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN);
CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN);
CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN);

#undef CCDASSERT

/* Maximum number of register polls in cas_bitwait() before giving up. */
#define CAS_TRIES       10000

/*
 * According to documentation, the hardware has support for basic TCP
 * checksum offloading only, in practice this can be also used for UDP
 * however (i.e. the problem of previous Sun NICs that a checksum of 0x0
 * is not converted to 0xffff no longer exists).
 */
#define CAS_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
  124 
  125 static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx);
  126 static int      cas_attach(struct cas_softc *sc);
  127 static int      cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr,
  128                     uint32_t set);
  129 static void     cas_cddma_callback(void *xsc, bus_dma_segment_t *segs,
  130                     int nsegs, int error);
  131 static void     cas_detach(struct cas_softc *sc);
  132 static int      cas_disable_rx(struct cas_softc *sc);
  133 static int      cas_disable_tx(struct cas_softc *sc);
  134 static void     cas_eint(struct cas_softc *sc, u_int status);
  135 static void     cas_free(void *arg1, void* arg2);
  136 static void     cas_init(void *xsc);
  137 static void     cas_init_locked(struct cas_softc *sc);
  138 static void     cas_init_regs(struct cas_softc *sc);
  139 static int      cas_intr(void *v);
  140 static void     cas_intr_task(void *arg, int pending __unused);
  141 static int      cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
  142 static int      cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head);
  143 static int      cas_mediachange(struct ifnet *ifp);
  144 static void     cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
  145 static void     cas_meminit(struct cas_softc *sc);
  146 static void     cas_mifinit(struct cas_softc *sc);
  147 static int      cas_mii_readreg(device_t dev, int phy, int reg);
  148 static void     cas_mii_statchg(device_t dev);
  149 static int      cas_mii_writereg(device_t dev, int phy, int reg, int val);
  150 static void     cas_reset(struct cas_softc *sc);
  151 static int      cas_reset_rx(struct cas_softc *sc);
  152 static int      cas_reset_tx(struct cas_softc *sc);
  153 static void     cas_resume(struct cas_softc *sc);
  154 static u_int    cas_descsize(u_int sz);
  155 static void     cas_rint(struct cas_softc *sc);
  156 static void     cas_rint_timeout(void *arg);
  157 static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum);
  158 static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp);
  159 static u_int    cas_rxcompsize(u_int sz);
  160 static void     cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs,
  161                     int nsegs, int error);
  162 static void     cas_setladrf(struct cas_softc *sc);
  163 static void     cas_start(struct ifnet *ifp);
  164 static void     cas_stop(struct ifnet *ifp);
  165 static void     cas_suspend(struct cas_softc *sc);
  166 static void     cas_tick(void *arg);
  167 static void     cas_tint(struct cas_softc *sc);
  168 static void     cas_tx_task(void *arg, int pending __unused);
  169 static inline void cas_txkick(struct cas_softc *sc);
  170 static void     cas_watchdog(struct cas_softc *sc);
  171 
  172 static devclass_t cas_devclass;
  173 
  174 MODULE_DEPEND(cas, ether, 1, 1, 1);
  175 MODULE_DEPEND(cas, miibus, 1, 1, 1);
  176 
  177 #ifdef CAS_DEBUG
  178 #include <sys/ktr.h>
  179 #define KTR_CAS         KTR_CT2
  180 #endif
  181 
/*
 * Bus-independent part of the device attachment.
 *
 * Sets up the ifnet, creates the taskqueue used for deferred interrupt
 * and transmit processing, builds the DMA tags and maps for the control
 * data, TX buffers and RX buffers, attaches the PHY (external, internal
 * or PCS SERDES depending on how the chip is wired) and finally
 * registers the interface with the network stack.
 *
 * Returns 0 on success or an errno; on failure everything acquired up
 * to that point is released again via the fail_* labels at the bottom,
 * in reverse order of allocation.
 */
static int
cas_attach(struct cas_softc *sc)
{
        struct cas_txsoft *txs;
        struct ifnet *ifp;
        int error, i;
        uint32_t v;

        /* Set up ifnet structure. */
        ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                return (ENOSPC);
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = cas_start;
        ifp->if_ioctl = cas_ioctl;
        ifp->if_init = cas_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, CAS_TXQUEUELEN);
        ifp->if_snd.ifq_drv_maxlen = CAS_TXQUEUELEN;
        IFQ_SET_READY(&ifp->if_snd);

        callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
        /* Create local taskq. */
        TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
        TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
        sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
        if (sc->sc_tq == NULL) {
                device_printf(sc->sc_dev, "could not create taskqueue\n");
                error = ENXIO;
                goto fail_ifnet;
        }
        taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->sc_dev));

        /* Make sure the chip is stopped. */
        cas_reset(sc);

        /* Parent DMA tag; the specific tags below are derived from it. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
            &sc->sc_pdmatag);
        if (error != 0)
                goto fail_taskq;

        /* Tag for the page-sized RX buffers (single segment each). */
        error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag);
        if (error != 0)
                goto fail_ptag;

        /* Tag for TX mbuf chains of up to CAS_NTXSEGS segments. */
        error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES,
            BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
        if (error != 0)
                goto fail_rtag;

        /* Tag for the control data block holding the descriptor rings. */
        error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct cas_control_data), 1,
            sizeof(struct cas_control_data), 0,
            NULL, NULL, &sc->sc_cdmatag);
        if (error != 0)
                goto fail_ttag;

        /*
         * Allocate the control data structures, create and load the
         * DMA map for it.
         */
        if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
            (void **)&sc->sc_control_data,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_cddmamap)) != 0) {
                device_printf(sc->sc_dev,
                    "unable to allocate control data, error = %d\n", error);
                goto fail_ctag;
        }

        /*
         * sc_cddma is filled in by cas_cddma_callback(); a value of 0
         * afterwards means the callback never got a valid address.
         */
        sc->sc_cddma = 0;
        if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
            sc->sc_control_data, sizeof(struct cas_control_data),
            cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
                device_printf(sc->sc_dev,
                    "unable to load control data DMA map, error = %d\n",
                    error);
                goto fail_cmem;
        }

        /*
         * Initialize the transmit job descriptors.
         */
        STAILQ_INIT(&sc->sc_txfreeq);
        STAILQ_INIT(&sc->sc_txdirtyq);

        /*
         * Create the transmit buffer DMA maps.
         */
        error = ENOMEM;
        for (i = 0; i < CAS_TXQUEUELEN; i++) {
                txs = &sc->sc_txsoft[i];
                txs->txs_mbuf = NULL;
                txs->txs_ndescs = 0;
                if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
                    &txs->txs_dmamap)) != 0) {
                        device_printf(sc->sc_dev,
                            "unable to create TX DMA map %d, error = %d\n",
                            i, error);
                        goto fail_txd;
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /*
         * Allocate the receive buffers, create and load the DMA maps
         * for them.
         */
        for (i = 0; i < CAS_NRXDESC; i++) {
                if ((error = bus_dmamem_alloc(sc->sc_rdmatag,
                    &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK,
                    &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) {
                        device_printf(sc->sc_dev,
                            "unable to allocate RX buffer %d, error = %d\n",
                            i, error);
                        goto fail_rxmem;
                }

                /*
                 * Tell cas_rxdma_callback() which descriptor is being
                 * loaded; rxds_paddr == 0 afterwards means the callback
                 * never recorded a valid address.
                 */
                sc->sc_rxdptr = i;
                sc->sc_rxdsoft[i].rxds_paddr = 0;
                if ((error = bus_dmamap_load(sc->sc_rdmatag,
                    sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf,
                    CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 ||
                    sc->sc_rxdsoft[i].rxds_paddr == 0) {
                        device_printf(sc->sc_dev,
                            "unable to load RX DMA map %d, error = %d\n",
                            i, error);
                        goto fail_rxmap;
                }
        }

        if ((sc->sc_flags & CAS_SERDES) == 0) {
                CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII);
                CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                cas_mifinit(sc);
                /*
                 * Look for an external PHY.
                 */
                error = ENXIO;
                v = CAS_READ_4(sc, CAS_MIF_CONF);
                if ((v & CAS_MIF_CONF_MDI1) != 0) {
                        v |= CAS_MIF_CONF_PHY_SELECT;
                        CAS_WRITE_4(sc, CAS_MIF_CONF, v);
                        CAS_BARRIER(sc, CAS_MIF_CONF, 4,
                            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                        /* Enable/unfreeze the GMII pins of Saturn. */
                        if (sc->sc_variant == CAS_SATURN) {
                                CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
                                CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
                                    BUS_SPACE_BARRIER_READ |
                                    BUS_SPACE_BARRIER_WRITE);
                        }
                        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                            cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
                            MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
                }
                /*
                 * Fall back on an internal PHY if no external PHY was found.
                 */
                if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) {
                        v &= ~CAS_MIF_CONF_PHY_SELECT;
                        CAS_WRITE_4(sc, CAS_MIF_CONF, v);
                        CAS_BARRIER(sc, CAS_MIF_CONF, 4,
                            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                        /* Freeze the GMII pins of Saturn for saving power. */
                        if (sc->sc_variant == CAS_SATURN) {
                                CAS_WRITE_4(sc, CAS_SATURN_PCFG,
                                    CAS_SATURN_PCFG_FSI);
                                CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
                                    BUS_SPACE_BARRIER_READ |
                                    BUS_SPACE_BARRIER_WRITE);
                        }
                        error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                            cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
                            MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
                }
        } else {
                /*
                 * Use the external PCS SERDES.
                 */
                CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES);
                CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE);
                /* Enable/unfreeze the SERDES pins of Saturn. */
                if (sc->sc_variant == CAS_SATURN) {
                        CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
                        CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
                            BUS_SPACE_BARRIER_WRITE);
                }
                CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD);
                CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4,
                    BUS_SPACE_BARRIER_WRITE);
                CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN);
                CAS_BARRIER(sc, CAS_PCS_CONF, 4,
                    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
                error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
                    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
                    CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
        }
        if (error != 0) {
                device_printf(sc->sc_dev, "attaching PHYs failed\n");
                goto fail_rxmap;
        }
        sc->sc_mii = device_get_softc(sc->sc_miibus);

        /*
         * From this point forward, the attachment cannot fail.  A failure
         * before this point releases all resources that may have been
         * allocated.
         */

        /* Announce FIFO sizes (v / 16 converts the register value to kB). */
        v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE);
        device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
            CAS_RX_FIFO_SIZE / 1024, v / 16);

        /* Attach the interface. */
        ether_ifattach(ifp, sc->sc_enaddr);

        /*
         * Tell the upper layer(s) we support long frames/checksum offloads.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities = IFCAP_VLAN_MTU;
        if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist = CAS_CSUM_FEATURES;
        }
        ifp->if_capenable = ifp->if_capabilities;

        return (0);

        /*
         * Free any resources we've allocated during the failed attach
         * attempt.  Do this in reverse order and fall through.
         */
 fail_rxmap:
        for (i = 0; i < CAS_NRXDESC; i++)
                if (sc->sc_rxdsoft[i].rxds_paddr != 0)
                        bus_dmamap_unload(sc->sc_rdmatag,
                            sc->sc_rxdsoft[i].rxds_dmamap);
 fail_rxmem:
        for (i = 0; i < CAS_NRXDESC; i++)
                if (sc->sc_rxdsoft[i].rxds_buf != NULL)
                        bus_dmamem_free(sc->sc_rdmatag,
                            sc->sc_rxdsoft[i].rxds_buf,
                            sc->sc_rxdsoft[i].rxds_dmamap);
 fail_txd:
        for (i = 0; i < CAS_TXQUEUELEN; i++)
                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_tdmatag,
                            sc->sc_txsoft[i].txs_dmamap);
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
 fail_cmem:
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
            sc->sc_cddmamap);
 fail_ctag:
        bus_dma_tag_destroy(sc->sc_cdmatag);
 fail_ttag:
        bus_dma_tag_destroy(sc->sc_tdmatag);
 fail_rtag:
        bus_dma_tag_destroy(sc->sc_rdmatag);
 fail_ptag:
        bus_dma_tag_destroy(sc->sc_pdmatag);
 fail_taskq:
        taskqueue_free(sc->sc_tq);
 fail_ifnet:
        if_free(ifp);
        return (error);
}
  464 
/*
 * Detach the driver from the device: unhook the interface from the
 * network stack, stop the hardware and release all resources acquired
 * in cas_attach(), in reverse order of allocation.
 */
static void
cas_detach(struct cas_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        int i;

        /* Unhook from the stack first so no new work can arrive. */
        ether_ifdetach(ifp);
        CAS_LOCK(sc);
        cas_stop(ifp);
        CAS_UNLOCK(sc);
        /* Wait for all pending callouts and tasks to finish. */
        callout_drain(&sc->sc_tick_ch);
        callout_drain(&sc->sc_rx_ch);
        taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
        taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
        if_free(ifp);
        taskqueue_free(sc->sc_tq);
        device_delete_child(sc->sc_dev, sc->sc_miibus);

        /* Sync, unload and free each RX buffer, then the TX maps. */
        for (i = 0; i < CAS_NRXDESC; i++)
                if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
                        bus_dmamap_sync(sc->sc_rdmatag,
                            sc->sc_rxdsoft[i].rxds_dmamap,
                            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        for (i = 0; i < CAS_NRXDESC; i++)
                if (sc->sc_rxdsoft[i].rxds_paddr != 0)
                        bus_dmamap_unload(sc->sc_rdmatag,
                            sc->sc_rxdsoft[i].rxds_dmamap);
        for (i = 0; i < CAS_NRXDESC; i++)
                if (sc->sc_rxdsoft[i].rxds_buf != NULL)
                        bus_dmamem_free(sc->sc_rdmatag,
                            sc->sc_rxdsoft[i].rxds_buf,
                            sc->sc_rxdsoft[i].rxds_dmamap);
        for (i = 0; i < CAS_TXQUEUELEN; i++)
                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_tdmatag,
                            sc->sc_txsoft[i].txs_dmamap);
        /* Finally release the control data and destroy all DMA tags. */
        CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
        bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
            sc->sc_cddmamap);
        bus_dma_tag_destroy(sc->sc_cdmatag);
        bus_dma_tag_destroy(sc->sc_tdmatag);
        bus_dma_tag_destroy(sc->sc_rdmatag);
        bus_dma_tag_destroy(sc->sc_pdmatag);
}
  510 
  511 static void
  512 cas_suspend(struct cas_softc *sc)
  513 {
  514         struct ifnet *ifp = sc->sc_ifp;
  515 
  516         CAS_LOCK(sc);
  517         cas_stop(ifp);
  518         CAS_UNLOCK(sc);
  519 }
  520 
  521 static void
  522 cas_resume(struct cas_softc *sc)
  523 {
  524         struct ifnet *ifp = sc->sc_ifp;
  525 
  526         CAS_LOCK(sc);
  527         /*
  528          * On resume all registers have to be initialized again like
  529          * after power-on.
  530          */
  531         sc->sc_flags &= ~CAS_INITED;
  532         if (ifp->if_flags & IFF_UP)
  533                 cas_init_locked(sc);
  534         CAS_UNLOCK(sc);
  535 }
  536 
/*
 * Pass the RX checksum supplied by the chip up the stack.  Only
 * unfragmented IPv4 TCP and UDP packets are handled; anything else is
 * silently left for software checksumming.  The sum is adjusted for
 * any IP options before being handed to the protocol layers via
 * CSUM_DATA_VALID.
 */
static inline void
cas_rxcksum(struct mbuf *m, uint16_t cksum)
{
        struct ether_header *eh;
        struct ip *ip;
        struct udphdr *uh;
        uint16_t *opts;
        int32_t hlen, len, pktlen;
        uint32_t temp32;

        /* Frame must at least hold an Ethernet and a base IPv4 header. */
        pktlen = m->m_pkthdr.len;
        if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
                return;
        eh = mtod(m, struct ether_header *);
        if (eh->ether_type != htons(ETHERTYPE_IP))
                return;
        ip = (struct ip *)(eh + 1);
        if (ip->ip_v != IPVERSION)
                return;

        /* Sanity-check the IP header length and total length fields. */
        hlen = ip->ip_hl << 2;
        pktlen -= sizeof(struct ether_header);
        if (hlen < sizeof(struct ip))
                return;
        if (ntohs(ip->ip_len) < hlen)
                return;
        if (ntohs(ip->ip_len) != pktlen)
                return;
        if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
                return; /* Cannot handle fragmented packet. */

        switch (ip->ip_p) {
        case IPPROTO_TCP:
                if (pktlen < (hlen + sizeof(struct tcphdr)))
                        return;
                break;
        case IPPROTO_UDP:
                if (pktlen < (hlen + sizeof(struct udphdr)))
                        return;
                uh = (struct udphdr *)((uint8_t *)ip + hlen);
                if (uh->uh_sum == 0)
                        return; /* no checksum */
                break;
        default:
                return;
        }

        cksum = ~cksum;
        /* checksum fixup for IP options */
        len = hlen - sizeof(struct ip);
        if (len > 0) {
                /*
                 * Subtract each 16-bit option word from the one's
                 * complement sum, folding the carry back in after each
                 * step so the result remains a valid 16-bit sum.
                 */
                opts = (uint16_t *)(ip + 1);
                for (; len > 0; len -= sizeof(uint16_t), opts++) {
                        temp32 = cksum - *opts;
                        temp32 = (temp32 >> 16) + (temp32 & 65535);
                        cksum = temp32 & 65535;
                }
        }
        /*
         * CSUM_DATA_VALID without CSUM_PSEUDO_HDR: csum_data holds the
         * packet sum and the protocol layer still folds in the
         * pseudo-header.
         */
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
        m->m_pkthdr.csum_data = cksum;
}
  598 
  599 static void
  600 cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  601 {
  602         struct cas_softc *sc = xsc;
  603 
  604         if (error != 0)
  605                 return;
  606         if (nsegs != 1)
  607                 panic("%s: bad control buffer segment count", __func__);
  608         sc->sc_cddma = segs[0].ds_addr;
  609 }
  610 
  611 static void
  612 cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  613 {
  614         struct cas_softc *sc = xsc;
  615 
  616         if (error != 0)
  617                 return;
  618         if (nsegs != 1)
  619                 panic("%s: bad RX buffer segment count", __func__);
  620         sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr;
  621 }
  622 
/*
 * Per-second housekeeping, scheduled via sc_tick_ch and run with the
 * driver lock held: fold the MAC's hardware collision/error counters
 * into the ifnet statistics, clear the hardware counters, tick the MII
 * layer, reclaim outstanding TX descriptors and service the watchdog
 * before rescheduling itself.
 */
static void
cas_tick(void *arg)
{
        struct cas_softc *sc = arg;
        struct ifnet *ifp = sc->sc_ifp;
        uint32_t v;

        CAS_LOCK_ASSERT(sc, MA_OWNED);

        /*
         * Unload collision and error counters.
         */
        ifp->if_collisions +=
            CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) +
            CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT);
        /* Excessive/late collisions count as both collisions and errors. */
        v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) +
            CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT);
        ifp->if_collisions += v;
        ifp->if_oerrors += v;
        ifp->if_ierrors +=
            CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) +
            CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) +
            CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) +
            CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL);

        /*
         * Then clear the hardware counters.
         */
        CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

        mii_tick(sc->sc_mii);

        /* Reclaim completed TX descriptors if any are outstanding. */
        if (sc->sc_txfree != CAS_MAXTXFREE)
                cas_tint(sc);

        cas_watchdog(sc);

        /* Run again in one second. */
        callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}
  669 
  670 static int
  671 cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
  672 {
  673         int i;
  674         uint32_t reg;
  675 
  676         for (i = CAS_TRIES; i--; DELAY(100)) {
  677                 reg = CAS_READ_4(sc, r);
  678                 if ((reg & clr) == 0 && (reg & set) == set)
  679                         return (1);
  680         }
  681         return (0);
  682 }
  683 
/*
 * Perform a global software reset of the chip: mask all interrupts,
 * stop RX and TX DMA and then hit the reset register.  When the SERDES
 * is in use the PCS is excluded from the reset so the result of the
 * last auto-negotiation survives.
 */
static void
cas_reset(struct cas_softc *sc)
{

#ifdef CAS_DEBUG
        CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
        /* Disable all interrupts in order to avoid spurious ones. */
        CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

        cas_reset_rx(sc);
        cas_reset_tx(sc);

        /*
         * Do a full reset modulo the result of the last auto-negotiation
         * when using the SERDES.
         */
        CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX |
            ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
        CAS_BARRIER(sc, CAS_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        DELAY(3000);
        /* The RX/TX reset bits are expected to clear once reset completes. */
        if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
                device_printf(sc->sc_dev, "cannot reset device\n");
}
  709 
/*
 * Stop the hardware and discard all pending transmit work: cancel the
 * periodic callouts, mask interrupts, reset the TX and RX engines,
 * unload and free any mbufs still queued for transmission and mark
 * the interface as not running.  Called with the driver lock held.
 */
static void
cas_stop(struct ifnet *ifp)
{
        struct cas_softc *sc = ifp->if_softc;
        struct cas_txsoft *txs;

#ifdef CAS_DEBUG
        CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

        callout_stop(&sc->sc_tick_ch);
        callout_stop(&sc->sc_rx_ch);

        /* Disable all interrupts in order to avoid spurious ones. */
        CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

        cas_reset_tx(sc);
        cas_reset_rx(sc);

        /*
         * Release any queued transmit buffers.
         */
        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                if (txs->txs_ndescs != 0) {
                        bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
                        if (txs->txs_mbuf != NULL) {
                                m_freem(txs->txs_mbuf);
                                txs->txs_mbuf = NULL;
                        }
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        sc->sc_flags &= ~CAS_LINK;
        sc->sc_wdog_timer = 0;
}
  753 
/*
 * Reset the receiver (ERX).  Returns 0 on success, 1 when the reset
 * bits fail to clear in time.
 */
static int
cas_reset_rx(struct cas_softc *sc)
{

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        cas_disable_rx(sc);
        CAS_WRITE_4(sc, CAS_RX_CONF, 0);
        CAS_BARRIER(sc, CAS_RX_CONF, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0))
                device_printf(sc->sc_dev, "cannot disable RX DMA\n");

        /* Finally, reset the ERX. */
        CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX |
            ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
        CAS_BARRIER(sc, CAS_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) {
                device_printf(sc->sc_dev, "cannot reset receiver\n");
                return (1);
        }
        return (0);
}
  780 
/*
 * Reset the transmitter (ETX).  Returns 0 on success, 1 when the reset
 * bits fail to clear in time.
 */
static int
cas_reset_tx(struct cas_softc *sc)
{

        /*
         * Resetting while DMA is in progress can cause a bus hang, so we
         * disable DMA first.
         */
        cas_disable_tx(sc);
        CAS_WRITE_4(sc, CAS_TX_CONF, 0);
        CAS_BARRIER(sc, CAS_TX_CONF, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0))
                device_printf(sc->sc_dev, "cannot disable TX DMA\n");

        /* Finally, reset the ETX. */
        CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX |
            ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
        CAS_BARRIER(sc, CAS_RESET, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) {
                device_printf(sc->sc_dev, "cannot reset transmitter\n");
                return (1);
        }
        return (0);
}
  807 
/*
 * Disable the RX MAC and wait for the enable bit to clear.  Returns
 * the result of cas_bitwait(): non-zero when the MAC stopped in time.
 */
static int
cas_disable_rx(struct cas_softc *sc)
{

        CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
            CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN);
        CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        return (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0));
}
  818 
/*
 * Disable the TX MAC and wait for the enable bit to clear.  Returns
 * the result of cas_bitwait(): non-zero when the MAC stopped in time.
 */
static int
cas_disable_tx(struct cas_softc *sc)
{

        CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
            CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN);
        CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        return (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0));
}
  829 
/*
 * Reset a single RX completion ring entry to its initial state: words
 * 1 and 2 cleared, the checksum start offset in word 3 set to just past
 * a standard Ethernet + IP header, and the CAS_RC4_ZERO bit set in
 * word 4 (presumably marking the entry as empty/chip-owned -- confirm
 * against the completion descriptor documentation).
 */
static inline void
cas_rxcompinit(struct cas_rx_comp *rxcomp)
{

        rxcomp->crc_word1 = 0;
        rxcomp->crc_word2 = 0;
        rxcomp->crc_word3 =
            htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO));
        rxcomp->crc_word4 = htole64(CAS_RC4_ZERO);
}
  840 
/*
 * (Re-)initialize the descriptor rings in host memory and reset the
 * driver's ring bookkeeping (free count and ring pointers).
 */
static void
cas_meminit(struct cas_softc *sc)
{
        int i;

        CAS_LOCK_ASSERT(sc, MA_OWNED);

        /*
         * Initialize the transmit descriptor ring.
         */
        for (i = 0; i < CAS_NTXDESC; i++) {
                sc->sc_txdescs[i].cd_flags = 0;
                sc->sc_txdescs[i].cd_buf_ptr = 0;
        }
        sc->sc_txfree = CAS_MAXTXFREE;
        sc->sc_txnext = 0;
        sc->sc_txwin = 0;

        /*
         * Initialize the receive completion ring.
         */
        for (i = 0; i < CAS_NRXCOMP; i++)
                cas_rxcompinit(&sc->sc_rxcomps[i]);
        sc->sc_rxcptr = 0;

        /*
         * Initialize the first receive descriptor ring.  We leave
         * the second one zeroed as we don't actually use it.
         */
        for (i = 0; i < CAS_NRXDESC; i++)
                CAS_INIT_RXDESC(sc, i, i);
        sc->sc_rxdptr = 0;

        /* Flush the descriptor writes before the chip may access them. */
        CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
  876 
  877 static u_int
  878 cas_descsize(u_int sz)
  879 {
  880 
  881         switch (sz) {
  882         case 32:
  883                 return (CAS_DESC_32);
  884         case 64:
  885                 return (CAS_DESC_64);
  886         case 128:
  887                 return (CAS_DESC_128);
  888         case 256:
  889                 return (CAS_DESC_256);
  890         case 512:
  891                 return (CAS_DESC_512);
  892         case 1024:
  893                 return (CAS_DESC_1K);
  894         case 2048:
  895                 return (CAS_DESC_2K);
  896         case 4096:
  897                 return (CAS_DESC_4K);
  898         case 8192:
  899                 return (CAS_DESC_8K);
  900         default:
  901                 printf("%s: invalid descriptor ring size %d\n", __func__, sz);
  902                 return (CAS_DESC_32);
  903         }
  904 }
  905 
  906 static u_int
  907 cas_rxcompsize(u_int sz)
  908 {
  909 
  910         switch (sz) {
  911         case 128:
  912                 return (CAS_RX_CONF_COMP_128);
  913         case 256:
  914                 return (CAS_RX_CONF_COMP_256);
  915         case 512:
  916                 return (CAS_RX_CONF_COMP_512);
  917         case 1024:
  918                 return (CAS_RX_CONF_COMP_1K);
  919         case 2048:
  920                 return (CAS_RX_CONF_COMP_2K);
  921         case 4096:
  922                 return (CAS_RX_CONF_COMP_4K);
  923         case 8192:
  924                 return (CAS_RX_CONF_COMP_8K);
  925         case 16384:
  926                 return (CAS_RX_CONF_COMP_16K);
  927         case 32768:
  928                 return (CAS_RX_CONF_COMP_32K);
  929         default:
  930                 printf("%s: invalid dcompletion ring size %d\n", __func__, sz);
  931                 return (CAS_RX_CONF_COMP_128);
  932         }
  933 }
  934 
/*
 * Interface init entry point (if_init); acquires the softc lock and
 * hands off to cas_init_locked().
 */
static void
cas_init(void *xsc)
{
        struct cas_softc *sc = xsc;

        CAS_LOCK(sc);
        cas_init_locked(sc);
        CAS_UNLOCK(sc);
}
  944 
  945 /*
  946  * Initialization of interface; set up initialization block
  947  * and transmit/receive descriptor rings.
  948  */
  949 static void
  950 cas_init_locked(struct cas_softc *sc)
  951 {
  952         struct ifnet *ifp = sc->sc_ifp;
  953         uint32_t v;
  954 
  955         CAS_LOCK_ASSERT(sc, MA_OWNED);
  956 
  957         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
  958                 return;
  959 
  960 #ifdef CAS_DEBUG
  961         CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev),
  962             __func__);
  963 #endif
  964         /*
  965          * Initialization sequence.  The numbered steps below correspond
  966          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  967          * Channel Engine manual (part of the PCIO manual).
  968          * See also the STP2002-STQ document from Sun Microsystems.
  969          */
  970 
  971         /* step 1 & 2.  Reset the Ethernet Channel. */
  972         cas_stop(ifp);
  973         cas_reset(sc);
  974 #ifdef CAS_DEBUG
  975         CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev),
  976             __func__);
  977 #endif
  978 
  979         if ((sc->sc_flags & CAS_SERDES) == 0)
  980                 /* Re-initialize the MIF. */
  981                 cas_mifinit(sc);
  982 
  983         /* step 3.  Setup data structures in host memory. */
  984         cas_meminit(sc);
  985 
  986         /* step 4.  TX MAC registers & counters */
  987         cas_init_regs(sc);
  988 
  989         /* step 5.  RX MAC registers & counters */
  990         cas_setladrf(sc);
  991 
  992         /* step 6 & 7.  Program Ring Base Addresses. */
  993         CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI,
  994             (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32));
  995         CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO,
  996             CAS_CDTXDADDR(sc, 0) & 0xffffffff);
  997 
  998         CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI,
  999             (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
 1000         CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO,
 1001             CAS_CDRXCADDR(sc, 0) & 0xffffffff);
 1002 
 1003         CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI,
 1004             (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32));
 1005         CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO,
 1006             CAS_CDRXDADDR(sc, 0) & 0xffffffff);
 1007 
 1008         if ((sc->sc_flags & CAS_REG_PLUS) != 0) {
 1009                 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI,
 1010                     (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32));
 1011                 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO,
 1012                     CAS_CDRXD2ADDR(sc, 0) & 0xffffffff);
 1013         }
 1014 
 1015 #ifdef CAS_DEBUG
 1016         CTR5(KTR_CAS,
 1017             "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx",
 1018             CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0),
 1019             CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma);
 1020 #endif
 1021 
 1022         /* step 8.  Global Configuration & Interrupt Masks */
 1023 
 1024         /* Disable weighted round robin. */
 1025         CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS);
 1026 
 1027         /*
 1028          * Enable infinite bursts for revisions without PCI issues if
 1029          * applicable.  Doing so greatly improves the TX performance on
 1030          * !__sparc64__.
 1031          */
 1032         CAS_WRITE_4(sc, CAS_INF_BURST,
 1033 #if !defined(__sparc64__)
 1034             (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN :
 1035 #endif
 1036             0);
 1037 
 1038         /* Set up interrupts. */
 1039         CAS_WRITE_4(sc, CAS_INTMASK,
 1040             ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
 1041             CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
 1042             CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
 1043             CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
 1044             CAS_INTR_PCI_ERROR_INT
 1045 #ifdef CAS_DEBUG
 1046             | CAS_INTR_PCS_INT | CAS_INTR_MIF
 1047 #endif
 1048             ));
 1049         /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */
 1050         CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0);
 1051         CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW);
 1052         CAS_WRITE_4(sc, CAS_MAC_TX_MASK,
 1053             ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR));
 1054 #ifdef CAS_DEBUG
 1055         CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
 1056             ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
 1057             CAS_MAC_CTRL_NON_PAUSE));
 1058 #else
 1059         CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
 1060             CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
 1061             CAS_MAC_CTRL_NON_PAUSE);
 1062 #endif
 1063 
 1064         /* Enable PCI error interrupts. */
 1065         CAS_WRITE_4(sc, CAS_ERROR_MASK,
 1066             ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO |
 1067             CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO));
 1068 
 1069         /* Enable PCI error interrupts in BIM configuration. */
 1070         CAS_WRITE_4(sc, CAS_BIM_CONF,
 1071             CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN);
 1072 
 1073         /*
 1074          * step 9.  ETX Configuration: encode receive descriptor ring size,
 1075          * enable DMA and disable pre-interrupt writeback completion.
 1076          */
 1077         v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT;
 1078         CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN |
 1079             CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS);
 1080 
 1081         /* step 10.  ERX Configuration */
 1082 
 1083         /*
 1084          * Encode receive completion and descriptor ring sizes, set the
 1085          * swivel offset.
 1086          */
 1087         v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT;
 1088         v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT;
 1089         if ((sc->sc_flags & CAS_REG_PLUS) != 0)
 1090                 v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT;
 1091         CAS_WRITE_4(sc, CAS_RX_CONF,
 1092             v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT));
 1093 
 1094         /* Set the PAUSE thresholds.  We use the maximum OFF threshold. */
 1095         CAS_WRITE_4(sc, CAS_RX_PTHRS,
 1096             (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT));
 1097 
 1098         /* RX blanking */
 1099         CAS_WRITE_4(sc, CAS_RX_BLANK,
 1100             (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT));
 1101 
 1102         /* Set RX_COMP_AFULL threshold to half of the RX completions. */
 1103         CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS,
 1104             (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT);
 1105 
 1106         /* Initialize the RX page size register as appropriate for 8k. */
 1107         CAS_WRITE_4(sc, CAS_RX_PSZ,
 1108             (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) |
 1109             (4 << CAS_RX_PSZ_MB_CNT_SHFT) |
 1110             (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) |
 1111             (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT));
 1112 
 1113         /* Disable RX random early detection. */
 1114         CAS_WRITE_4(sc, CAS_RX_RED, 0);
 1115 
 1116         /* Zero the RX reassembly DMA table. */
 1117         for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) {
 1118                 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v);
 1119                 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0);
 1120                 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0);
 1121                 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0);
 1122         }
 1123 
 1124         /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */
 1125         CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0);
 1126         CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0);
 1127 
 1128         /* Finally, enable RX DMA. */
 1129         CAS_WRITE_4(sc, CAS_RX_CONF,
 1130             CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN);
 1131 
 1132         /* step 11.  Configure Media. */
 1133 
 1134         /* step 12.  RX_MAC Configuration Register */
 1135         v = CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_STRPPAD;
 1136         v |= CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_STRPFCS;
 1137         CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0);
 1138         CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
 1139             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1140         if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
 1141                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1142         CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
 1143 
 1144         /* step 13.  TX_MAC Configuration Register */
 1145         v = CAS_READ_4(sc, CAS_MAC_TX_CONF);
 1146         v |= CAS_MAC_TX_CONF_EN;
 1147         CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0);
 1148         CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
 1149             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1150         if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
 1151                 device_printf(sc->sc_dev, "cannot configure TX MAC\n");
 1152         CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v);
 1153 
 1154         /* step 14.  Issue Transmit Pending command. */
 1155 
 1156         /* step 15.  Give the receiver a swift kick. */
 1157         CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4);
 1158         CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0);
 1159         if ((sc->sc_flags & CAS_REG_PLUS) != 0)
 1160                 CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4);
 1161 
 1162         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1163         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1164 
 1165         mii_mediachg(sc->sc_mii);
 1166 
 1167         /* Start the one second timer. */
 1168         sc->sc_wdog_timer = 0;
 1169         callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
 1170 }
 1171 
 1172 static int
 1173 cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
 1174 {
 1175         bus_dma_segment_t txsegs[CAS_NTXSEGS];
 1176         struct cas_txsoft *txs;
 1177         struct ip *ip;
 1178         struct mbuf *m;
 1179         uint64_t cflags;
 1180         int error, nexttx, nsegs, offset, seg;
 1181 
 1182         CAS_LOCK_ASSERT(sc, MA_OWNED);
 1183 
 1184         /* Get a work queue entry. */
 1185         if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
 1186                 /* Ran out of descriptors. */
 1187                 return (ENOBUFS);
 1188         }
 1189 
 1190         cflags = 0;
 1191         if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
 1192                 if (M_WRITABLE(*m_head) == 0) {
 1193                         m = m_dup(*m_head, M_DONTWAIT);
 1194                         m_freem(*m_head);
 1195                         *m_head = m;
 1196                         if (m == NULL)
 1197                                 return (ENOBUFS);
 1198                 }
 1199                 offset = sizeof(struct ether_header);
 1200                 m = m_pullup(*m_head, offset + sizeof(struct ip));
 1201                 if (m == NULL) {
 1202                         *m_head = NULL;
 1203                         return (ENOBUFS);
 1204                 }
 1205                 ip = (struct ip *)(mtod(m, caddr_t) + offset);
 1206                 offset += (ip->ip_hl << 2);
 1207                 cflags = (offset << CAS_TD_CKSUM_START_SHFT) |
 1208                     ((offset + m->m_pkthdr.csum_data) <<
 1209                     CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN;
 1210                 *m_head = m;
 1211         }
 1212 
 1213         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
 1214             *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
 1215         if (error == EFBIG) {
 1216                 m = m_collapse(*m_head, M_DONTWAIT, CAS_NTXSEGS);
 1217                 if (m == NULL) {
 1218                         m_freem(*m_head);
 1219                         *m_head = NULL;
 1220                         return (ENOBUFS);
 1221                 }
 1222                 *m_head = m;
 1223                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
 1224                     txs->txs_dmamap, *m_head, txsegs, &nsegs,
 1225                     BUS_DMA_NOWAIT);
 1226                 if (error != 0) {
 1227                         m_freem(*m_head);
 1228                         *m_head = NULL;
 1229                         return (error);
 1230                 }
 1231         } else if (error != 0)
 1232                 return (error);
 1233         /* If nsegs is wrong then the stack is corrupt. */
 1234         KASSERT(nsegs <= CAS_NTXSEGS,
 1235             ("%s: too many DMA segments (%d)", __func__, nsegs));
 1236         if (nsegs == 0) {
 1237                 m_freem(*m_head);
 1238                 *m_head = NULL;
 1239                 return (EIO);
 1240         }
 1241 
 1242         /*
 1243          * Ensure we have enough descriptors free to describe
 1244          * the packet.  Note, we always reserve one descriptor
 1245          * at the end of the ring as a termination point, in
 1246          * order to prevent wrap-around.
 1247          */
 1248         if (nsegs > sc->sc_txfree - 1) {
 1249                 txs->txs_ndescs = 0;
 1250                 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
 1251                 return (ENOBUFS);
 1252         }
 1253 
 1254         txs->txs_ndescs = nsegs;
 1255         txs->txs_firstdesc = sc->sc_txnext;
 1256         nexttx = txs->txs_firstdesc;
 1257         for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) {
 1258 #ifdef CAS_DEBUG
 1259                 CTR6(KTR_CAS,
 1260                     "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
 1261                     __func__, seg, nexttx, txsegs[seg].ds_len,
 1262                     txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
 1263 #endif
 1264                 sc->sc_txdescs[nexttx].cd_buf_ptr =
 1265                     htole64(txsegs[seg].ds_addr);
 1266                 KASSERT(txsegs[seg].ds_len <
 1267                     CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT,
 1268                     ("%s: segment size too large!", __func__));
 1269                 sc->sc_txdescs[nexttx].cd_flags =
 1270                     htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT);
 1271                 txs->txs_lastdesc = nexttx;
 1272         }
 1273 
 1274         /* Set EOF on the last descriptor. */
 1275 #ifdef CAS_DEBUG
 1276         CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d",
 1277             __func__, seg, nexttx);
 1278 #endif
 1279         sc->sc_txdescs[txs->txs_lastdesc].cd_flags |=
 1280             htole64(CAS_TD_END_OF_FRAME);
 1281 
 1282         /* Lastly set SOF on the first descriptor. */
 1283 #ifdef CAS_DEBUG
 1284         CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d",
 1285             __func__, seg, nexttx);
 1286 #endif
 1287         if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) {
 1288                 sc->sc_txwin = 0;
 1289                 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
 1290                     htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME);
 1291         } else
 1292                 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
 1293                     htole64(cflags | CAS_TD_START_OF_FRAME);
 1294 
 1295         /* Sync the DMA map. */
 1296         bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
 1297             BUS_DMASYNC_PREWRITE);
 1298 
 1299 #ifdef CAS_DEBUG
 1300         CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
 1301             __func__, txs->txs_firstdesc, txs->txs_lastdesc,
 1302             txs->txs_ndescs);
 1303 #endif
 1304         STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 1305         STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 1306         txs->txs_mbuf = *m_head;
 1307 
 1308         sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc);
 1309         sc->sc_txfree -= txs->txs_ndescs;
 1310 
 1311         return (0);
 1312 }
 1313 
/*
 * Program the MAC registers.  The settings guarded by CAS_INITED are
 * written only once (they survive a chip reset); the statistics
 * counters, the PAUSE time, the station address and the XIF
 * configuration are (re-)written on every invocation.
 */
static void
cas_init_regs(struct cas_softc *sc)
{
        int i;
        const u_char *laddr = IF_LLADDR(sc->sc_ifp);

        CAS_LOCK_ASSERT(sc, MA_OWNED);

        /* These registers are not cleared on reset. */
        if ((sc->sc_flags & CAS_INITED) == 0) {
                /* magic values */
                CAS_WRITE_4(sc, CAS_MAC_IPG0, 0);
                CAS_WRITE_4(sc, CAS_MAC_IPG1, 8);
                CAS_WRITE_4(sc, CAS_MAC_IPG2, 4);

                /* min frame length */
                CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN);
                /* max frame length and max burst size */
                CAS_WRITE_4(sc, CAS_MAC_MAX_BF,
                    ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) <<
                    CAS_MAC_MAX_BF_FRM_SHFT) |
                    (0x2000 << CAS_MAC_MAX_BF_BST_SHFT));

                /* more magic values */
                CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
                CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4);
                CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10);
                CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808);

                /* random number seed (derived from the low MAC bytes) */
                CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED,
                    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

                /* secondary MAC addresses: 0:0:0:0:0:0 */
                for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41;
                    i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3)
                        CAS_WRITE_4(sc, i, 0);

                /* MAC control address: 01:80:c2:00:00:01 */
                CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001);
                CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200);
                CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180);

                /* MAC filter address: 0:0:0:0:0:0 */
                CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0);
                CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0);
                CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0);
                CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0);
                CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0);

                /* Zero the hash table. */
                for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15;
                    i += CAS_MAC_HASH1 - CAS_MAC_HASH0)
                        CAS_WRITE_4(sc, i, 0);

                sc->sc_flags |= CAS_INITED;
        }

        /* Counters need to be zeroed. */
        CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
        CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

        /* Set XOFF PAUSE time. */
        CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT);

        /* Set the station address (big-endian 16-bit register halves). */
        CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
        CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
        CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

        /* Enable MII outputs. */
        CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE);
}
 1396 
 1397 static void
 1398 cas_tx_task(void *arg, int pending __unused)
 1399 {
 1400         struct ifnet *ifp;
 1401 
 1402         ifp = (struct ifnet *)arg;
 1403         cas_start(ifp);
 1404 }
 1405 
/*
 * Hand newly filled TX descriptors to the chip.
 */
static inline void
cas_txkick(struct cas_softc *sc)
{

        /*
         * Update the TX kick register.  This register has to point to the
         * descriptor after the last valid one and for optimum performance
         * should be incremented in multiples of 4 (the DMA engine fetches/
         * updates descriptors in batches of 4).
         */
#ifdef CAS_DEBUG
        CTR3(KTR_CAS, "%s: %s: kicking TX %d",
            device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
        /* Flush the descriptor writes to memory before kicking the chip. */
        CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext);
}
 1423 
 1424 static void
 1425 cas_start(struct ifnet *ifp)
 1426 {
 1427         struct cas_softc *sc = ifp->if_softc;
 1428         struct mbuf *m;
 1429         int kicked, ntx;
 1430 
 1431         CAS_LOCK(sc);
 1432 
 1433         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1434             IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) {
 1435                 CAS_UNLOCK(sc);
 1436                 return;
 1437         }
 1438 
 1439         if (sc->sc_txfree < CAS_MAXTXFREE / 4)
 1440                 cas_tint(sc);
 1441 
 1442 #ifdef CAS_DEBUG
 1443         CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d",
 1444             device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
 1445             sc->sc_txnext);
 1446 #endif
 1447         ntx = 0;
 1448         kicked = 0;
 1449         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
 1450                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1451                 if (m == NULL)
 1452                         break;
 1453                 if (cas_load_txmbuf(sc, &m) != 0) {
 1454                         if (m == NULL)
 1455                                 break;
 1456                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1457                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1458                         break;
 1459                 }
 1460                 if ((sc->sc_txnext % 4) == 0) {
 1461                         cas_txkick(sc);
 1462                         kicked = 1;
 1463                 } else
 1464                         kicked = 0;
 1465                 ntx++;
 1466                 BPF_MTAP(ifp, m);
 1467         }
 1468 
 1469         if (ntx > 0) {
 1470                 if (kicked == 0)
 1471                         cas_txkick(sc);
 1472 #ifdef CAS_DEBUG
 1473                 CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d",
 1474                     device_get_name(sc->sc_dev), sc->sc_txnext);
 1475 #endif
 1476 
 1477                 /* Set a watchdog timer in case the chip flakes out. */
 1478                 sc->sc_wdog_timer = 5;
 1479 #ifdef CAS_DEBUG
 1480                 CTR3(KTR_CAS, "%s: %s: watchdog %d",
 1481                     device_get_name(sc->sc_dev), __func__,
 1482                     sc->sc_wdog_timer);
 1483 #endif
 1484         }
 1485 
 1486         CAS_UNLOCK(sc);
 1487 }
 1488 
 1489 static void
 1490 cas_tint(struct cas_softc *sc)
 1491 {
 1492         struct ifnet *ifp = sc->sc_ifp;
 1493         struct cas_txsoft *txs;
 1494         int progress;
 1495         uint32_t txlast;
 1496 #ifdef CAS_DEBUG
 1497         int i;
 1498 
 1499         CAS_LOCK_ASSERT(sc, MA_OWNED);
 1500 
 1501         CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
 1502 #endif
 1503 
 1504         /*
 1505          * Go through our TX list and free mbufs for those
 1506          * frames that have been transmitted.
 1507          */
 1508         progress = 0;
 1509         CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
 1510         while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
 1511 #ifdef CAS_DEBUG
 1512                 if ((ifp->if_flags & IFF_DEBUG) != 0) {
 1513                         printf("    txsoft %p transmit chain:\n", txs);
 1514                         for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) {
 1515                                 printf("descriptor %d: ", i);
 1516                                 printf("cd_flags: 0x%016llx\t",
 1517                                     (long long)le64toh(
 1518                                     sc->sc_txdescs[i].cd_flags));
 1519                                 printf("cd_buf_ptr: 0x%016llx\n",
 1520                                     (long long)le64toh(
 1521                                     sc->sc_txdescs[i].cd_buf_ptr));
 1522                                 if (i == txs->txs_lastdesc)
 1523                                         break;
 1524                         }
 1525                 }
 1526 #endif
 1527 
 1528                 /*
 1529                  * In theory, we could harvest some descriptors before
 1530                  * the ring is empty, but that's a bit complicated.
 1531                  *
 1532                  * CAS_TX_COMPn points to the last descriptor
 1533                  * processed + 1.
 1534                  */
 1535                 txlast = CAS_READ_4(sc, CAS_TX_COMP3);
 1536 #ifdef CAS_DEBUG
 1537                 CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, "
 1538                     "txs->txs_lastdesc = %d, txlast = %d",
 1539                     __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
 1540 #endif
 1541                 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
 1542                         if ((txlast >= txs->txs_firstdesc) &&
 1543                             (txlast <= txs->txs_lastdesc))
 1544                                 break;
 1545                 } else {
 1546                         /* Ick -- this command wraps. */
 1547                         if ((txlast >= txs->txs_firstdesc) ||
 1548                             (txlast <= txs->txs_lastdesc))
 1549                                 break;
 1550                 }
 1551 
 1552 #ifdef CAS_DEBUG
 1553                 CTR1(KTR_CAS, "%s: releasing a descriptor", __func__);
 1554 #endif
 1555                 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
 1556 
 1557                 sc->sc_txfree += txs->txs_ndescs;
 1558 
 1559                 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
 1560                     BUS_DMASYNC_POSTWRITE);
 1561                 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
 1562                 if (txs->txs_mbuf != NULL) {
 1563                         m_freem(txs->txs_mbuf);
 1564                         txs->txs_mbuf = NULL;
 1565                 }
 1566 
 1567                 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
 1568 
 1569                 ifp->if_opackets++;
 1570                 progress = 1;
 1571         }
 1572 
 1573 #ifdef CAS_DEBUG
 1574         CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx "
 1575             "CAS_TX_COMP3 %x",
 1576             __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2),
 1577             ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) |
 1578             CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO),
 1579             CAS_READ_4(sc, CAS_TX_COMP3));
 1580 #endif
 1581 
 1582         if (progress) {
 1583                 /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */
 1584                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1585                 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
 1586                         sc->sc_wdog_timer = 0;
 1587         }
 1588 
 1589 #ifdef CAS_DEBUG
 1590         CTR3(KTR_CAS, "%s: %s: watchdog %d",
 1591             device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
 1592 #endif
 1593 }
 1594 
 1595 static void
 1596 cas_rint_timeout(void *arg)
 1597 {
 1598         struct cas_softc *sc = arg;
 1599 
 1600         CAS_LOCK_ASSERT(sc, MA_OWNED);
 1601 
 1602         cas_rint(sc);
 1603 }
 1604 
/*
 * cas_rint: process the RX completion ring.
 *
 * Walks the completion ring from the software pointer up to the hardware
 * head, building mbufs that reference the driver's RX buffers via external
 * storage (MEXTADD with cas_free() as the free callback) and passing them
 * to if_input.  A completion may describe a header-only packet, a data
 * packet, or a packet split across two buffers.  RX buffers are refcounted;
 * they are handed back to the chip only when the last mbuf referencing
 * them is freed.
 */
static void
cas_rint(struct cas_softc *sc)
{
	struct cas_rxdsoft *rxds, *rxds2;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m, *m2;
	uint64_t word1, word2, word3, word4;
	uint32_t rxhead;
	u_int idx, idx2, len, off, skip;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* Cancel a pending retry; we are processing the ring now. */
	callout_stop(&sc->sc_rx_ch);

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

/*
 * Helpers over the 4 64-bit words of a completion ring entry.
 * NOTE(review): the "##" inside PRINTWORD's string literal is not
 * token-pasted (paste does not occur inside strings), so the debug
 * output literally reads "word ## n"; harmless, debug-only.
 */
#define PRINTWORD(n, delimiter)						\
	printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter)

#define SKIPASSERT(n)							\
	KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0,	\
	    ("%s: word ## n not 0", __func__))

#define WORDTOH(n)							\
	word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n)

	/*
	 * Read the completion head register once.  This limits
	 * how long the following loop can execute.
	 */
	rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead);
#endif
	skip = 0;
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxcptr != rxhead;
	    sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) {
		/*
		 * A previous entry requested that the following `skip'
		 * entries be ignored; they must be all-zero.
		 */
		if (skip != 0) {
			SKIPASSERT(1);
			SKIPASSERT(2);
			SKIPASSERT(3);

			--skip;
			goto skip;
		}

		/* Pull the entry's four words into host byte order. */
		WORDTOH(1);
		WORDTOH(2);
		WORDTOH(3);
		WORDTOH(4);

#ifdef CAS_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    completion %d: ", sc->sc_rxcptr);
			PRINTWORD(1, '\t');
			PRINTWORD(2, '\t');
			PRINTWORD(3, '\t');
			PRINTWORD(4, '\n');
		}
#endif

		if (__predict_false(
		    (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW ||
		    (word4 & CAS_RC4_ZERO) != 0)) {
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS,
			    cas_rint_timeout, sc);
			break;
		}

		/* Drop frames the chip flagged as bad. */
		if (__predict_false(
		    (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev,
			    "receive error: CRC error\n");
			continue;
		}

		/*
		 * Sanity-check the completion: header and data parts are
		 * mutually exclusive, as are the corresponding releases.
		 */
		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: data and header present", __func__));
		KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: split and header present", __func__));
		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_HDR) == 0,
		    ("%s: data present but header release", __func__));
		KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_DATA) == 0,
		    ("%s: header present but data release", __func__));

		if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) {
			/*
			 * Header-only packet: the payload lives in a header
			 * buffer at 256-byte granularity (hence off * 256).
			 */
			idx = CAS_GET(word2, CAS_RC2_HDR_INDEX);
			off = CAS_GET(word2, CAS_RC2_HDR_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) {
				/* The mbuf now also references the buffer. */
				refcount_acquire(&rxds->rxds_refcount);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
#if __FreeBSD_version < 800016
				MEXTADD(m, (caddr_t)rxds->rxds_buf +
				    off * 256 + ETHER_ALIGN, len, cas_free,
				    rxds, M_RDONLY, EXT_NET_DRV);
#else
				MEXTADD(m, (caddr_t)rxds->rxds_buf +
				    off * 256 + ETHER_ALIGN, len, cas_free,
				    sc, (void *)(uintptr_t)idx,
				    M_RDONLY, EXT_NET_DRV);
#endif
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len = len;
				ifp->if_ipackets++;
				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				(*ifp->if_input)(ifp, m);
				CAS_LOCK(sc);
			} else
				ifp->if_ierrors++;

			/* Return the buffer to the chip on last reference. */
			if ((word1 & CAS_RC1_RELEASE_HDR) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
		} else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) {
			/* Data packet, possibly split across two buffers. */
			idx = CAS_GET(word1, CAS_RC1_DATA_INDEX);
			off = CAS_GET(word1, CAS_RC1_DATA_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) {
				refcount_acquire(&rxds->rxds_refcount);
				off += ETHER_ALIGN;
				/* First segment: what fits in this page. */
				m->m_len = min(CAS_PAGE_SIZE - off, len);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
#if __FreeBSD_version < 800016
				MEXTADD(m, (caddr_t)rxds->rxds_buf + off,
				    m->m_len, cas_free, rxds, M_RDONLY,
				    EXT_NET_DRV);
#else
				MEXTADD(m, (caddr_t)rxds->rxds_buf + off,
				    m->m_len, cas_free, sc,
				    (void *)(uintptr_t)idx, M_RDONLY,
				    EXT_NET_DRV);
#endif
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			idx2 = 0;
			m2 = NULL;
			rxds2 = NULL;
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0) {
				KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0,
				    ("%s: split but no release next",
				    __func__));

				/* Second segment starts in the next buffer. */
				idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX);
#ifdef CAS_DEBUG
				CTR2(KTR_CAS, "%s: split at idx %d",
				    __func__, idx2);
#endif
				rxds2 = &sc->sc_rxdsoft[idx2];
				if (m != NULL) {
					MGET(m2, M_DONTWAIT, MT_DATA);
					if (m2 != NULL) {
						refcount_acquire(
						    &rxds2->rxds_refcount);
						m2->m_len = len - m->m_len;
						bus_dmamap_sync(
						    sc->sc_rdmatag,
						    rxds2->rxds_dmamap,
						    BUS_DMASYNC_POSTREAD);
#if __FreeBSD_version < 800016
						MEXTADD(m2,
						    (caddr_t)rxds2->rxds_buf,
						    m2->m_len, cas_free,
						    rxds2, M_RDONLY,
						    EXT_NET_DRV);
#else
						MEXTADD(m2,
						    (caddr_t)rxds2->rxds_buf,
						    m2->m_len, cas_free, sc,
						    (void *)(uintptr_t)idx2,
						    M_RDONLY, EXT_NET_DRV);
#endif
						if ((m2->m_flags & M_EXT) ==
						    0) {
							m_freem(m2);
							m2 = NULL;
						}
					}
				}
				/*
				 * Either chain both segments or drop the
				 * whole packet; a half packet is useless.
				 */
				if (m2 != NULL)
					m->m_next = m2;
				else if (m != NULL) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = len;
				ifp->if_ipackets++;
				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				(*ifp->if_input)(ifp, m);
				CAS_LOCK(sc);
			} else
				ifp->if_ierrors++;

			if ((word1 & CAS_RC1_RELEASE_DATA) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0 &&
			    refcount_release(&rxds2->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx2);
		}

		/* Number of following entries the chip asked us to ignore. */
		skip = CAS_GET(word1, CAS_RC1_SKIP);

 skip:
		/* Re-zero the entry so the ownership checks above work. */
		cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Tell the chip how far we got. */
	CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr);

#undef PRINTWORD
#undef SKIPASSERT
#undef WORDTOH

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr,
	    CAS_READ_4(sc, CAS_RX_COMP_HEAD));
#endif
}
 1875 
/*
 * cas_free: external-storage free callback for RX mbufs (see MEXTADD
 * calls in cas_rint()).  Drops one reference on the RX buffer and, when
 * the last reference goes away, hands the buffer back to the hardware.
 */
static void
cas_free(void *arg1, void *arg2)
{
	struct cas_rxdsoft *rxds;
	struct cas_softc *sc;
	u_int idx, locked;

#if __FreeBSD_version < 800016
	/* Old MEXTADD() ABI: the rxds pointer is passed directly. */
	rxds = arg2;
	sc = rxds->rxds_sc;
	idx = rxds->rxds_idx;
#else
	/* New MEXTADD() ABI: softc in arg1, buffer index in arg2. */
	sc = arg1;
	idx = (uintptr_t)arg2;
	rxds = &sc->sc_rxdsoft[idx];
#endif
	if (refcount_release(&rxds->rxds_refcount) == 0)
		return;

	/*
	 * NB: this function can be called via m_freem(9) within
	 * this driver!
	 */
	/* Take the lock only if the caller does not already hold it. */
	if ((locked = CAS_LOCK_OWNED(sc)) == 0)
		CAS_LOCK(sc);
	cas_add_rxdesc(sc, idx);
	if (locked == 0)
		CAS_UNLOCK(sc);
}
 1905 
/*
 * cas_add_rxdesc: hand the RX buffer at `idx' back to the hardware by
 * writing a fresh entry at the current RX descriptor ring position.
 */
static inline void
cas_add_rxdesc(struct cas_softc *sc, u_int idx)
{

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* Make the buffer device-visible before publishing it. */
	bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap,
	    BUS_DMASYNC_PREREAD);
	CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx);
	sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr);

	/*
	 * Update the RX kick register.  This register has to point to the
	 * descriptor after the last valid one (before the current batch)
	 * and for optimum performance should be incremented in multiples
	 * of 4 (the DMA engine fetches/updates descriptors in batches of 4).
	 */
	if ((sc->sc_rxdptr % 4) == 0) {
		CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* + CAS_NRXDESC - 4 wraps backwards by 4 modulo ring size. */
		CAS_WRITE_4(sc, CAS_RX_KICK,
		    (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK);
	}
}
 1929 
 1930 static void
 1931 cas_eint(struct cas_softc *sc, u_int status)
 1932 {
 1933         struct ifnet *ifp = sc->sc_ifp;
 1934 
 1935         CAS_LOCK_ASSERT(sc, MA_OWNED);
 1936 
 1937         ifp->if_ierrors++;
 1938 
 1939         device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
 1940         if ((status & CAS_INTR_PCI_ERROR_INT) != 0) {
 1941                 status = CAS_READ_4(sc, CAS_ERROR_STATUS);
 1942                 printf(", PCI bus error 0x%x", status);
 1943                 if ((status & CAS_ERROR_OTHER) != 0) {
 1944                         status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
 1945                         printf(", PCI status 0x%x", status);
 1946                         pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
 1947                 }
 1948         }
 1949         printf("\n");
 1950 
 1951         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1952         cas_init_locked(sc);
 1953         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1954                 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
 1955 }
 1956 
 1957 static int
 1958 cas_intr(void *v)
 1959 {
 1960         struct cas_softc *sc = v;
 1961 
 1962         if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) &
 1963             CAS_INTR_SUMMARY) == 0))
 1964                 return (FILTER_STRAY);
 1965 
 1966         /* Disable interrupts. */
 1967         CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
 1968         taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
 1969 
 1970         return (FILTER_HANDLED);
 1971 }
 1972 
/*
 * cas_intr_task: deferred interrupt handler, run on the driver taskqueue
 * after cas_intr() masked interrupts.  Dispatches to the error, RX and TX
 * handlers according to the status register, restarts transmission if
 * needed, re-queues itself if more interrupt causes are pending and
 * finally re-enables interrupts.
 */
static void
cas_intr_task(void *arg, int pending __unused)
{
	struct cas_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t status, status2;

	CAS_LOCK_ASSERT(sc, MA_NOTOWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Reading CAS_STATUS acknowledges the pending causes. */
	status = CAS_READ_4(sc, CAS_STATUS);
	if (__predict_false((status & CAS_INTR_SUMMARY) == 0))
		goto done;

	CAS_LOCK(sc);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	/*
	 * NOTE(review): this PCS/MAC-control/MIF reporting only exists in
	 * CAS_DEBUG kernels; correspondingly, CAS_INTR_PCS_INT and
	 * CAS_INTR_MIF are only unmasked under CAS_DEBUG below.
	 */
	if ((status & CAS_INTR_PCS_INT) != 0) {
		/* Read twice; presumably the register is read-to-clear. */
		status2 =
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS) |
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS);
		if ((status2 & CAS_PCS_INTR_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & CAS_MAC_CTRL_STATUS) != 0) {
		status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS);
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__,
			    (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >>
			    CAS_MAC_CTRL_STATUS_PT_SHFT);
		/*
		 * NOTE(review): this tests CAS_MAC_CTRL_PAUSE again, same
		 * as the branch above; a distinct "entered PAUSE state"
		 * bit may have been intended - verify against the register
		 * definitions.  Debug-only, so behavior is unaffected.
		 */
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & CAS_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	/* Fatal errors: reset the chip via cas_eint() and bail. */
	if (__predict_false((status &
	    (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) {
		cas_eint(sc, status);
		CAS_UNLOCK(sc);
		return;
	}

	/* Non-fatal TX MAC events: count underruns/oversize as oerrors. */
	if (__predict_false(status & CAS_INTR_TX_MAC_INT)) {
		status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS);
		if ((status2 &
		    (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0)
			ifp->if_oerrors++;
		else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
	}

	/* Non-fatal RX MAC events: count overflows as ierrors. */
	if (__predict_false(status & CAS_INTR_RX_MAC_INT)) {
		status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS);
		if ((status2 & CAS_MAC_RX_OVERFLOW) != 0)
			ifp->if_ierrors++;
		else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}

	/* RX work pending: drain the completion ring. */
	if ((status &
	    (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
	    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) {
		cas_rint(sc);
#ifdef CAS_DEBUG
		if (__predict_false((status &
		    (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
		    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0))
			device_printf(sc->sc_dev,
			    "RX fault, status %x\n", status);
#endif
	}

	/* TX completions: reclaim transmitted frames. */
	if ((status &
	    (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0)
		cas_tint(sc);

	/* cas_rint()/cas_tint() may have stopped the interface. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CAS_UNLOCK(sc);
		return;
	} else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
	CAS_UNLOCK(sc);

	/* More causes raised meanwhile?  Re-run rather than re-enable. */
	status = CAS_READ_4(sc, CAS_STATUS_ALIAS);
	if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		return;
	}

 done:
	/* Re-enable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK,
	    ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
	    CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
	    CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
	    CAS_INTR_PCI_ERROR_INT
#ifdef CAS_DEBUG
	    | CAS_INTR_PCS_INT | CAS_INTR_MIF
#endif
	));
}
 2095 
/*
 * cas_watchdog: per-tick TX watchdog.  The timer is armed (to 5) when
 * frames are enqueued and cleared by cas_tint() when the dirty list
 * drains; if it counts down to zero here, the chip has stalled and is
 * reset.
 */
static void
cas_watchdog(struct cas_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS,
	    "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_RX_CONF),
	    CAS_READ_4(sc, CAS_MAC_RX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_RX_CONF));
	CTR4(KTR_CAS,
	    "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_TX_CONF),
	    CAS_READ_4(sc, CAS_MAC_TX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_TX_CONF));
#endif

	/*
	 * Return if the timer is disarmed (0) or if decrementing it does
	 * not yet reach zero; only a 1 -> 0 transition falls through.
	 */
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return;

	if ((sc->sc_flags & CAS_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	cas_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}
 2131 
 2132 static void
 2133 cas_mifinit(struct cas_softc *sc)
 2134 {
 2135 
 2136         /* Configure the MIF in frame mode. */
 2137         CAS_WRITE_4(sc, CAS_MIF_CONF,
 2138             CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE);
 2139         CAS_BARRIER(sc, CAS_MIF_CONF, 4,
 2140             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2141 }
 2142 
 2143 /*
 2144  * MII interface
 2145  *
 2146  * The MII interface supports at least three different operating modes:
 2147  *
 2148  * Bitbang mode is implemented using data, clock and output enable registers.
 2149  *
 2150  * Frame mode is implemented by loading a complete frame into the frame
 2151  * register and polling the valid bit for completion.
 2152  *
 2153  * Polling mode uses the frame register but completion is indicated by
 2154  * an interrupt.
 2155  *
 2156  */
 2157 static int
 2158 cas_mii_readreg(device_t dev, int phy, int reg)
 2159 {
 2160         struct cas_softc *sc;
 2161         int n;
 2162         uint32_t v;
 2163 
 2164 #ifdef CAS_DEBUG_PHY
 2165         printf("%s: phy %d reg %d\n", __func__, phy, reg);
 2166 #endif
 2167 
 2168         sc = device_get_softc(dev);
 2169         if ((sc->sc_flags & CAS_SERDES) != 0) {
 2170                 switch (reg) {
 2171                 case MII_BMCR:
 2172                         reg = CAS_PCS_CTRL;
 2173                         break;
 2174                 case MII_BMSR:
 2175                         reg = CAS_PCS_STATUS;
 2176                         break;
 2177                 case MII_PHYIDR1:
 2178                 case MII_PHYIDR2:
 2179                         return (0);
 2180                 case MII_ANAR:
 2181                         reg = CAS_PCS_ANAR;
 2182                         break;
 2183                 case MII_ANLPAR:
 2184                         reg = CAS_PCS_ANLPAR;
 2185                         break;
 2186                 case MII_EXTSR:
 2187                         return (EXTSR_1000XFDX | EXTSR_1000XHDX);
 2188                 default:
 2189                         device_printf(sc->sc_dev,
 2190                             "%s: unhandled register %d\n", __func__, reg);
 2191                         return (0);
 2192                 }
 2193                 return (CAS_READ_4(sc, reg));
 2194         }
 2195 
 2196         /* Construct the frame command. */
 2197         v = CAS_MIF_FRAME_READ |
 2198             (phy << CAS_MIF_FRAME_PHY_SHFT) |
 2199             (reg << CAS_MIF_FRAME_REG_SHFT);
 2200 
 2201         CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
 2202         CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
 2203             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2204         for (n = 0; n < 100; n++) {
 2205                 DELAY(1);
 2206                 v = CAS_READ_4(sc, CAS_MIF_FRAME);
 2207                 if (v & CAS_MIF_FRAME_TA_LSB)
 2208                         return (v & CAS_MIF_FRAME_DATA);
 2209         }
 2210 
 2211         device_printf(sc->sc_dev, "%s: timed out\n", __func__);
 2212         return (0);
 2213 }
 2214 
 2215 static int
 2216 cas_mii_writereg(device_t dev, int phy, int reg, int val)
 2217 {
 2218         struct cas_softc *sc;
 2219         int n;
 2220         uint32_t v;
 2221 
 2222 #ifdef CAS_DEBUG_PHY
 2223         printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__);
 2224 #endif
 2225 
 2226         sc = device_get_softc(dev);
 2227         if ((sc->sc_flags & CAS_SERDES) != 0) {
 2228                 switch (reg) {
 2229                 case MII_BMSR:
 2230                         reg = CAS_PCS_STATUS;
 2231                         break;
 2232                 case MII_BMCR:
 2233                         reg = CAS_PCS_CTRL;
 2234                         if ((val & CAS_PCS_CTRL_RESET) == 0)
 2235                                 break;
 2236                         CAS_WRITE_4(sc, CAS_PCS_CTRL, val);
 2237                         CAS_BARRIER(sc, CAS_PCS_CTRL, 4,
 2238                             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2239                         if (!cas_bitwait(sc, CAS_PCS_CTRL,
 2240                             CAS_PCS_CTRL_RESET, 0))
 2241                                 device_printf(sc->sc_dev,
 2242                                     "cannot reset PCS\n");
 2243                         /* FALLTHROUGH */
 2244                 case MII_ANAR:
 2245                         CAS_WRITE_4(sc, CAS_PCS_CONF, 0);
 2246                         CAS_BARRIER(sc, CAS_PCS_CONF, 4,
 2247                             BUS_SPACE_BARRIER_WRITE);
 2248                         CAS_WRITE_4(sc, CAS_PCS_ANAR, val);
 2249                         CAS_BARRIER(sc, CAS_PCS_ANAR, 4,
 2250                             BUS_SPACE_BARRIER_WRITE);
 2251                         CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL,
 2252                             CAS_PCS_SERDES_CTRL_ESD);
 2253                         CAS_BARRIER(sc, CAS_PCS_CONF, 4,
 2254                             BUS_SPACE_BARRIER_WRITE);
 2255                         CAS_WRITE_4(sc, CAS_PCS_CONF,
 2256                             CAS_PCS_CONF_EN);
 2257                         CAS_BARRIER(sc, CAS_PCS_CONF, 4,
 2258                             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2259                         return (0);
 2260                 case MII_ANLPAR:
 2261                         reg = CAS_PCS_ANLPAR;
 2262                         break;
 2263                 default:
 2264                         device_printf(sc->sc_dev,
 2265                             "%s: unhandled register %d\n", __func__, reg);
 2266                         return (0);
 2267                 }
 2268                 CAS_WRITE_4(sc, reg, val);
 2269                 CAS_BARRIER(sc, reg, 4,
 2270                     BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2271                 return (0);
 2272         }
 2273 
 2274         /* Construct the frame command. */
 2275         v = CAS_MIF_FRAME_WRITE |
 2276             (phy << CAS_MIF_FRAME_PHY_SHFT) |
 2277             (reg << CAS_MIF_FRAME_REG_SHFT) |
 2278             (val & CAS_MIF_FRAME_DATA);
 2279 
 2280         CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
 2281         CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
 2282             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2283         for (n = 0; n < 100; n++) {
 2284                 DELAY(1);
 2285                 v = CAS_READ_4(sc, CAS_MIF_FRAME);
 2286                 if (v & CAS_MIF_FRAME_TA_LSB)
 2287                         return (1);
 2288         }
 2289 
 2290         device_printf(sc->sc_dev, "%s: timed out\n", __func__);
 2291         return (0);
 2292 }
 2293 
 2294 static void
 2295 cas_mii_statchg(device_t dev)
 2296 {
 2297         struct cas_softc *sc;
 2298         struct ifnet *ifp;
 2299         int gigabit;
 2300         uint32_t rxcfg, txcfg, v;
 2301 
 2302         sc = device_get_softc(dev);
 2303         ifp = sc->sc_ifp;
 2304 
 2305         CAS_LOCK_ASSERT(sc, MA_OWNED);
 2306 
 2307 #ifdef CAS_DEBUG
 2308         if ((ifp->if_flags & IFF_DEBUG) != 0)
 2309                 device_printf(sc->sc_dev, "%s: status changen", __func__);
 2310 #endif
 2311 
 2312         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 2313             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 2314                 sc->sc_flags |= CAS_LINK;
 2315         else
 2316                 sc->sc_flags &= ~CAS_LINK;
 2317 
 2318         switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
 2319         case IFM_1000_SX:
 2320         case IFM_1000_LX:
 2321         case IFM_1000_CX:
 2322         case IFM_1000_T:
 2323                 gigabit = 1;
 2324                 break;
 2325         default:
 2326                 gigabit = 0;
 2327         }
 2328 
 2329         /*
 2330          * The configuration done here corresponds to the steps F) and
 2331          * G) and as far as enabling of RX and TX MAC goes also step H)
 2332          * of the initialization sequence outlined in section 11.2.1 of
 2333          * the Cassini+ ASIC Specification.
 2334          */
 2335 
 2336         rxcfg = CAS_READ_4(sc, CAS_MAC_RX_CONF);
 2337         rxcfg &= ~(CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_CARR);
 2338         txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU |
 2339             CAS_MAC_TX_CONF_NGUL;
 2340         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 2341                 txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS;
 2342         else if (gigabit != 0) {
 2343                 rxcfg |= CAS_MAC_RX_CONF_CARR;
 2344                 txcfg |= CAS_MAC_TX_CONF_CARR;
 2345         }
 2346         CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0);
 2347         CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
 2348             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2349         if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
 2350                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 2351         CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg);
 2352         CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0);
 2353         CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
 2354             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 2355         if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
 2356                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 2357         CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg);
 2358 
 2359         v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) &
 2360             ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP);
 2361         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
 2362             IFM_ETH_RXPAUSE) != 0)
 2363                 v |= CAS_MAC_CTRL_CONF_RXP;
 2364         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
 2365             IFM_ETH_TXPAUSE) != 0)
 2366                 v |= CAS_MAC_CTRL_CONF_TXP;
 2367         CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v);
 2368 
 2369         /*
 2370          * All supported chips have a bug causing incorrect checksum
 2371          * to be calculated when letting them strip the FCS in half-
 2372          * duplex mode.  In theory we could disable FCS stripping and
 2373          * manually adjust the checksum accordingly.  It seems to make
 2374          * more sense to optimze for the common case and just disable
 2375          * hardware checksumming in half-duplex mode though.
 2376          */
 2377         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) {
 2378                 ifp->if_capenable &= ~IFCAP_HWCSUM;
 2379                 ifp->if_hwassist = 0;
 2380         } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
 2381                 ifp->if_capenable = ifp->if_capabilities;
 2382                 ifp->if_hwassist = CAS_CSUM_FEATURES;
 2383         }
 2384 
 2385         if (sc->sc_variant == CAS_SATURN) {
 2386                 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
 2387                         /* silicon bug workaround */
 2388                         CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41);
 2389                 else
 2390                         CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
 2391         }
 2392 
 2393         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
 2394             gigabit != 0)
 2395                 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
 2396                     CAS_MAC_SLOT_TIME_CARR);
 2397         else
 2398                 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
 2399                     CAS_MAC_SLOT_TIME_NORM);
 2400 
 2401         /* XIF Configuration */
 2402         v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED;
 2403         if ((sc->sc_flags & CAS_SERDES) == 0) {
 2404                 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
 2405                         v |= CAS_MAC_XIF_CONF_NOECHO;
 2406                 v |= CAS_MAC_XIF_CONF_BUF_OE;
 2407         }
 2408         if (gigabit != 0)
 2409                 v |= CAS_MAC_XIF_CONF_GMII;
 2410         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 2411                 v |= CAS_MAC_XIF_CONF_FDXLED;
 2412         CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v);
 2413 
 2414         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 2415             (sc->sc_flags & CAS_LINK) != 0) {
 2416                 CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
 2417                     txcfg | CAS_MAC_TX_CONF_EN);
 2418                 CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
 2419                     rxcfg | CAS_MAC_RX_CONF_EN);
 2420         }
 2421 }
 2422 
 2423 static int
 2424 cas_mediachange(struct ifnet *ifp)
 2425 {
 2426         struct cas_softc *sc = ifp->if_softc;
 2427         int error;
 2428 
 2429         /* XXX add support for serial media. */
 2430 
 2431         CAS_LOCK(sc);
 2432         error = mii_mediachg(sc->sc_mii);
 2433         CAS_UNLOCK(sc);
 2434         return (error);
 2435 }
 2436 
 2437 static void
 2438 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2439 {
 2440         struct cas_softc *sc = ifp->if_softc;
 2441 
 2442         CAS_LOCK(sc);
 2443         if ((ifp->if_flags & IFF_UP) == 0) {
 2444                 CAS_UNLOCK(sc);
 2445                 return;
 2446         }
 2447 
 2448         mii_pollstat(sc->sc_mii);
 2449         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 2450         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 2451         CAS_UNLOCK(sc);
 2452 }
 2453 
/*
 * Interface ioctl handler.  Handles flag, capability, multicast, MTU and
 * media requests; everything else is passed to ether_ioctl().
 */
static int
cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cas_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		CAS_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If only ALLMULTI/PROMISC changed while running,
			 * just reprogram the RX filter; otherwise do a
			 * full (re)initialization.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				cas_setladrf(sc);
			else
				cas_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			cas_stop(ifp);
		/* Remember the flags for the next delta comparison. */
		sc->sc_ifflags = ifp->if_flags;
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		CAS_LOCK(sc);
		/* Checksum offload is refused on chips flagged broken. */
		if ((sc->sc_flags & CAS_NO_CSUM) != 0) {
			error = EINVAL;
			CAS_UNLOCK(sc);
			break;
		}
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = CAS_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		CAS_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		CAS_LOCK(sc);
		/* Only reprogram the hash filter while running. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			cas_setladrf(sc);
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFMTU:
		/* Jumbo frames are supported up to ETHERMTU_JUMBO. */
		if ((ifr->ifr_mtu < ETHERMIN) ||
		    (ifr->ifr_mtu > ETHERMTU_JUMBO))
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
 2516 
/*
 * Program the RX MAC address filter: promiscuous, all-multicast, or a
 * 256-bit hash filter built from the interface's multicast list.
 * Called with the driver lock held.
 */
static void
cas_setladrf(struct cas_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = CAS_READ_4(sc, CAS_MAC_RX_CONF);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_HFILTER |
	    CAS_MAC_RX_CONF_PGRP);

	/* The filter must be quiesced before the hash table is rewritten. */
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
	CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= CAS_MAC_RX_CONF_PROMISC;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= CAS_MAC_RX_CONF_PGRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits selects the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		/* Skip non-link-layer entries in the multicast list. */
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	if_maddr_runlock(ifp);

	v |= CAS_MAC_RX_CONF_HFILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		CAS_WRITE_4(sc,
		    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
		    hash[i]);

 chipit:
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
}
 2592 
/* PCI bus front-end: forward declarations for the device methods. */
static int      cas_pci_attach(device_t dev);
static int      cas_pci_detach(device_t dev);
static int      cas_pci_probe(device_t dev);
static int      cas_pci_resume(device_t dev);
static int      cas_pci_suspend(device_t dev);

static device_method_t cas_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,         cas_pci_probe),
	DEVMETHOD(device_attach,        cas_pci_attach),
	DEVMETHOD(device_detach,        cas_pci_detach),
	DEVMETHOD(device_suspend,       cas_pci_suspend),
	DEVMETHOD(device_resume,        cas_pci_resume),
	/* Use the suspend handler here, it is all that is required. */
	DEVMETHOD(device_shutdown,      cas_pci_suspend),

	/* MII interface */
	DEVMETHOD(miibus_readreg,       cas_mii_readreg),
	DEVMETHOD(miibus_writereg,      cas_mii_writereg),
	DEVMETHOD(miibus_statchg,       cas_mii_statchg),

	DEVMETHOD_END
};

static driver_t cas_pci_driver = {
	"cas",
	cas_pci_methods,
	sizeof(struct cas_softc)
};

/* Register the driver with the PCI bus and attach miibus below it. */
DRIVER_MODULE(cas, pci, cas_pci_driver, cas_devclass, 0, 0);
DRIVER_MODULE(miibus, cas, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cas, pci, 1, 1, 1);
 2626 
/*
 * Table of supported PCI devices.  Matching uses revid >= cpd_revid, so
 * within one devid the entries must be ordered from highest to lowest
 * revision (Cassini+ before plain Cassini) for the right variant to win.
 */
static const struct cas_pci_dev {
	uint32_t        cpd_devid;      /* vendor/device composite ID */
	uint8_t         cpd_revid;      /* minimum PCI revision ID */
	int             cpd_variant;    /* CAS_CAS/CAS_CASPLUS/CAS_SATURN */
	const char      *cpd_desc;      /* probe description string */
} const cas_pci_devlist[] = {
	{ 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" },
	{ 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" },
	{ 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};
 2638 
 2639 static int
 2640 cas_pci_probe(device_t dev)
 2641 {
 2642         int i;
 2643 
 2644         for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
 2645                 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
 2646                     pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
 2647                         device_set_desc(dev, cas_pci_devlist[i].cpd_desc);
 2648                         return (BUS_PROBE_DEFAULT);
 2649                 }
 2650         }
 2651 
 2652         return (ENXIO);
 2653 }
 2654 
/* Bus resources: one shared IRQ and the BAR 0 register window. */
static struct resource_spec cas_pci_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE },   /* CAS_RES_INTR */
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },     /* CAS_RES_MEM */
	{ -1, 0 }
};

/* OFW/VPD property names used to find the MAC address and PHY type. */
#define CAS_LOCAL_MAC_ADDRESS   "local-mac-address"
#define CAS_PHY_INTERFACE       "phy-interface"
#define CAS_PHY_TYPE            "phy-type"
#define CAS_PHY_TYPE_PCS        "pcs"
 2665 
 2666 static int
 2667 cas_pci_attach(device_t dev)
 2668 {
 2669         char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)];
 2670         struct cas_softc *sc;
 2671         int i;
 2672 #if !(defined(__powerpc__) || defined(__sparc64__))
 2673         u_char enaddr[4][ETHER_ADDR_LEN];
 2674         u_int j, k, lma, pcs[4], phy;
 2675 #endif
 2676 
 2677         sc = device_get_softc(dev);
 2678         sc->sc_variant = CAS_UNKNOWN;
 2679         for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
 2680                 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
 2681                     pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
 2682                         sc->sc_variant = cas_pci_devlist[i].cpd_variant;
 2683                         break;
 2684                 }
 2685         }
 2686         if (sc->sc_variant == CAS_UNKNOWN) {
 2687                 device_printf(dev, "unknown adaptor\n");
 2688                 return (ENXIO);
 2689         }
 2690 
 2691         pci_enable_busmaster(dev);
 2692 
 2693         sc->sc_dev = dev;
 2694         if (sc->sc_variant == CAS_CAS && pci_get_devid(dev) < 0x02)
 2695                 /* Hardware checksumming may hang TX. */
 2696                 sc->sc_flags |= CAS_NO_CSUM;
 2697         if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN)
 2698                 sc->sc_flags |= CAS_REG_PLUS;
 2699         if (sc->sc_variant == CAS_CAS ||
 2700             (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11))
 2701                 sc->sc_flags |= CAS_TABORT;
 2702         if (bootverbose)
 2703                 device_printf(dev, "flags=0x%x\n", sc->sc_flags);
 2704 
 2705         if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) {
 2706                 device_printf(dev, "failed to allocate resources\n");
 2707                 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
 2708                 return (ENXIO);
 2709         }
 2710 
 2711         CAS_LOCK_INIT(sc, device_get_nameunit(dev));
 2712 
 2713 #if defined(__powerpc__) || defined(__sparc64__)
 2714         OF_getetheraddr(dev, sc->sc_enaddr);
 2715         if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf,
 2716             sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev),
 2717             CAS_PHY_TYPE, buf, sizeof(buf)) > 0) {
 2718                 buf[sizeof(buf) - 1] = '\0';
 2719                 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
 2720                         sc->sc_flags |= CAS_SERDES;
 2721         }
 2722 #else
 2723         /*
 2724          * Dig out VPD (vital product data) and read the MAC address as well
 2725          * as the PHY type.  The VPD resides in the PCI Expansion ROM (PCI
 2726          * FCode) and can't be accessed via the PCI capability pointer.
 2727          * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described
 2728          * in the free US Patent 7149820.
 2729          */
 2730 
 2731 #define PCI_ROMHDR_SIZE                 0x1c
 2732 #define PCI_ROMHDR_SIG                  0x00
 2733 #define PCI_ROMHDR_SIG_MAGIC            0xaa55          /* little endian */
 2734 #define PCI_ROMHDR_PTR_DATA             0x18
 2735 #define PCI_ROM_SIZE                    0x18
 2736 #define PCI_ROM_SIG                     0x00
 2737 #define PCI_ROM_SIG_MAGIC               0x52494350      /* "PCIR", endian */
 2738                                                         /* reversed */
 2739 #define PCI_ROM_VENDOR                  0x04
 2740 #define PCI_ROM_DEVICE                  0x06
 2741 #define PCI_ROM_PTR_VPD                 0x08
 2742 #define PCI_VPDRES_BYTE0                0x00
 2743 #define PCI_VPDRES_ISLARGE(x)           ((x) & 0x80)
 2744 #define PCI_VPDRES_LARGE_NAME(x)        ((x) & 0x7f)
 2745 #define PCI_VPDRES_LARGE_LEN_LSB        0x01
 2746 #define PCI_VPDRES_LARGE_LEN_MSB        0x02
 2747 #define PCI_VPDRES_LARGE_SIZE           0x03
 2748 #define PCI_VPDRES_TYPE_ID_STRING       0x02            /* large */
 2749 #define PCI_VPDRES_TYPE_VPD             0x10            /* large */
 2750 #define PCI_VPD_KEY0                    0x00
 2751 #define PCI_VPD_KEY1                    0x01
 2752 #define PCI_VPD_LEN                     0x02
 2753 #define PCI_VPD_SIZE                    0x03
 2754 
 2755 #define CAS_ROM_READ_1(sc, offs)                                        \
 2756         CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs))
 2757 #define CAS_ROM_READ_2(sc, offs)                                        \
 2758         CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs))
 2759 #define CAS_ROM_READ_4(sc, offs)                                        \
 2760         CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs))
 2761 
 2762         lma = phy = 0;
 2763         memset(enaddr, 0, sizeof(enaddr));
 2764         memset(pcs, 0, sizeof(pcs));
 2765 
 2766         /* Enable PCI Expansion ROM access. */
 2767         CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN,
 2768             CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM);
 2769 
 2770         /* Read PCI Expansion ROM header. */
 2771         if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC ||
 2772             (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) <
 2773             PCI_ROMHDR_SIZE) {
 2774                 device_printf(dev, "unexpected PCI Expansion ROM header\n");
 2775                 goto fail_prom;
 2776         }
 2777 
 2778         /* Read PCI Expansion ROM data. */
 2779         if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC ||
 2780             CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) ||
 2781             CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) ||
 2782             (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) <
 2783             i + PCI_ROM_SIZE) {
 2784                 device_printf(dev, "unexpected PCI Expansion ROM data\n");
 2785                 goto fail_prom;
 2786         }
 2787 
 2788         /* Read PCI VPD. */
 2789  next:
 2790         if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc,
 2791             j + PCI_VPDRES_BYTE0)) == 0) {
 2792                 device_printf(dev, "no large PCI VPD\n");
 2793                 goto fail_prom;
 2794         }
 2795 
 2796         i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) |
 2797             CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB);
 2798         switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc,
 2799             j + PCI_VPDRES_BYTE0))) {
 2800         case PCI_VPDRES_TYPE_ID_STRING:
 2801                 /* Skip identifier string. */
 2802                 j += PCI_VPDRES_LARGE_SIZE + i;
 2803                 goto next;
 2804         case PCI_VPDRES_TYPE_VPD:
 2805                 for (j += PCI_VPDRES_LARGE_SIZE; i > 0;
 2806                     i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN),
 2807                     j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) {
 2808                         if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z')
 2809                                 /* no Enhanced VPD */
 2810                                 continue;
 2811                         if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I')
 2812                                 /* no instance property */
 2813                                 continue;
 2814                         if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') {
 2815                                 /* byte array */
 2816                                 if (CAS_ROM_READ_1(sc,
 2817                                     j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN)
 2818                                         continue;
 2819                                 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
 2820                                     CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
 2821                                     buf, sizeof(buf));
 2822                                 buf[sizeof(buf) - 1] = '\0';
 2823                                 if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0)
 2824                                         continue;
 2825                                 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
 2826                                     CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
 2827                                     5 + sizeof(CAS_LOCAL_MAC_ADDRESS),
 2828                                     enaddr[lma], sizeof(enaddr[lma]));
 2829                                 lma++;
 2830                                 if (lma == 4 && phy == 4)
 2831                                         break;
 2832                         } else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) ==
 2833                            'S') {
 2834                                 /* string */
 2835                                 if (CAS_ROM_READ_1(sc,
 2836                                     j + PCI_VPD_SIZE + 4) !=
 2837                                     sizeof(CAS_PHY_TYPE_PCS))
 2838                                         continue;
 2839                                 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
 2840                                     CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
 2841                                     buf, sizeof(buf));
 2842                                 buf[sizeof(buf) - 1] = '\0';
 2843                                 if (strcmp(buf, CAS_PHY_INTERFACE) == 0)
 2844                                         k = sizeof(CAS_PHY_INTERFACE);
 2845                                 else if (strcmp(buf, CAS_PHY_TYPE) == 0)
 2846                                         k = sizeof(CAS_PHY_TYPE);
 2847                                 else
 2848                                         continue;
 2849                                 bus_read_region_1(sc->sc_res[CAS_RES_MEM],
 2850                                     CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
 2851                                     5 + k, buf, sizeof(buf));
 2852                                 buf[sizeof(buf) - 1] = '\0';
 2853                                 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
 2854                                         pcs[phy] = 1;
 2855                                 phy++;
 2856                                 if (lma == 4 && phy == 4)
 2857                                         break;
 2858                         }
 2859                 }
 2860                 break;
 2861         default:
 2862                 device_printf(dev, "unexpected PCI VPD\n");
 2863                 goto fail_prom;
 2864         }
 2865 
 2866  fail_prom:
 2867         CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0);
 2868 
 2869         if (lma == 0) {
 2870                 device_printf(dev, "could not determine Ethernet address\n");
 2871                 goto fail;
 2872         }
 2873         i = 0;
 2874         if (lma > 1 && pci_get_slot(dev) < sizeof(enaddr) / sizeof(*enaddr))
 2875                 i = pci_get_slot(dev);
 2876         memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN);
 2877 
 2878         if (phy == 0) {
 2879                 device_printf(dev, "could not determine PHY type\n");
 2880                 goto fail;
 2881         }
 2882         i = 0;
 2883         if (phy > 1 && pci_get_slot(dev) < sizeof(pcs) / sizeof(*pcs))
 2884                 i = pci_get_slot(dev);
 2885         if (pcs[i] != 0)
 2886                 sc->sc_flags |= CAS_SERDES;
 2887 #endif
 2888 
 2889         if (cas_attach(sc) != 0) {
 2890                 device_printf(dev, "could not be attached\n");
 2891                 goto fail;
 2892         }
 2893 
 2894         if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET |
 2895             INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) {
 2896                 device_printf(dev, "failed to set up interrupt\n");
 2897                 cas_detach(sc);
 2898                 goto fail;
 2899         }
 2900         return (0);
 2901 
 2902  fail:
 2903         CAS_LOCK_DESTROY(sc);
 2904         bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
 2905         return (ENXIO);
 2906 }
 2907 
/*
 * PCI detach: tear down the interrupt first so no handler can run while
 * the bus-independent state is being destroyed, then release the lock
 * and bus resources.
 */
static int
cas_pci_detach(device_t dev)
{
	struct cas_softc *sc;

	sc = device_get_softc(dev);
	bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih);
	cas_detach(sc);
	CAS_LOCK_DESTROY(sc);
	bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
	return (0);
}
 2920 
 2921 static int
 2922 cas_pci_suspend(device_t dev)
 2923 {
 2924 
 2925         cas_suspend(device_get_softc(dev));
 2926         return (0);
 2927 }
 2928 
 2929 static int
 2930 cas_pci_resume(device_t dev)
 2931 {
 2932 
 2933         cas_resume(device_get_softc(dev));
 2934         return (0);
 2935 }

Cache object: e71593c73323022ce8f4dc17ebd46b52


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.