FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_dge.c

    1 /*      $NetBSD: if_dge.c,v 1.1.2.2 2004/04/16 08:02:53 tron Exp $ */
    2 
    3 /*
    4  * Copyright (c) 2004, SUNET, Swedish University Computer Network.
    5  * All rights reserved.
    6  *
    7  * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      SUNET, Swedish University Computer Network.
   21  * 4. The name of SUNET may not be used to endorse or promote products
   22  *    derived from this software without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
   28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   34  * POSSIBILITY OF SUCH DAMAGE.
   35  */
   36 
   37 /*
   38  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
   39  * All rights reserved.
   40  *
   41  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
   42  *
   43  * Redistribution and use in source and binary forms, with or without
   44  * modification, are permitted provided that the following conditions
   45  * are met:
   46  * 1. Redistributions of source code must retain the above copyright
   47  *    notice, this list of conditions and the following disclaimer.
   48  * 2. Redistributions in binary form must reproduce the above copyright
   49  *    notice, this list of conditions and the following disclaimer in the
   50  *    documentation and/or other materials provided with the distribution.
   51  * 3. All advertising materials mentioning features or use of this software
   52  *    must display the following acknowledgement:
   53  *      This product includes software developed for the NetBSD Project by
   54  *      Wasabi Systems, Inc.
   55  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   56  *    or promote products derived from this software without specific prior
   57  *    written permission.
   58  *
   59  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   61  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   62  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   63  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   64  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   65  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   66  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   67  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   68  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   69  * POSSIBILITY OF SUCH DAMAGE.
   70  */
   71 
   72 /*
   73  * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
   74  *
   75  * TODO (in no specific order):
   76  *      HW VLAN support.
   77  *      TSE offloading (needs kernel changes...)
   78  *      RAIDC (receive interrupt delay adaptation)
   79  *      Use memory > 4GB.
   80  */
   81 
   82 #include <sys/cdefs.h>
   83 __KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.1.2.2 2004/04/16 08:02:53 tron Exp $");
   84 
   85 #include "bpfilter.h"
   86 #include "rnd.h"
   87 
   88 #include <sys/param.h>
   89 #include <sys/systm.h>
   90 #include <sys/callout.h> 
   91 #include <sys/mbuf.h>
   92 #include <sys/malloc.h>
   93 #include <sys/kernel.h>
   94 #include <sys/socket.h>
   95 #include <sys/ioctl.h>
   96 #include <sys/errno.h>
   97 #include <sys/device.h>
   98 #include <sys/queue.h>
   99 
  100 #include <uvm/uvm_extern.h>             /* for PAGE_SIZE */
  101 
  102 #if NRND > 0
  103 #include <sys/rnd.h>
  104 #endif
  105 
  106 #include <net/if.h>
  107 #include <net/if_dl.h> 
  108 #include <net/if_media.h>
  109 #include <net/if_ether.h>
  110 
  111 #if NBPFILTER > 0 
  112 #include <net/bpf.h>
  113 #endif
  114 
  115 #include <netinet/in.h>                 /* XXX for struct ip */
  116 #include <netinet/in_systm.h>           /* XXX for struct ip */
  117 #include <netinet/ip.h>                 /* XXX for struct ip */
  118 #include <netinet/tcp.h>                /* XXX for struct tcphdr */
  119 
  120 #include <machine/bus.h>
  121 #include <machine/intr.h>
  122 #include <machine/endian.h>
  123 
  124 #include <dev/mii/mii.h>
  125 #include <dev/mii/miivar.h>
  126 #include <dev/mii/mii_bitbang.h>
  127 
  128 #include <dev/pci/pcireg.h>
  129 #include <dev/pci/pcivar.h>
  130 #include <dev/pci/pcidevs.h>
  131 
  132 #include <dev/pci/if_dgereg.h>
  133 
  134 /*
  135  * The receive engine may sometimes become off-by-one when writing back
  136  * chained descriptors.  Avoid this by allocating a large chunk of
   137  * memory and use it instead (to avoid chained descriptors).
  138  * This only happens with chained descriptors under heavy load.
  139  */
  140 #define DGE_OFFBYONE_RXBUG
  141 
  142 #define DGE_EVENT_COUNTERS
  143 #define DGE_DEBUG
  144 
  145 #ifdef DGE_DEBUG
  146 #define DGE_DEBUG_LINK          0x01
  147 #define DGE_DEBUG_TX            0x02
  148 #define DGE_DEBUG_RX            0x04
  149 #define DGE_DEBUG_CKSUM         0x08
  150 int     dge_debug = 0;
  151 
  152 #define DPRINTF(x, y)   if (dge_debug & (x)) printf y
  153 #else
  154 #define DPRINTF(x, y)   /* nothing */
  155 #endif /* DGE_DEBUG */
  156 
  157 /*
  158  * Transmit descriptor list size. We allow up to 100 DMA segments per
   159  * packet (Intel reports jumbo frame packets with as
  160  * many as 80 DMA segments when using 16k buffers).
  161  */
  162 #define DGE_NTXSEGS             100
  163 #define DGE_IFQUEUELEN          20000
  164 #define DGE_TXQUEUELEN          2048
  165 #define DGE_TXQUEUELEN_MASK     (DGE_TXQUEUELEN - 1)
  166 #define DGE_TXQUEUE_GC          (DGE_TXQUEUELEN / 8)
  167 #define DGE_NTXDESC             1024
  168 #define DGE_NTXDESC_MASK                (DGE_NTXDESC - 1)
  169 #define DGE_NEXTTX(x)           (((x) + 1) & DGE_NTXDESC_MASK)
  170 #define DGE_NEXTTXS(x)          (((x) + 1) & DGE_TXQUEUELEN_MASK)
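      /*
       * Example: because DGE_NTXDESC and DGE_TXQUEUELEN are powers of
       * two, the ring indices wrap with a cheap AND instead of a modulo,
       * e.g. DGE_NEXTTX(1023) == (1024 & 1023) == 0, while
       * DGE_NEXTTX(5) == 6.
       */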
  171 
  172 /*
  173  * Receive descriptor list size.
   174  * Each buffer is of size MCLBYTES, and for jumbo packets buffers may
  175  * be chained.  Due to the nature of the card (high-speed), keep this
  176  * ring large. With 2k buffers the ring can store 400 jumbo packets,
  177  * which at full speed will be received in just under 3ms.
  178  */
  179 #define DGE_NRXDESC             2048
  180 #define DGE_NRXDESC_MASK        (DGE_NRXDESC - 1)
  181 #define DGE_NEXTRX(x)           (((x) + 1) & DGE_NRXDESC_MASK)
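      /*
       * Sanity check of the figures above, assuming ~9k jumbo frames that
       * chain across five 2k buffers: 2048 descriptors / 5 buffers gives
       * roughly 400 packets, and 400 * 9018 bytes * 8 bits at 10 Gb/s is
       * about 2.9 ms, i.e. "just under 3ms".
       */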
  182 /*
  183  * # of descriptors between head and written descriptors.
   184  * This is to work around two errata.
  185  */
  186 #define DGE_RXSPACE             10
  187 #define DGE_PREVRX(x)           (((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
  188 /*
   189  * Receive descriptor fetch thresholds. These are values recommended
   190  * by Intel; do not touch them unless you know what you are doing.
  191  */
  192 #define RXDCTL_PTHRESH_VAL      128
  193 #define RXDCTL_HTHRESH_VAL      16
  194 #define RXDCTL_WTHRESH_VAL      16
  195 
  196 
  197 /*
  198  * Tweakable parameters; default values.
  199  */
  200 #define FCRTH   0x30000 /* Send XOFF water mark */
  201 #define FCRTL   0x28000 /* Send XON water mark */
  202 #define RDTR    0x20    /* Interrupt delay after receive, .8192us units */
  203 #define TIDV    0x20    /* Interrupt delay after send, .8192us units */
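      /*
       * Worked example: RDTR = 0x20 = 32 units, and 32 * 0.8192 us is
       * about 26 us of receive interrupt delay; TIDV works out the same
       * on the transmit side.
       */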
  204 
  205 /*
  206  * Control structures are DMA'd to the i82597 chip.  We allocate them in
   207  * a single clump that maps to a single DMA segment to make several things
  208  * easier.
  209  */
  210 struct dge_control_data {
  211         /*
  212          * The transmit descriptors.
  213          */
  214         struct dge_tdes wcd_txdescs[DGE_NTXDESC];
  215 
  216         /*
  217          * The receive descriptors.
  218          */
  219         struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
  220 };
  221 
  222 #define DGE_CDOFF(x)    offsetof(struct dge_control_data, x)
  223 #define DGE_CDTXOFF(x)  DGE_CDOFF(wcd_txdescs[(x)])
  224 #define DGE_CDRXOFF(x)  DGE_CDOFF(wcd_rxdescs[(x)])
  225 
  226 /*
   227  * The DGE interface has a higher maximum MTU than normal jumbo frames.
  228  */
  229 #define DGE_MAX_MTU     16288   /* Max MTU size for this interface */
  230 
  231 /*
  232  * Software state for transmit jobs.
  233  */
  234 struct dge_txsoft {
  235         struct mbuf *txs_mbuf;          /* head of our mbuf chain */
  236         bus_dmamap_t txs_dmamap;        /* our DMA map */
  237         int txs_firstdesc;              /* first descriptor in packet */
  238         int txs_lastdesc;               /* last descriptor in packet */
  239         int txs_ndesc;                  /* # of descriptors used */
  240 };
  241 
  242 /*
  243  * Software state for receive buffers.  Each descriptor gets a
  244  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
  245  * more than one buffer, we chain them together.
  246  */
  247 struct dge_rxsoft {
  248         struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
  249         bus_dmamap_t rxs_dmamap;        /* our DMA map */
  250 };
  251 
  252 /*
  253  * Software state per device.
  254  */
  255 struct dge_softc {
  256         struct device sc_dev;           /* generic device information */
  257         bus_space_tag_t sc_st;          /* bus space tag */
  258         bus_space_handle_t sc_sh;       /* bus space handle */
  259         bus_dma_tag_t sc_dmat;          /* bus DMA tag */
  260         struct ethercom sc_ethercom;    /* ethernet common data */
  261         void *sc_sdhook;                /* shutdown hook */
  262 
  263         int sc_flags;                   /* flags; see below */
  264         int sc_bus_speed;               /* PCI/PCIX bus speed */
  265         int sc_pcix_offset;             /* PCIX capability register offset */
  266 
  267         pci_chipset_tag_t sc_pc;
  268         pcitag_t sc_pt;
  269         int sc_mmrbc;                   /* Max PCIX memory read byte count */
  270 
  271         void *sc_ih;                    /* interrupt cookie */
  272 
  273         struct ifmedia sc_media;
  274 
  275         bus_dmamap_t sc_cddmamap;       /* control data DMA map */
  276 #define sc_cddma        sc_cddmamap->dm_segs[0].ds_addr
  277 
  278         int             sc_align_tweak;
  279 
  280         /*
  281          * Software state for the transmit and receive descriptors.
  282          */
  283         struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
  284         struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];
  285 
  286         /*
  287          * Control data structures.
  288          */
  289         struct dge_control_data *sc_control_data;
  290 #define sc_txdescs      sc_control_data->wcd_txdescs
  291 #define sc_rxdescs      sc_control_data->wcd_rxdescs
  292 
  293 #ifdef DGE_EVENT_COUNTERS
  294         /* Event counters. */
  295         struct evcnt sc_ev_txsstall;    /* Tx stalled due to no txs */
  296         struct evcnt sc_ev_txdstall;    /* Tx stalled due to no txd */
  297         struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
  298         struct evcnt sc_ev_txdw;        /* Tx descriptor interrupts */
  299         struct evcnt sc_ev_txqe;        /* Tx queue empty interrupts */
  300         struct evcnt sc_ev_rxintr;      /* Rx interrupts */
  301         struct evcnt sc_ev_linkintr;    /* Link interrupts */
  302 
  303         struct evcnt sc_ev_rxipsum;     /* IP checksums checked in-bound */
  304         struct evcnt sc_ev_rxtusum;     /* TCP/UDP cksums checked in-bound */
  305         struct evcnt sc_ev_txipsum;     /* IP checksums comp. out-bound */
  306         struct evcnt sc_ev_txtusum;     /* TCP/UDP cksums comp. out-bound */
  307 
  308         struct evcnt sc_ev_txctx_init;  /* Tx cksum context cache initialized */
  309         struct evcnt sc_ev_txctx_hit;   /* Tx cksum context cache hit */
  310         struct evcnt sc_ev_txctx_miss;  /* Tx cksum context cache miss */
  311 
  312         struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
  313         struct evcnt sc_ev_txdrop;      /* Tx packets dropped (too many segs) */
  314 #endif /* DGE_EVENT_COUNTERS */
  315 
  316         int     sc_txfree;              /* number of free Tx descriptors */
  317         int     sc_txnext;              /* next ready Tx descriptor */
  318 
  319         int     sc_txsfree;             /* number of free Tx jobs */
  320         int     sc_txsnext;             /* next free Tx job */
  321         int     sc_txsdirty;            /* dirty Tx jobs */
  322 
  323         uint32_t sc_txctx_ipcs;         /* cached Tx IP cksum ctx */
  324         uint32_t sc_txctx_tucs;         /* cached Tx TCP/UDP cksum ctx */
  325 
  326         int     sc_rxptr;               /* next ready Rx descriptor/queue ent */
  327         int     sc_rxdiscard;
  328         int     sc_rxlen;
  329         struct mbuf *sc_rxhead;
  330         struct mbuf *sc_rxtail;
  331         struct mbuf **sc_rxtailp;
  332 
  333         uint32_t sc_ctrl0;              /* prototype CTRL0 register */
  334         uint32_t sc_icr;                /* prototype interrupt bits */
  335         uint32_t sc_tctl;               /* prototype TCTL register */
  336         uint32_t sc_rctl;               /* prototype RCTL register */
  337 
  338         int sc_mchash_type;             /* multicast filter offset */
  339 
  340         uint16_t sc_eeprom[EEPROM_SIZE];
  341 
  342 #if NRND > 0
  343         rndsource_element_t rnd_source; /* random source */
  344 #endif
  345 #ifdef DGE_OFFBYONE_RXBUG
  346         caddr_t sc_bugbuf;
  347         SLIST_HEAD(, rxbugentry) sc_buglist;
  348         bus_dmamap_t sc_bugmap;
  349         struct rxbugentry *sc_entry;
  350 #endif
  351 };
  352 
  353 #define DGE_RXCHAIN_RESET(sc)                                           \
  354 do {                                                                    \
  355         (sc)->sc_rxtailp = &(sc)->sc_rxhead;                            \
  356         *(sc)->sc_rxtailp = NULL;                                       \
  357         (sc)->sc_rxlen = 0;                                             \
  358 } while (/*CONSTCOND*/0)
  359 
  360 #define DGE_RXCHAIN_LINK(sc, m)                                         \
  361 do {                                                                    \
  362         *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);                      \
  363         (sc)->sc_rxtailp = &(m)->m_next;                                \
  364 } while (/*CONSTCOND*/0)
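      /*
       * These two macros implement the usual O(1) tail-pointer append:
       * DGE_RXCHAIN_RESET points sc_rxtailp at sc_rxhead, and each
       * DGE_RXCHAIN_LINK stores the new mbuf through the tail pointer and
       * then advances the pointer to that mbuf's m_next, so fragments are
       * appended without walking the chain.
       */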
  365 
  366 /* sc_flags */
  367 #define DGE_F_BUS64             0x20    /* bus is 64-bit */
  368 #define DGE_F_PCIX              0x40    /* bus is PCI-X */
  369 
  370 #ifdef DGE_EVENT_COUNTERS
  371 #define DGE_EVCNT_INCR(ev)      (ev)->ev_count++
  372 #else
  373 #define DGE_EVCNT_INCR(ev)      /* nothing */
  374 #endif
  375 
  376 #define CSR_READ(sc, reg)                                               \
  377         bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
  378 #define CSR_WRITE(sc, reg, val)                                         \
  379         bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
  380 
  381 #define DGE_CDTXADDR(sc, x)     ((sc)->sc_cddma + DGE_CDTXOFF((x)))
  382 #define DGE_CDRXADDR(sc, x)     ((sc)->sc_cddma + DGE_CDRXOFF((x)))
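      /*
       * Example: DGE_CDTXADDR(sc, 5) is the bus address of the control
       * data block plus offsetof(struct dge_control_data, wcd_txdescs[5]),
       * i.e. the DMA address of Tx descriptor 5 as seen by the chip.
       */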
  383 
  384 #define DGE_CDTXSYNC(sc, x, n, ops)                                     \
  385 do {                                                                    \
  386         int __x, __n;                                                   \
  387                                                                         \
  388         __x = (x);                                                      \
  389         __n = (n);                                                      \
  390                                                                         \
  391         /* If it will wrap around, sync to the end of the ring. */      \
  392         if ((__x + __n) > DGE_NTXDESC) {                                        \
  393                 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,       \
  394                     DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *         \
  395                     (DGE_NTXDESC - __x), (ops));                                \
  396                 __n -= (DGE_NTXDESC - __x);                             \
  397                 __x = 0;                                                \
  398         }                                                               \
  399                                                                         \
  400         /* Now sync whatever is left. */                                \
  401         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  402             DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));    \
  403 } while (/*CONSTCOND*/0)
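      /*
       * Example: DGE_CDTXSYNC(sc, 1022, 4, ops) wraps the 1024-entry
       * ring, so two syncs are issued: descriptors 1022-1023 first, then
       * descriptors 0-1.
       */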
  404 
  405 #define DGE_CDRXSYNC(sc, x, ops)                                                \
  406 do {                                                                    \
  407         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  408            DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));           \
  409 } while (/*CONSTCOND*/0)
  410 
  411 #ifdef DGE_OFFBYONE_RXBUG
  412 #define DGE_INIT_RXDESC(sc, x)                                          \
  413 do {                                                                    \
  414         struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];               \
  415         struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];                \
  416         struct mbuf *__m = __rxs->rxs_mbuf;                             \
  417                                                                         \
  418         __rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +  \
  419             (mtod((__m), char *) - (char *)sc->sc_bugbuf));             \
  420         __rxd->dr_baddrh = 0;                                           \
  421         __rxd->dr_len = 0;                                              \
  422         __rxd->dr_cksum = 0;                                            \
  423         __rxd->dr_status = 0;                                           \
  424         __rxd->dr_errors = 0;                                           \
  425         __rxd->dr_special = 0;                                          \
  426         DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
  427                                                                         \
  428         CSR_WRITE((sc), DGE_RDT, (x));                                  \
  429 } while (/*CONSTCOND*/0)
  430 #else
  431 #define DGE_INIT_RXDESC(sc, x)                                          \
  432 do {                                                                    \
  433         struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];               \
  434         struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];                \
  435         struct mbuf *__m = __rxs->rxs_mbuf;                             \
  436                                                                         \
  437         /*                                                              \
  438          * Note: We scoot the packet forward 2 bytes in the buffer      \
  439          * so that the payload after the Ethernet header is aligned     \
  440          * to a 4-byte boundary.                                        \
  441          *                                                              \
  442          * XXX BRAINDAMAGE ALERT!                                       \
  443          * The stupid chip uses the same size for every buffer, which   \
  444          * is set in the Receive Control register.  We are using the 2K \
  445          * size option, but what we REALLY want is (2K - 2)!  For this  \
  446          * reason, we can't "scoot" packets longer than the standard    \
  447          * Ethernet MTU.  On strict-alignment platforms, if the total   \
  448          * size exceeds (2K - 2) we set align_tweak to 0 and let        \
  449          * the upper layer copy the headers.                            \
  450          */                                                             \
  451         __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;        \
  452                                                                         \
  453         __rxd->dr_baddrl =                                      \
  454             htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +             \
  455                 (sc)->sc_align_tweak);                                  \
  456         __rxd->dr_baddrh = 0;                                   \
  457         __rxd->dr_len = 0;                                              \
  458         __rxd->dr_cksum = 0;                                            \
  459         __rxd->dr_status = 0;                                           \
  460         __rxd->dr_errors = 0;                                           \
  461         __rxd->dr_special = 0;                                          \
  462         DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
  463                                                                         \
  464         CSR_WRITE((sc), DGE_RDT, (x));                                  \
  465 } while (/*CONSTCOND*/0)
  466 #endif
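      /*
       * On the align_tweak above: ETHER_HDR_LEN is 14, so starting the
       * frame 2 bytes into the buffer puts the IP header at offset 16,
       * a 4-byte boundary, which is what strict-alignment CPUs need.
       */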
  467 
  468 #ifdef DGE_OFFBYONE_RXBUG
  469 /*
  470  * Allocation constants.  Much memory may be used for this.
  471  */
  472 #ifndef DGE_BUFFER_SIZE
  473 #define DGE_BUFFER_SIZE DGE_MAX_MTU
  474 #endif
  475 #define DGE_NBUFFERS    (4*DGE_NRXDESC)
  476 #define DGE_RXMEM       (DGE_NBUFFERS*DGE_BUFFER_SIZE)
  477 
  478 struct rxbugentry {
  479         SLIST_ENTRY(rxbugentry) rb_entry;
  480         int rb_slot;
  481 };
  482 
  483 static int
  484 dge_alloc_rcvmem(struct dge_softc *sc)
  485 {
  486         caddr_t ptr, kva;
  487         bus_dma_segment_t seg;
  488         int i, rseg, state, error;
  489         struct rxbugentry *entry;
  490 
  491         state = error = 0;
  492 
  493         if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
  494              &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
  495                 printf("%s: can't alloc rx buffers\n", sc->sc_dev.dv_xname);
  496                 return ENOBUFS;
  497         }
  498 
  499         state = 1;
  500         if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, &kva,
  501             BUS_DMA_NOWAIT)) {
  502                 printf("%s: can't map DMA buffers (%d bytes)\n",
  503                     sc->sc_dev.dv_xname, (int)DGE_RXMEM);
  504                 error = ENOBUFS;
  505                 goto out;
  506         }
  507 
  508         state = 2;
  509         if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
  510             BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
  511                 printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
  512                 error = ENOBUFS;
  513                 goto out;
  514         }
  515 
  516         state = 3;
  517         if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
  518             kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
  519                 printf("%s: can't load DMA map\n", sc->sc_dev.dv_xname);
  520                 error = ENOBUFS;
  521                 goto out;
  522         }
  523 
  524         state = 4;
  525         sc->sc_bugbuf = (caddr_t)kva;
  526         SLIST_INIT(&sc->sc_buglist);
  527 
  528         /*
  529          * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
  530          * in an array.
  531          */
  532         ptr = sc->sc_bugbuf;
  533         if ((entry = malloc(sizeof(*entry) * DGE_NBUFFERS,
  534             M_DEVBUF, M_NOWAIT)) == NULL) {
  535                 error = ENOBUFS;
  536                 goto out;
  537         }
  538         sc->sc_entry = entry;
  539         for (i = 0; i < DGE_NBUFFERS; i++) {
  540                 entry[i].rb_slot = i;
  541                 SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
  542         }
  543 out:
  544         if (error != 0) {
  545                 switch (state) {
  546                 case 4:
  547                         bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
  548                 case 3:
  549                         bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
  550                 case 2:
  551                         bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
  552                 case 1:
  553                         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
  554                         break;
  555                 default:
  556                         break;
  557                 }
  558         }
  559 
  560         return error;
  561 }
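      /*
       * Note on the cleanup above: "state" records how far setup got,
       * and the switch cases deliberately fall through so that each later
       * stage also tears down every earlier one.
       */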
  562 
  563 /*
  564  * Allocate a jumbo buffer.
  565  */
  566 static void *
  567 dge_getbuf(struct dge_softc *sc)
  568 {
  569         struct rxbugentry *entry;
  570 
  571         entry = SLIST_FIRST(&sc->sc_buglist);
  572 
  573         if (entry == NULL) {
  574                 printf("%s: no free RX buffers\n", sc->sc_dev.dv_xname);
  575                 return(NULL);
  576         }
  577 
  578         SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
  579         return sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
  580 }
  581 
  582 /*
  583  * Release a jumbo buffer.
  584  */
  585 static void
  586 dge_freebuf(struct mbuf *m, caddr_t buf, size_t size, void *arg)
  587 {
  588         struct rxbugentry *entry;
  589         struct dge_softc *sc;
  590         int i, s;
  591 
  592         /* Extract the softc struct pointer. */
  593         sc = (struct dge_softc *)arg;
  594 
  595         if (sc == NULL)
  596                 panic("dge_freebuf: can't find softc pointer!");
  597 
  598         /* calculate the slot this buffer belongs to */
  599 
  600         i = (buf - sc->sc_bugbuf) / DGE_BUFFER_SIZE;
  601 
  602         if ((i < 0) || (i >= DGE_NBUFFERS))
  603                 panic("dge_freebuf: asked to free buffer %d!", i);
  604 
  605         s = splvm();
  606         entry = sc->sc_entry + i;
  607         SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);
  608 
  609         if (__predict_true(m != NULL))
  610                 pool_cache_put(&mbpool_cache, m);
  611         splx(s);
  612 }
  613 #endif
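      /*
       * dge_getbuf() and dge_freebuf() are the allocate/release halves of
       * the private jumbo-buffer pool; dge_freebuf() has the shape of an
       * mbuf external-storage free callback, presumably attached (via
       * MEXTADD or equivalent) when receive mbufs are set up elsewhere in
       * the driver.
       */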
  614 
  615 static void     dge_start(struct ifnet *);
  616 static void     dge_watchdog(struct ifnet *);
  617 static int      dge_ioctl(struct ifnet *, u_long, caddr_t);
  618 static int      dge_init(struct ifnet *);
  619 static void     dge_stop(struct ifnet *, int);
  620 
  621 static void     dge_shutdown(void *);
  622 
  623 static void     dge_reset(struct dge_softc *);
  624 static void     dge_rxdrain(struct dge_softc *);
  625 static int      dge_add_rxbuf(struct dge_softc *, int);
  626 
  627 static void     dge_set_filter(struct dge_softc *);
  628 
  629 static int      dge_intr(void *);
  630 static void     dge_txintr(struct dge_softc *);
  631 static void     dge_rxintr(struct dge_softc *);
  632 static void     dge_linkintr(struct dge_softc *, uint32_t);
  633 
  634 static int      dge_match(struct device *, struct cfdata *, void *);
  635 static void     dge_attach(struct device *, struct device *, void *);
  636 
  637 static int      dge_read_eeprom(struct dge_softc *sc);
  638 static int      dge_eeprom_clockin(struct dge_softc *sc);
  639 static void     dge_eeprom_clockout(struct dge_softc *sc, int bit);
  640 static uint16_t dge_eeprom_word(struct dge_softc *sc, int addr);
  641 static int      dge_xgmii_mediachange(struct ifnet *);
  642 static void     dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
  643 static void     dge_xgmii_reset(struct dge_softc *);
  644 static void     dge_xgmii_writereg(struct device *, int, int, int);
  645 
  646 
  647 CFATTACH_DECL(dge, sizeof(struct dge_softc),
  648     dge_match, dge_attach, NULL, NULL);
  649 
  650 #ifdef DGE_EVENT_COUNTERS
  651 #if DGE_NTXSEGS > 100
  652 #error Update dge_txseg_evcnt_names
  653 #endif
  654 static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
  655 #endif /* DGE_EVENT_COUNTERS */
  656 
  657 static int
  658 dge_match(struct device *parent, struct cfdata *cf, void *aux)
  659 {
  660         struct pci_attach_args *pa = aux;
  661 
  662         if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
  663             PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82597EX)
  664                 return (1);
  665 
  666         return (0);
  667 }
  668 
  669 static void
  670 dge_attach(struct device *parent, struct device *self, void *aux)
  671 {
  672         struct dge_softc *sc = (void *) self;
  673         struct pci_attach_args *pa = aux;
  674         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  675         pci_chipset_tag_t pc = pa->pa_pc;
  676         pci_intr_handle_t ih;
  677         const char *intrstr = NULL;
  678         bus_dma_segment_t seg;
  679         int i, rseg, error;
  680         uint8_t enaddr[ETHER_ADDR_LEN];
  681         pcireg_t preg, memtype;
  682         uint32_t reg;
  683 
  684         sc->sc_dmat = pa->pa_dmat;
  685         sc->sc_pc = pa->pa_pc;
  686         sc->sc_pt = pa->pa_tag;
  687 
  688         preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
  689         aprint_naive(": Ethernet controller\n");
  690         aprint_normal(": Intel i82597EX 10GbE-LR Ethernet, rev. %d\n", preg);
  691 
  692         memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
  693         if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
  694             &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
  695                 aprint_error("%s: unable to map device registers\n",
  696                     sc->sc_dev.dv_xname);
  697                 return;
  698         }
  699 
  700         /* Enable bus mastering */
  701         preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
  702         preg |= PCI_COMMAND_MASTER_ENABLE;
  703         pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
  704 
  705         /*
  706          * Map and establish our interrupt.
  707          */
  708         if (pci_intr_map(pa, &ih)) {
  709                 aprint_error("%s: unable to map interrupt\n",
  710                     sc->sc_dev.dv_xname);
  711                 return;
  712         }
  713         intrstr = pci_intr_string(pc, ih);
  714         sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, dge_intr, sc);
  715         if (sc->sc_ih == NULL) {
  716                 aprint_error("%s: unable to establish interrupt",
  717                     sc->sc_dev.dv_xname);
  718                 if (intrstr != NULL)
  719                         aprint_normal(" at %s", intrstr);
  720                 aprint_normal("\n");
  721                 return;
  722         }
  723         aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
  724 
  725         /*
  726          * Determine a few things about the bus we're connected to.
  727          */
  728         reg = CSR_READ(sc, DGE_STATUS);
  729         if (reg & STATUS_BUS64)
  730                 sc->sc_flags |= DGE_F_BUS64;
  731 
  732         sc->sc_flags |= DGE_F_PCIX;
  733         if (pci_get_capability(pa->pa_pc, pa->pa_tag,
  734                                PCI_CAP_PCIX,
  735                                &sc->sc_pcix_offset, NULL) == 0)
  736                 aprint_error("%s: unable to find PCIX "
  737                     "capability\n", sc->sc_dev.dv_xname);
  738 
  739         if (sc->sc_flags & DGE_F_PCIX) {
  740                 switch (reg & STATUS_PCIX_MSK) {
  741                 case STATUS_PCIX_66:
  742                         sc->sc_bus_speed = 66;
  743                         break;
  744                 case STATUS_PCIX_100:
  745                         sc->sc_bus_speed = 100;
  746                         break;
  747                 case STATUS_PCIX_133:
  748                         sc->sc_bus_speed = 133;
  749                         break;
  750                 default:
  751                         aprint_error(
  752                             "%s: unknown PCIXSPD %d; assuming 66MHz\n",
  753                             sc->sc_dev.dv_xname,
  754                             reg & STATUS_PCIX_MSK);
  755                         sc->sc_bus_speed = 66;
  756                 }
  757         } else
  758                 sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
  759         aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
  760             (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
  761             (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");
  762 
  763         /*
  764          * Allocate the control data structures, and create and load the
  765          * DMA map for it.
  766          */
  767         if ((error = bus_dmamem_alloc(sc->sc_dmat,
  768             sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
  769             0)) != 0) {
  770                 aprint_error(
  771                     "%s: unable to allocate control data, error = %d\n",
  772                     sc->sc_dev.dv_xname, error);
  773                 goto fail_0;
  774         }
  775 
  776         if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
  777             sizeof(struct dge_control_data), (caddr_t *)&sc->sc_control_data,
  778             0)) != 0) {
  779                 aprint_error("%s: unable to map control data, error = %d\n",
  780                     sc->sc_dev.dv_xname, error);
  781                 goto fail_1;
  782         }
  783 
  784         if ((error = bus_dmamap_create(sc->sc_dmat,
  785             sizeof(struct dge_control_data), 1,
  786             sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
  787                 aprint_error("%s: unable to create control data DMA map, "
  788                     "error = %d\n", sc->sc_dev.dv_xname, error);
  789                 goto fail_2;
  790         }
  791 
  792         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
  793             sc->sc_control_data, sizeof(struct dge_control_data), NULL,
  794             0)) != 0) {
  795                 aprint_error(
  796                     "%s: unable to load control data DMA map, error = %d\n",
  797                     sc->sc_dev.dv_xname, error);
  798                 goto fail_3;
  799         }
  800 
  801 #ifdef DGE_OFFBYONE_RXBUG 
  802         if (dge_alloc_rcvmem(sc) != 0)
  803                 return; /* Already complained */
  804 #endif
  805         /*
  806          * Create the transmit buffer DMA maps.
  807          */
  808         for (i = 0; i < DGE_TXQUEUELEN; i++) {
  809                 if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
  810                     DGE_NTXSEGS, MCLBYTES, 0, 0,
  811                     &sc->sc_txsoft[i].txs_dmamap)) != 0) {
  812                         aprint_error("%s: unable to create Tx DMA map %d, "
  813                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  814                         goto fail_4;
  815                 }
  816         }
  817 
  818         /*
  819          * Create the receive buffer DMA maps.
  820          */
  821         for (i = 0; i < DGE_NRXDESC; i++) {
  822 #ifdef DGE_OFFBYONE_RXBUG
  823                 if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
  824                     DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  825 #else
  826                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
  827                     MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  828 #endif
  829                         aprint_error("%s: unable to create Rx DMA map %d, "
  830                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  831                         goto fail_5;
  832                 }
  833                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
  834         }
  835 
  836         /*
  837          * Set bits in ctrl0 register.
  838          * Should get the software defined pins out of EEPROM?
  839          */
  840         sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
  841         sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
  842             CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;
  843 
  844         /*
  845          * Reset the chip to a known state.
  846          */
  847         dge_reset(sc);
  848 
  849         /*
  850          * Reset the PHY.
  851          */
  852         dge_xgmii_reset(sc);
  853 
  854         /*
  855          * Read in EEPROM data.
  856          */
  857         if (dge_read_eeprom(sc)) {
  858                 aprint_error("%s: couldn't read EEPROM\n", sc->sc_dev.dv_xname);
  859                 return;
  860         }
  861 
  862         /*
  863          * Get the ethernet address.
  864          */
  865         enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
  866         enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
  867         enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
  868         enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
  869         enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
  870         enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;
  871 
  872         aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
  873             ether_sprintf(enaddr));
  874 
  875         /*
  876          * Setup media stuff.
  877          */
  878         ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
  879             dge_xgmii_mediastatus);
  880         ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
  881         ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_LR);
  882 
  883         ifp = &sc->sc_ethercom.ec_if;
  884         strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
  885         ifp->if_softc = sc;
  886         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  887         ifp->if_ioctl = dge_ioctl;
  888         ifp->if_start = dge_start;
  889         ifp->if_watchdog = dge_watchdog;
  890         ifp->if_init = dge_init;
  891         ifp->if_stop = dge_stop;
  892         IFQ_SET_MAXLEN(&ifp->if_snd, max(DGE_IFQUEUELEN, IFQ_MAXLEN));
  893         IFQ_SET_READY(&ifp->if_snd);
  894 
  895         sc->sc_ethercom.ec_capabilities |=
  896             ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
  897 
  898         /*
   899          * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound.
  900          */
  901         ifp->if_capabilities |=
  902             IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
  903 
  904         /*
  905          * Attach the interface.
  906          */
  907         if_attach(ifp);
  908         ether_ifattach(ifp, enaddr);
  909 #if NRND > 0
  910         rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
  911             RND_TYPE_NET, 0);
  912 #endif
  913 
  914 #ifdef DGE_EVENT_COUNTERS
  915         /* Fix segment event naming */
  916         if (dge_txseg_evcnt_names == NULL) {
  917                 dge_txseg_evcnt_names =
  918                     malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
  919                 for (i = 0; i < DGE_NTXSEGS; i++)
  920                         sprintf((*dge_txseg_evcnt_names)[i], "txseg%d", i);
  921         }
  922 
  923         /* Attach event counters. */
  924         evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
  925             NULL, sc->sc_dev.dv_xname, "txsstall");
  926         evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
  927             NULL, sc->sc_dev.dv_xname, "txdstall");
  928         evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
  929             NULL, sc->sc_dev.dv_xname, "txforceintr");
  930         evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
  931             NULL, sc->sc_dev.dv_xname, "txdw");
  932         evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
  933             NULL, sc->sc_dev.dv_xname, "txqe");
  934         evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
  935             NULL, sc->sc_dev.dv_xname, "rxintr");
  936         evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
  937             NULL, sc->sc_dev.dv_xname, "linkintr");
  938 
  939         evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
  940             NULL, sc->sc_dev.dv_xname, "rxipsum");
  941         evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
  942             NULL, sc->sc_dev.dv_xname, "rxtusum");
  943         evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
  944             NULL, sc->sc_dev.dv_xname, "txipsum");
  945         evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
  946             NULL, sc->sc_dev.dv_xname, "txtusum");
  947 
  948         evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
  949             NULL, sc->sc_dev.dv_xname, "txctx init");
  950         evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
  951             NULL, sc->sc_dev.dv_xname, "txctx hit");
  952         evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
  953             NULL, sc->sc_dev.dv_xname, "txctx miss");
  954 
  955         for (i = 0; i < DGE_NTXSEGS; i++)
  956                 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
  957                     NULL, sc->sc_dev.dv_xname, (*dge_txseg_evcnt_names)[i]);
  958 
  959         evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
  960             NULL, sc->sc_dev.dv_xname, "txdrop");
  961 
  962 #endif /* DGE_EVENT_COUNTERS */
  963 
  964         /*
  965          * Make sure the interface is shutdown during reboot.
  966          */
  967         sc->sc_sdhook = shutdownhook_establish(dge_shutdown, sc);
  968         if (sc->sc_sdhook == NULL)
  969                 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
  970                     sc->sc_dev.dv_xname);
  971         return;
  972 
  973         /*
  974          * Free any resources we've allocated during the failed attach
  975          * attempt.  Do this in reverse order and fall through.
  976          */
  977  fail_5:
  978         for (i = 0; i < DGE_NRXDESC; i++) {
  979                 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
  980                         bus_dmamap_destroy(sc->sc_dmat,
  981                             sc->sc_rxsoft[i].rxs_dmamap);
  982         }
  983  fail_4:
  984         for (i = 0; i < DGE_TXQUEUELEN; i++) {
  985                 if (sc->sc_txsoft[i].txs_dmamap != NULL)
  986                         bus_dmamap_destroy(sc->sc_dmat,
  987                             sc->sc_txsoft[i].txs_dmamap);
  988         }
  989         bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
  990  fail_3:
  991         bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
  992  fail_2:
  993         bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
  994             sizeof(struct dge_control_data));
  995  fail_1:
  996         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
  997  fail_0:
  998         return;
  999 }
 1000 
 1001 /*
 1002  * dge_shutdown:
 1003  *
 1004  *      Make sure the interface is stopped at reboot time.
 1005  */
 1006 static void
 1007 dge_shutdown(void *arg)
 1008 {
 1009         struct dge_softc *sc = arg;
 1010 
 1011         dge_stop(&sc->sc_ethercom.ec_if, 1);
 1012 }
 1013 
 1014 /*
 1015  * dge_tx_cksum:
 1016  *
 1017  *      Set up TCP/IP checksumming parameters for the
 1018  *      specified packet.
 1019  */
 1020 static int
 1021 dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
 1022 {
 1023         struct mbuf *m0 = txs->txs_mbuf;
 1024         struct dge_ctdes *t;
 1025         uint32_t ipcs, tucs;
 1026         struct ip *ip;
 1027         struct ether_header *eh;
 1028         int offset, iphl;
 1029         uint8_t fields = 0;
 1030 
 1031         /*
 1032          * XXX It would be nice if the mbuf pkthdr had offset
 1033          * fields for the protocol headers.
 1034          */
 1035 
 1036         eh = mtod(m0, struct ether_header *);
 1037         switch (htons(eh->ether_type)) {
 1038         case ETHERTYPE_IP:
 1039                 iphl = sizeof(struct ip);
 1040                 offset = ETHER_HDR_LEN;
 1041                 break;
 1042 
 1043         case ETHERTYPE_VLAN:
 1044                 iphl = sizeof(struct ip);
 1045                 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1046                 break;
 1047 
 1048         default:
 1049                 /*
 1050                  * Don't support this protocol or encapsulation.
 1051                  */
 1052                 *fieldsp = 0;
 1053                 return (0);
 1054         }
 1055 
 1056         if (m0->m_len < (offset + iphl)) {
 1057                 if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
 1058                         printf("%s: dge_tx_cksum: mbuf allocation failed, "
 1059                             "packet dropped\n", sc->sc_dev.dv_xname);
 1060                         return (ENOMEM);
 1061                 }
 1062                 m0 = txs->txs_mbuf;
 1063         }
 1064 
 1065         ip = (struct ip *) (mtod(m0, caddr_t) + offset);
 1066         iphl = ip->ip_hl << 2;
 1067 
 1068         /*
 1069          * NOTE: Even if we're not using the IP or TCP/UDP checksum
 1070          * offload feature, if we load the context descriptor, we
 1071          * MUST provide valid values for IPCSS and TUCSS fields.
 1072          */
 1073 
 1074         if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
 1075                 DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
 1076                 fields |= TDESC_POPTS_IXSM;
 1077                 ipcs = DGE_TCPIP_IPCSS(offset) |
 1078                     DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
 1079                     DGE_TCPIP_IPCSE(offset + iphl - 1);
 1080         } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
 1081                 /* Use the cached value. */
 1082                 ipcs = sc->sc_txctx_ipcs;
 1083         } else {
 1084                 /* Just initialize it to the likely value anyway. */
 1085                 ipcs = DGE_TCPIP_IPCSS(offset) |
 1086                     DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
 1087                     DGE_TCPIP_IPCSE(offset + iphl - 1);
 1088         }
 1089         DPRINTF(DGE_DEBUG_CKSUM,
 1090             ("%s: CKSUM: offset %d ipcs 0x%x\n", 
 1091             sc->sc_dev.dv_xname, offset, ipcs));
 1092 
 1093         offset += iphl;
 1094 
 1095         if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
 1096                 DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
 1097                 fields |= TDESC_POPTS_TXSM;
 1098                 tucs = DGE_TCPIP_TUCSS(offset) |
 1099                     DGE_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
 1100                     DGE_TCPIP_TUCSE(0) /* rest of packet */;
 1101         } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
 1102                 /* Use the cached value. */
 1103                 tucs = sc->sc_txctx_tucs;
 1104         } else {
 1105                 /* Just initialize it to a valid TCP context. */
 1106                 tucs = DGE_TCPIP_TUCSS(offset) |
 1107                     DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
 1108                     DGE_TCPIP_TUCSE(0) /* rest of packet */;
 1109         }
 1110 
 1111         DPRINTF(DGE_DEBUG_CKSUM,
 1112             ("%s: CKSUM: offset %d tucs 0x%x\n",
 1113             sc->sc_dev.dv_xname, offset, tucs));
 1114 
 1115         if (sc->sc_txctx_ipcs == ipcs &&
 1116             sc->sc_txctx_tucs == tucs) {
 1117                 /* Cached context is fine. */
 1118                 DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
 1119         } else {
 1120                 /* Fill in the context descriptor. */
 1121 #ifdef DGE_EVENT_COUNTERS
 1122                 if (sc->sc_txctx_ipcs == 0xffffffff &&
 1123                     sc->sc_txctx_tucs == 0xffffffff)
 1124                         DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
 1125                 else
 1126                         DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
 1127 #endif
 1128                 t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
 1129                 t->dc_tcpip_ipcs = htole32(ipcs);
 1130                 t->dc_tcpip_tucs = htole32(tucs);
 1131                 t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
 1132                 t->dc_tcpip_seg = 0;
 1133                 DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
 1134 
 1135                 sc->sc_txctx_ipcs = ipcs;
 1136                 sc->sc_txctx_tucs = tucs;
 1137 
 1138                 sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
 1139                 txs->txs_ndesc++;
 1140         }
 1141 
 1142         *fieldsp = fields;
 1143 
 1144         return (0);
 1145 }
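      /*
       * Worked example for a plain IPv4/TCP frame: offset = ETHER_HDR_LEN
       * = 14 and iphl = 20, so IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum
       * sits at offset 10 in struct ip) and IPCSE = 33; for the TCP
       * checksum, TUCSS = 34 and TUCSO = 34 + 16 = 50 (th_sum in struct
       * tcphdr).
       */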
 1146 
 1147 /*
 1148  * dge_start:           [ifnet interface function]
 1149  *
 1150  *      Start packet transmission on the interface.
 1151  */
 1152 static void
 1153 dge_start(struct ifnet *ifp)
 1154 {
 1155         struct dge_softc *sc = ifp->if_softc;
 1156         struct mbuf *m0;
 1157         struct dge_txsoft *txs;
 1158         bus_dmamap_t dmamap;
 1159         int error, nexttx, lasttx = -1, ofree, seg;
 1160         uint32_t cksumcmd;
 1161         uint8_t cksumfields;
 1162 
 1163         if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
 1164                 return;
 1165 
 1166         /*
 1167          * Remember the previous number of free descriptors.
 1168          */
 1169         ofree = sc->sc_txfree;
 1170 
 1171         /*
 1172          * Loop through the send queue, setting up transmit descriptors
 1173          * until we drain the queue, or use up all available transmit
 1174          * descriptors.
 1175          */
 1176         for (;;) {
 1177                 /* Grab a packet off the queue. */
 1178                 IFQ_POLL(&ifp->if_snd, m0);
 1179                 if (m0 == NULL)
 1180                         break;
 1181 
 1182                 DPRINTF(DGE_DEBUG_TX,
 1183                     ("%s: TX: have packet to transmit: %p\n",
 1184                     sc->sc_dev.dv_xname, m0));
 1185 
 1186                 /* Get a work queue entry. */
 1187                 if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
 1188                         dge_txintr(sc);
 1189                         if (sc->sc_txsfree == 0) {
 1190                                 DPRINTF(DGE_DEBUG_TX,
 1191                                     ("%s: TX: no free job descriptors\n",
 1192                                         sc->sc_dev.dv_xname));
 1193                                 DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
 1194                                 break;
 1195                         }
 1196                 }
 1197 
 1198                 txs = &sc->sc_txsoft[sc->sc_txsnext];
 1199                 dmamap = txs->txs_dmamap;
 1200 
 1201                 /*
 1202                  * Load the DMA map.  If this fails, the packet either
 1203                  * didn't fit in the allotted number of segments, or we
 1204                  * were short on resources.  For the too-many-segments
 1205                  * case, we simply report an error and drop the packet,
 1206                  * since we can't sanely copy a jumbo packet to a single
 1207                  * buffer.
 1208                  */
 1209                 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
 1210                     BUS_DMA_WRITE|BUS_DMA_NOWAIT);
 1211                 if (error) {
 1212                         if (error == EFBIG) {
 1213                                 DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
 1214                                 printf("%s: Tx packet consumes too many "
 1215                                     "DMA segments, dropping...\n",
 1216                                     sc->sc_dev.dv_xname);
 1217                                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1218                                 m_freem(m0);
 1219                                 continue;
 1220                         }
 1221                         /*
 1222                          * Short on resources, just stop for now.
 1223                          */
 1224                         DPRINTF(DGE_DEBUG_TX,
 1225                             ("%s: TX: dmamap load failed: %d\n",
 1226                             sc->sc_dev.dv_xname, error));
 1227                         break;
 1228                 }
 1229 
 1230                 /*
 1231                  * Ensure we have enough descriptors free to describe
 1232                  * the packet.  Note, we always reserve one descriptor
 1233                  * at the end of the ring due to the semantics of the
 1234                  * TDT register, plus one more in the event we need
 1235                  * to re-load checksum offload context.
 1236                  */
 1237                 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
 1238                         /*
 1239                          * Not enough free descriptors to transmit this
 1240                          * packet.  We haven't committed anything yet,
 1241                          * so just unload the DMA map, put the packet
 1242                          * pack on the queue, and punt.  Notify the upper
 1243                          * layer that there are no more slots left.
 1244                          */
 1245                         DPRINTF(DGE_DEBUG_TX,
 1246                             ("%s: TX: need %d descriptors, have %d\n",
 1247                             sc->sc_dev.dv_xname, dmamap->dm_nsegs,
 1248                             sc->sc_txfree - 1));
 1249                         ifp->if_flags |= IFF_OACTIVE;
 1250                         bus_dmamap_unload(sc->sc_dmat, dmamap);
 1251                         DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
 1252                         break;
 1253                 }
 1254 
 1255                 IFQ_DEQUEUE(&ifp->if_snd, m0);
 1256 
 1257                 /*
 1258                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
 1259                  */
 1260 
 1261                 /* Sync the DMA map. */
 1262                 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
 1263                     BUS_DMASYNC_PREWRITE);
 1264 
 1265                 DPRINTF(DGE_DEBUG_TX,
 1266                     ("%s: TX: packet has %d DMA segments\n",
 1267                     sc->sc_dev.dv_xname, dmamap->dm_nsegs));
 1268 
 1269                 DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
 1270 
 1271                 /*
 1272                  * Store a pointer to the packet so that we can free it
 1273                  * later.
 1274                  *
 1275                  * Initially, we take the number of descriptors the
 1276                  * packet uses to be its number of DMA segments.  This
 1277                  * may be incremented by 1 if we do checksum offload (a
 1278                  * descriptor is used to set the checksum context).
 1279                  */
 1280                 txs->txs_mbuf = m0;
 1281                 txs->txs_firstdesc = sc->sc_txnext;
 1282                 txs->txs_ndesc = dmamap->dm_nsegs;
 1283 
 1284                 /*
 1285                  * Set up checksum offload parameters for
 1286                  * this packet.
 1287                  */
 1288                 if (m0->m_pkthdr.csum_flags &
 1289                     (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
 1290                         if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
 1291                                 /* Error message already displayed. */
 1292                                 bus_dmamap_unload(sc->sc_dmat, dmamap);
 1293                                 continue;
 1294                         }
 1295                 } else {
 1296                         cksumfields = 0;
 1297                 }
 1298 
 1299                 cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;
 1300 
 1301                 /*
 1302                  * Initialize the transmit descriptor.
 1303                  */
 1304                 for (nexttx = sc->sc_txnext, seg = 0;
 1305                      seg < dmamap->dm_nsegs;
 1306                      seg++, nexttx = DGE_NEXTTX(nexttx)) {
 1307                         /*
 1308                          * Note: we currently only use 32-bit DMA
 1309                          * addresses.
 1310                          */
 1311                         sc->sc_txdescs[nexttx].dt_baddrh = 0;
 1312                         sc->sc_txdescs[nexttx].dt_baddrl =
 1313                             htole32(dmamap->dm_segs[seg].ds_addr);
 1314                         sc->sc_txdescs[nexttx].dt_ctl =
 1315                             htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
 1316                         sc->sc_txdescs[nexttx].dt_status = 0;
 1317                         sc->sc_txdescs[nexttx].dt_popts = cksumfields;
 1318                         sc->sc_txdescs[nexttx].dt_vlan = 0;
 1319                         lasttx = nexttx;
 1320 
 1321                         DPRINTF(DGE_DEBUG_TX,
 1322                             ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
 1323                             sc->sc_dev.dv_xname, nexttx,
 1324                             le32toh(dmamap->dm_segs[seg].ds_addr),
 1325                             le32toh(dmamap->dm_segs[seg].ds_len)));
 1326                 }
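                      /*
                       * Illustration: a packet with three DMA segments
                       * and sc_txnext == 10 fills descriptors 10, 11
                       * and 12 (modulo ring wrap), leaving lasttx == 12
                       * and nexttx == 13, the value later written to
                       * TDT below.
                       */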
 1327 
 1328                 KASSERT(lasttx != -1);
 1329 
 1330                 /*
 1331                  * Set up the command byte on the last descriptor
 1332                  * of the packet: mark it end-of-packet and have
 1333                  * the chip report status (DD) when it is done.
 1334                  */
 1335                 sc->sc_txdescs[lasttx].dt_ctl |=
 1336                     htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);
 1337 
 1338                 txs->txs_lastdesc = lasttx;
 1339 
 1340                 DPRINTF(DGE_DEBUG_TX,
 1341                     ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
 1342                     lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));
 1343 
 1344                 /* Sync the descriptors we're using. */
 1345                 DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
 1346                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1347 
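                      /*
                       * Writing TDT (the transmit descriptor tail)
                       * hands every descriptor up to, but not
                       * including, the one it points at over to the
                       * chip; this is why one ring slot is always kept
                       * in reserve above.
                       */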
 1348                 /* Give the packet to the chip. */
 1349                 CSR_WRITE(sc, DGE_TDT, nexttx);
 1350 
 1351                 DPRINTF(DGE_DEBUG_TX,
 1352                     ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
 1353 
 1354                 DPRINTF(DGE_DEBUG_TX,
 1355                     ("%s: TX: finished transmitting packet, job %d\n",
 1356                     sc->sc_dev.dv_xname, sc->sc_txsnext));
 1357 
 1358                 /* Advance the tx pointer. */
 1359                 sc->sc_txfree -= txs->txs_ndesc;
 1360                 sc->sc_txnext = nexttx;
 1361 
 1362                 sc->sc_txsfree--;
 1363                 sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);
 1364 
 1365 #if NBPFILTER > 0
 1366                 /* Pass the packet to any BPF listeners. */
 1367                 if (ifp->if_bpf)
 1368                         bpf_mtap(ifp->if_bpf, m0);
 1369 #endif /* NBPFILTER > 0 */
 1370         }
 1371 
 1372         if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
 1373                 /* No more slots; notify upper layer. */
 1374                 ifp->if_flags |= IFF_OACTIVE;
 1375         }
 1376 
 1377         if (sc->sc_txfree != ofree) {
 1378                 /* Set a watchdog timer in case the chip flakes out. */
 1379                 ifp->if_timer = 5;
 1380         }
 1381 }
 1382 
 1383 /*
 1384  * dge_watchdog:                [ifnet interface function]
 1385  *
 1386  *      Watchdog timer handler.
 1387  */
 1388 static void
 1389 dge_watchdog(struct ifnet *ifp)
 1390 {
 1391         struct dge_softc *sc = ifp->if_softc;
 1392 
 1393         /*
 1394          * Since we're using delayed interrupts, sweep up
 1395          * before we report an error.
 1396          */
 1397         dge_txintr(sc);
 1398 
 1399         if (sc->sc_txfree != DGE_NTXDESC) {
 1400                 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
 1401                     sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
 1402                     sc->sc_txnext);
 1403                 ifp->if_oerrors++;
 1404 
 1405                 /* Reset the interface. */
 1406                 (void) dge_init(ifp);
 1407         }
 1408 
 1409         /* Try to get more packets going. */
 1410         dge_start(ifp);
 1411 }
 1412 
 1413 /*
 1414  * dge_ioctl:           [ifnet interface function]
 1415  *
 1416  *      Handle control requests from the operator.
 1417  */
 1418 static int
 1419 dge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1420 {
 1421         struct dge_softc *sc = ifp->if_softc;
 1422         struct ifreq *ifr = (struct ifreq *) data;
 1423         pcireg_t preg;
 1424         int s, error, mmrbc;
 1425 
 1426         s = splnet();
 1427 
 1428         switch (cmd) {
 1429         case SIOCSIFMEDIA:
 1430         case SIOCGIFMEDIA:
 1431                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
 1432                 break;
 1433 
 1434         case SIOCSIFMTU:
 1435                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU) {
 1436                         error = EINVAL;
 1437                 } else {
 1438                         error = 0;
 1439                         ifp->if_mtu = ifr->ifr_mtu;
 1440                         if (ifp->if_flags & IFF_UP)
 1441                                 error = (*ifp->if_init)(ifp);
 1442                 }
 1443                 break;
 1444 
 1445         case SIOCSIFFLAGS:
 1446                 /* Link flags select the PCI-X max memory read byte count */
 1447                 if ((ifp->if_flags & IFF_LINK0) == 0 &&
 1448                     (ifp->if_flags & IFF_LINK1) == 0)
 1449                         mmrbc = PCIX_MMRBC_512;
 1450                 else if ((ifp->if_flags & IFF_LINK0) == 0 &&
 1451                     (ifp->if_flags & IFF_LINK1) != 0)
 1452                         mmrbc = PCIX_MMRBC_1024;
 1453                 else if ((ifp->if_flags & IFF_LINK0) != 0 &&
 1454                     (ifp->if_flags & IFF_LINK1) == 0)
 1455                         mmrbc = PCIX_MMRBC_2048;
 1456                 else
 1457                         mmrbc = PCIX_MMRBC_4096;
 1458                 if (mmrbc != sc->sc_mmrbc) {
 1459                         preg = pci_conf_read(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD);
 1460                         preg &= ~PCIX_MMRBC_MSK;
 1461                         preg |= mmrbc;
 1462                         pci_conf_write(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD, preg);
 1463                         sc->sc_mmrbc = mmrbc;
 1464                 }
 1465                 /* FALLTHROUGH */
 1466         default:
 1467                 error = ether_ioctl(ifp, cmd, data);
 1468                 if (error == ENETRESET) {
 1469                         /*
 1470                          * Multicast list has changed; set the hardware filter
 1471                          * accordingly.
 1472                          */
 1473                         dge_set_filter(sc);
 1474                         error = 0;
 1475                 }
 1476                 break;
 1477         }
 1478 
 1479         /* Try to get more packets going. */
 1480         dge_start(ifp);
 1481 
 1482         splx(s);
 1483         return (error);
 1484 }
 1485 
 1486 /*
 1487  * dge_intr:
 1488  *
 1489  *      Interrupt service routine.
 1490  */
 1491 static int
 1492 dge_intr(void *arg)
 1493 {
 1494         struct dge_softc *sc = arg;
 1495         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1496         uint32_t icr;
 1497         int wantinit, handled = 0;
 1498 
 1499         for (wantinit = 0; wantinit == 0;) {
 1500                 icr = CSR_READ(sc, DGE_ICR);
 1501                 if ((icr & sc->sc_icr) == 0)
 1502                         break;
 1503 
 1504 #if 0 /*NRND > 0*/
 1505                 if (RND_ENABLED(&sc->rnd_source))
 1506                         rnd_add_uint32(&sc->rnd_source, icr);
 1507 #endif
 1508 
 1509                 handled = 1;
 1510 
 1511 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
 1512                 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
 1513                         DPRINTF(DGE_DEBUG_RX,
 1514                             ("%s: RX: got Rx intr 0x%08x\n",
 1515                             sc->sc_dev.dv_xname,
 1516                             icr & (ICR_RXDMT0|ICR_RXT0)));
 1517                         DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
 1518                 }
 1519 #endif
 1520                 dge_rxintr(sc);
 1521 
 1522 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
 1523                 if (icr & ICR_TXDW) {
 1524                         DPRINTF(DGE_DEBUG_TX,
 1525                             ("%s: TX: got TXDW interrupt\n",
 1526                             sc->sc_dev.dv_xname));
 1527                         DGE_EVCNT_INCR(&sc->sc_ev_txdw);
 1528                 }
 1529                 if (icr & ICR_TXQE)
 1530                         DGE_EVCNT_INCR(&sc->sc_ev_txqe);
 1531 #endif
 1532                 dge_txintr(sc);
 1533 
 1534                 if (icr & (ICR_LSC|ICR_RXSEQ)) {
 1535                         DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
 1536                         dge_linkintr(sc, icr);
 1537                 }
 1538 
 1539                 if (icr & ICR_RXO) {
 1540                         printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
 1541                         wantinit = 1;
 1542                 }
 1543         }
 1544 
 1545         if (handled) {
 1546                 if (wantinit)
 1547                         dge_init(ifp);
 1548 
 1549                 /* Try to get more packets going. */
 1550                 dge_start(ifp);
 1551         }
 1552 
 1553         return (handled);
 1554 }
 1555 
 1556 /*
 1557  * dge_txintr:
 1558  *
 1559  *      Helper; handle transmit interrupts.
 1560  */
 1561 static void
 1562 dge_txintr(struct dge_softc *sc)
 1563 {
 1564         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1565         struct dge_txsoft *txs;
 1566         uint8_t status;
 1567         int i;
 1568 
 1569         ifp->if_flags &= ~IFF_OACTIVE;
 1570 
 1571         /*
 1572          * Go through the Tx list and free mbufs for those
 1573          * frames which have been transmitted.
 1574          */
 1575         for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
 1576              i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
 1577                 txs = &sc->sc_txsoft[i];
 1578 
 1579                 DPRINTF(DGE_DEBUG_TX,
 1580                     ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
 1581 
 1582                 DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
 1583                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1584 
 1585                 status =
 1586                     sc->sc_txdescs[txs->txs_lastdesc].dt_status;
 1587                 if ((status & TDESC_STA_DD) == 0) {
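                              /*
                               * This descriptor is not done yet; sync
                               * it for our next look and stop scanning,
                               * since the chip completes descriptors in
                               * order.
                               */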
 1588                         DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
 1589                             BUS_DMASYNC_PREREAD);
 1590                         break;
 1591                 }
 1592 
 1593                 DPRINTF(DGE_DEBUG_TX,
 1594                     ("%s: TX: job %d done: descs %d..%d\n",
 1595                     sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
 1596                     txs->txs_lastdesc));
 1597 
 1598                 ifp->if_opackets++;
 1599                 sc->sc_txfree += txs->txs_ndesc;
 1600                 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
 1601                     0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 1602                 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 1603                 m_freem(txs->txs_mbuf);
 1604                 txs->txs_mbuf = NULL;
 1605         }
 1606 
 1607         /* Update the dirty transmit buffer pointer. */
 1608         sc->sc_txsdirty = i;
 1609         DPRINTF(DGE_DEBUG_TX,
 1610             ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
 1611 
 1612         /*
 1613          * If there are no more pending transmissions, cancel the watchdog
 1614          * timer.
 1615          */
 1616         if (sc->sc_txsfree == DGE_TXQUEUELEN)
 1617                 ifp->if_timer = 0;
 1618 }
 1619 
 1620 /*
 1621  * dge_rxintr:
 1622  *
 1623  *      Helper; handle receive interrupts.
 1624  */
 1625 static void
 1626 dge_rxintr(struct dge_softc *sc)
 1627 {
 1628         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1629         struct dge_rxsoft *rxs;
 1630         struct mbuf *m;
 1631         int i, len;
 1632         uint8_t status, errors;
 1633 
 1634         for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
 1635                 rxs = &sc->sc_rxsoft[i];
 1636 
 1637                 DPRINTF(DGE_DEBUG_RX,
 1638                     ("%s: RX: checking descriptor %d\n",
 1639                     sc->sc_dev.dv_xname, i));
 1640 
 1641                 DGE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1642 
 1643                 status = sc->sc_rxdescs[i].dr_status;
 1644                 errors = sc->sc_rxdescs[i].dr_errors;
 1645                 len = le16toh(sc->sc_rxdescs[i].dr_len);
 1646 
 1647                 if ((status & RDESC_STS_DD) == 0) {
 1648                         /*
 1649                          * We have processed all of the receive descriptors.
 1650                          */
 1651                         DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
 1652                         break;
 1653                 }
 1654 
 1655                 if (__predict_false(sc->sc_rxdiscard)) {
 1656                         DPRINTF(DGE_DEBUG_RX,
 1657                             ("%s: RX: discarding contents of descriptor %d\n",
 1658                             sc->sc_dev.dv_xname, i));
 1659                         DGE_INIT_RXDESC(sc, i);
 1660                         if (status & RDESC_STS_EOP) {
 1661                                 /* Reset our state. */
 1662                                 DPRINTF(DGE_DEBUG_RX,
 1663                                     ("%s: RX: resetting rxdiscard -> 0\n",
 1664                                     sc->sc_dev.dv_xname));
 1665                                 sc->sc_rxdiscard = 0;
 1666                         }
 1667                         continue;
 1668                 }
 1669 
 1670                 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1671                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1672 
 1673                 m = rxs->rxs_mbuf;
 1674 
 1675                 /*
 1676                  * Add a new receive buffer to the ring.
 1677                  */
 1678                 if (dge_add_rxbuf(sc, i) != 0) {
 1679                         /*
 1680                          * Failed, throw away what we've done so
 1681                          * far, and discard the rest of the packet.
 1682                          */
 1683                         ifp->if_ierrors++;
 1684                         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1685                             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1686                         DGE_INIT_RXDESC(sc, i);
 1687                         if ((status & RDESC_STS_EOP) == 0)
 1688                                 sc->sc_rxdiscard = 1;
 1689                         if (sc->sc_rxhead != NULL)
 1690                                 m_freem(sc->sc_rxhead);
 1691                         DGE_RXCHAIN_RESET(sc);
 1692                         DPRINTF(DGE_DEBUG_RX,
 1693                             ("%s: RX: Rx buffer allocation failed, "
 1694                             "dropping packet%s\n", sc->sc_dev.dv_xname,
 1695                             sc->sc_rxdiscard ? " (discard)" : ""));
 1696                         continue;
 1697                 }
 1698                 DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */
 1699 
 1700                 DGE_RXCHAIN_LINK(sc, m);
 1701 
 1702                 m->m_len = len;
 1703 
 1704                 DPRINTF(DGE_DEBUG_RX,
 1705                     ("%s: RX: buffer at %p len %d\n",
 1706                     sc->sc_dev.dv_xname, m->m_data, len));
 1707 
 1708                 /*
 1709                  * If this is not the end of the packet, keep
 1710                  * looking.
 1711                  */
 1712                 if ((status & RDESC_STS_EOP) == 0) {
 1713                         sc->sc_rxlen += len;
 1714                         DPRINTF(DGE_DEBUG_RX,
 1715                             ("%s: RX: not yet EOP, rxlen -> %d\n",
 1716                             sc->sc_dev.dv_xname, sc->sc_rxlen));
 1717                         continue;
 1718                 }
 1719 
 1720                 /*
 1721                  * Okay, we have the entire packet now...
 1722                  */
 1723                 *sc->sc_rxtailp = NULL;
 1724                 m = sc->sc_rxhead;
 1725                 len += sc->sc_rxlen;
 1726 
 1727                 DGE_RXCHAIN_RESET(sc);
 1728 
 1729                 DPRINTF(DGE_DEBUG_RX,
 1730                     ("%s: RX: have entire packet, len -> %d\n",
 1731                     sc->sc_dev.dv_xname, len));
 1732 
 1733                 /*
 1734                  * If an error occurred, update stats and drop the packet.
 1735                  */
 1736                 if (errors &
 1737                      (RDESC_ERR_CE|RDESC_ERR_SE|RDESC_ERR_P|RDESC_ERR_RXE)) {
 1738                         ifp->if_ierrors++;
 1739                         if (errors & RDESC_ERR_SE)
 1740                                 printf("%s: symbol error\n",
 1741                                     sc->sc_dev.dv_xname);
 1742                         else if (errors & RDESC_ERR_P)
 1743                                 printf("%s: parity error\n",
 1744                                     sc->sc_dev.dv_xname);
 1745                         else if (errors & RDESC_ERR_CE)
 1746                                 printf("%s: CRC error\n",
 1747                                     sc->sc_dev.dv_xname);
 1748                         m_freem(m);
 1749                         continue;
 1750                 }
 1751 
 1752                 /*
 1753                  * No errors.  Receive the packet.
 1754                  */
 1755                 m->m_pkthdr.rcvif = ifp;
 1756                 m->m_pkthdr.len = len;
 1757 
 1758                 /*
 1759                  * Set up checksum info for this packet.
 1760                  */
 1761                 if (status & RDESC_STS_IPCS) {
 1762                         DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
 1763                         m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
 1764                         if (errors & RDESC_ERR_IPE)
 1765                                 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
 1766                 }
 1767                 if (status & RDESC_STS_TCPCS) {
 1768                         /*
 1769                          * Note: we don't know if this was TCP or UDP,
 1770                          * so we just set both bits, and expect the
 1771                          * upper layers to deal.
 1772                          */
 1773                         DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
 1774                         m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
 1775                         if (errors & RDESC_ERR_TCPE)
 1776                                 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
 1777                 }
 1778 
 1779                 ifp->if_ipackets++;
 1780 
 1781 #if NBPFILTER > 0
 1782                 /* Pass this up to any BPF listeners. */
 1783                 if (ifp->if_bpf)
 1784                         bpf_mtap(ifp->if_bpf, m);
 1785 #endif /* NBPFILTER > 0 */
 1786 
 1787                 /* Pass it on. */
 1788                 (*ifp->if_input)(ifp, m);
 1789         }
 1790 
 1791         /* Update the receive pointer. */
 1792         sc->sc_rxptr = i;
 1793 
 1794         DPRINTF(DGE_DEBUG_RX,
 1795             ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
 1796 }
 1797 
 1798 /*
 1799  * dge_linkintr:
 1800  *
 1801  *      Helper; handle link interrupts.
 1802  */
 1803 static void
 1804 dge_linkintr(struct dge_softc *sc, uint32_t icr)
 1805 {
 1806         uint32_t status;
 1807 
 1808         if (icr & ICR_LSC) {
 1809                 status = CSR_READ(sc, DGE_STATUS);
 1810                 if (status & STATUS_LINKUP) {
 1811                         DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
 1812                             sc->sc_dev.dv_xname));
 1813                 } else {
 1814                         DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
 1815                             sc->sc_dev.dv_xname));
 1816                 }
 1817         } else if (icr & ICR_RXSEQ) {
 1818                 DPRINTF(DGE_DEBUG_LINK,
 1819                     ("%s: LINK: Receive sequence error\n",
 1820                     sc->sc_dev.dv_xname));
 1821         }
 1822         /* XXX - fix errata */
 1823 }
 1824 
 1825 /*
 1826  * dge_reset:
 1827  *
 1828  *      Reset the i82597 chip.
 1829  */
 1830 static void
 1831 dge_reset(struct dge_softc *sc)
 1832 {
 1833         int i;
 1834 
 1835         /*
 1836          * Do a chip reset.
 1837          */
 1838         CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);
 1839 
 1840         delay(10000);
 1841 
 1842         for (i = 0; i < 1000; i++) {
 1843                 if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
 1844                         break;
 1845                 delay(20);
 1846         }
 1847 
 1848         if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
 1849                 printf("%s: WARNING: reset failed to complete\n",
 1850                     sc->sc_dev.dv_xname);
 1851         /*
 1852          * Reset the EEPROM logic.
 1853          * This will cause the chip to reread its default values,
 1854          * which doesn't happen otherwise (errata).
 1855          */
 1856         CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
 1857         delay(10000);
 1858 }
 1859 
 1860 /*
 1861  * dge_init:            [ifnet interface function]
 1862  *
 1863  *      Initialize the interface.  Must be called at splnet().
 1864  */
 1865 static int
 1866 dge_init(struct ifnet *ifp)
 1867 {
 1868         struct dge_softc *sc = ifp->if_softc;
 1869         struct dge_rxsoft *rxs;
 1870         int i, error = 0;
 1871         uint32_t reg;
 1872 
 1873         /*
 1874          * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
 1875          * There is a small but measurable benefit to avoiding the adjustment
 1876          * of the descriptor so that the headers are aligned, for normal mtu,
 1877          * on such platforms.  One possibility is that the DMA itself is
 1878          * slightly more efficient if the front of the entire packet (instead
 1879          * of the front of the headers) is aligned.
 1880          *
 1881          * Note we must always set align_tweak to 0 if we are using
 1882          * jumbo frames.
 1883          */
 1884 #ifdef __NO_STRICT_ALIGNMENT
 1885         sc->sc_align_tweak = 0;
 1886 #else
 1887         if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
 1888                 sc->sc_align_tweak = 0;
 1889         else
 1890                 sc->sc_align_tweak = 2;
 1891 #endif /* __NO_STRICT_ALIGNMENT */
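              /*
               * With a 14-byte Ethernet header, a 2-byte tweak places
               * the IP header on a 4-byte boundary, which
               * strict-alignment platforms require.
               */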
 1892 
 1893         /* Cancel any pending I/O. */
 1894         dge_stop(ifp, 0);
 1895 
 1896         /* Reset the chip to a known state. */
 1897         dge_reset(sc);
 1898 
 1899         /* Initialize the transmit descriptor ring. */
 1900         memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
 1901         DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
 1902             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1903         sc->sc_txfree = DGE_NTXDESC;
 1904         sc->sc_txnext = 0;
 1905 
 1906         sc->sc_txctx_ipcs = 0xffffffff;
 1907         sc->sc_txctx_tucs = 0xffffffff;
 1908 
 1909         CSR_WRITE(sc, DGE_TDBAH, 0);
 1910         CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
 1911         CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
 1912         CSR_WRITE(sc, DGE_TDH, 0);
 1913         CSR_WRITE(sc, DGE_TDT, 0);
 1914         CSR_WRITE(sc, DGE_TIDV, TIDV);
 1915 
 1916 #if 0
 1917         CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
 1918             TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
 1919 #endif
 1920         CSR_WRITE(sc, DGE_RXDCTL,
 1921             RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
 1922             RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
 1923             RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));
 1924 
 1925         /* Initialize the transmit job descriptors. */
 1926         for (i = 0; i < DGE_TXQUEUELEN; i++)
 1927                 sc->sc_txsoft[i].txs_mbuf = NULL;
 1928         sc->sc_txsfree = DGE_TXQUEUELEN;
 1929         sc->sc_txsnext = 0;
 1930         sc->sc_txsdirty = 0;
 1931 
 1932         /*
 1933          * Initialize the receive descriptor and receive job
 1934          * descriptor rings.
 1935          */
 1936         CSR_WRITE(sc, DGE_RDBAH, 0);
 1937         CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
 1938         CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
 1939         CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
 1940         CSR_WRITE(sc, DGE_RDT, 0);
 1941         CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
 1942         CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
 1943         CSR_WRITE(sc, DGE_FCRTH, FCRTH);
 1944 
 1945         for (i = 0; i < DGE_NRXDESC; i++) {
 1946                 rxs = &sc->sc_rxsoft[i];
 1947                 if (rxs->rxs_mbuf == NULL) {
 1948                         if ((error = dge_add_rxbuf(sc, i)) != 0) {
 1949                                 printf("%s: unable to allocate or map rx "
 1950                                     "buffer %d, error = %d\n",
 1951                                     sc->sc_dev.dv_xname, i, error);
 1952                                 /*
 1953                                  * XXX Should attempt to run with fewer receive
 1954                                  * XXX buffers instead of just failing.
 1955                                  */
 1956                                 dge_rxdrain(sc);
 1957                                 goto out;
 1958                         }
 1959                 }
 1960                 DGE_INIT_RXDESC(sc, i);
 1961         }
 1962         sc->sc_rxptr = DGE_RXSPACE;
 1963         sc->sc_rxdiscard = 0;
 1964         DGE_RXCHAIN_RESET(sc);
 1965 
 1966         if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
 1967                 sc->sc_ctrl0 |= CTRL0_JFE;
 1968                 CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
 1969         }
 1970 
 1971         /* Write the control registers. */
 1972         CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);
 1973 
 1974         /*
 1975          * Set up checksum offload parameters.
 1976          */
 1977         reg = CSR_READ(sc, DGE_RXCSUM);
 1978         if (ifp->if_capenable & IFCAP_CSUM_IPv4)
 1979                 reg |= RXCSUM_IPOFL;
 1980         else
 1981                 reg &= ~RXCSUM_IPOFL;
 1982         if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
 1983                 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
 1984         else {
 1985                 reg &= ~RXCSUM_TUOFL;
 1986                 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
 1987                         reg &= ~RXCSUM_IPOFL;
 1988         }
 1989         CSR_WRITE(sc, DGE_RXCSUM, reg);
 1990 
 1991         /*
 1992          * Set up the interrupt registers.
 1993          */
 1994         CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
 1995         sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
 1996             ICR_RXO | ICR_RXT0;
 1997 
 1998         CSR_WRITE(sc, DGE_IMS, sc->sc_icr);
 1999 
 2000         /*
 2001          * Set up the transmit control register.
 2002          */
 2003         sc->sc_tctl = TCTL_TCE|TCTL_TPDE|TCTL_TXEN;
 2004         CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);
 2005 
 2006         /*
 2007          * Set up the receive control register; we actually program
 2008          * the register when we set the receive filter.  Use multicast
 2009          * address offset type 0.
 2010          */
 2011         sc->sc_mchash_type = 0;
 2012 
 2013         sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC | 
 2014             RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);
 2015 
 2016 #ifdef DGE_OFFBYONE_RXBUG
 2017         sc->sc_rctl |= RCTL_BSIZE_16k;
 2018 #else
 2019         switch(MCLBYTES) {
 2020         case 2048:
 2021                 sc->sc_rctl |= RCTL_BSIZE_2k;
 2022                 break;
 2023         case 4096:
 2024                 sc->sc_rctl |= RCTL_BSIZE_4k;
 2025                 break;
 2026         case 8192:
 2027                 sc->sc_rctl |= RCTL_BSIZE_8k;
 2028                 break;
 2029         case 16384:
 2030                 sc->sc_rctl |= RCTL_BSIZE_16k;
 2031                 break;
 2032         default:
 2033                 panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
 2034         }
 2035 #endif
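              /*
               * Note: the buffer size selected here must match what
               * dge_add_rxbuf() actually allocates (a 16k private
               * buffer or an MCLBYTES-sized cluster), one buffer per
               * descriptor.
               */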
 2036 
 2037         /* Set the receive filter. */
 2038         /* Also sets RCTL */
 2039         dge_set_filter(sc);
 2040 
 2041         /* ...all done! */
 2042         ifp->if_flags |= IFF_RUNNING; 
 2043         ifp->if_flags &= ~IFF_OACTIVE;
 2044 
 2045  out:
 2046         if (error)
 2047                 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
 2048         return (error);
 2049 }
 2050 
 2051 /*
 2052  * dge_rxdrain:
 2053  *
 2054  *      Drain the receive queue.
 2055  */
 2056 static void
 2057 dge_rxdrain(struct dge_softc *sc)
 2058 {
 2059         struct dge_rxsoft *rxs;
 2060         int i;
 2061 
 2062         for (i = 0; i < DGE_NRXDESC; i++) {
 2063                 rxs = &sc->sc_rxsoft[i];
 2064                 if (rxs->rxs_mbuf != NULL) {
 2065                         bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 2066                         m_freem(rxs->rxs_mbuf);
 2067                         rxs->rxs_mbuf = NULL;
 2068                 }
 2069         }
 2070 }
 2071 
 2072 /*
 2073  * dge_stop:            [ifnet interface function]
 2074  *
 2075  *      Stop transmission on the interface.
 2076  */
 2077 static void
 2078 dge_stop(struct ifnet *ifp, int disable)
 2079 {
 2080         struct dge_softc *sc = ifp->if_softc;
 2081         struct dge_txsoft *txs;
 2082         int i;
 2083 
 2084         /* Stop the transmit and receive processes. */
 2085         CSR_WRITE(sc, DGE_TCTL, 0);
 2086         CSR_WRITE(sc, DGE_RCTL, 0);
 2087 
 2088         /* Release any queued transmit buffers. */
 2089         for (i = 0; i < DGE_TXQUEUELEN; i++) {
 2090                 txs = &sc->sc_txsoft[i];
 2091                 if (txs->txs_mbuf != NULL) {
 2092                         bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 2093                         m_freem(txs->txs_mbuf);
 2094                         txs->txs_mbuf = NULL;
 2095                 }
 2096         }
 2097 
 2098         if (disable)
 2099                 dge_rxdrain(sc);
 2100 
 2101         /* Mark the interface as down and cancel the watchdog timer. */
 2102         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 2103         ifp->if_timer = 0;
 2104 }
 2105 
 2106 /*
 2107  * dge_add_rxbuf:
 2108  *
 2109  *      Add a receive buffer to the indicated descriptor.
 2110  */
 2111 static int
 2112 dge_add_rxbuf(struct dge_softc *sc, int idx)
 2113 {
 2114         struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
 2115         struct mbuf *m;
 2116         int error;
 2117 #ifdef DGE_OFFBYONE_RXBUG
 2118         caddr_t buf;
 2119 #endif
 2120 
 2121         MGETHDR(m, M_DONTWAIT, MT_DATA);
 2122         if (m == NULL)
 2123                 return (ENOBUFS);
 2124 
 2125 #ifdef DGE_OFFBYONE_RXBUG
 2126         if ((buf = dge_getbuf(sc)) == NULL)
 2127                 return ENOBUFS;
 2128 
 2129         m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
 2130         MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
 2131 
 2132         if (rxs->rxs_mbuf != NULL)
 2133                 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 2134         rxs->rxs_mbuf = m;
 2135 
 2136         error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
 2137             DGE_BUFFER_SIZE, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
 2138 #else
 2139         MCLGET(m, M_DONTWAIT);
 2140         if ((m->m_flags & M_EXT) == 0) {
 2141                 m_freem(m);
 2142                 return (ENOBUFS);
 2143         }
 2144 
 2145         if (rxs->rxs_mbuf != NULL)
 2146                 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 2147 
 2148         rxs->rxs_mbuf = m;
 2149 
 2150         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
 2151         error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
 2152             BUS_DMA_READ|BUS_DMA_NOWAIT);
 2153 #endif
 2154         if (error) {
 2155                 printf("%s: unable to load rx DMA map %d, error = %d\n",
 2156                     sc->sc_dev.dv_xname, idx, error);
 2157                 panic("dge_add_rxbuf"); /* XXX XXX XXX */
 2158         }
 2159         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 2160             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 2161 
 2162         return (0);
 2163 }
 2164 
 2165 /*
 2166  * dge_set_ral:
 2167  *
 2168  *      Set an entry in the receive address list.
 2169  */
 2170 static void
 2171 dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
 2172 {
 2173         uint32_t ral_lo, ral_hi;
 2174 
 2175         if (enaddr != NULL) {
 2176                 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
 2177                     (enaddr[3] << 24);
 2178                 ral_hi = enaddr[4] | (enaddr[5] << 8);
 2179                 ral_hi |= RAH_AV;
 2180         } else {
 2181                 ral_lo = 0;
 2182                 ral_hi = 0;
 2183         }
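              /*
               * The address is packed little-endian; e.g.
               * 00:11:22:33:44:55 yields ral_lo 0x33221100 and ral_hi
               * 0x5544, with RAH_AV marking the slot valid.
               */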
 2184         CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
 2185         CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
 2186 }
 2187 
 2188 /*
 2189  * dge_mchash:
 2190  *
 2191  *      Compute the hash of the multicast address for the 4096-bit
 2192  *      multicast filter.
 2193  */
 2194 static uint32_t
 2195 dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
 2196 {
 2197         static const int lo_shift[4] = { 4, 3, 2, 0 };
 2198         static const int hi_shift[4] = { 4, 5, 6, 8 };
 2199         uint32_t hash;
 2200 
 2201         hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
 2202             (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
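              /*
               * Example: with filter type 0, 01:00:5e:00:00:01 hashes
               * to (0x00 >> 4) | (0x01 << 4) = 0x010, which
               * dge_set_filter() maps to bit 16 of MTA word 0.
               */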
 2203 
 2204         return (hash & 0xfff);
 2205 }
 2206 
 2207 /*
 2208  * dge_set_filter:
 2209  *
 2210  *      Set up the receive filter.
 2211  */
 2212 static void
 2213 dge_set_filter(struct dge_softc *sc)
 2214 {
 2215         struct ethercom *ec = &sc->sc_ethercom;
 2216         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2217         struct ether_multi *enm;
 2218         struct ether_multistep step;
 2219         uint32_t hash, reg, bit;
 2220         int i;
 2221 
 2222         sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
 2223 
 2224         if (ifp->if_flags & IFF_BROADCAST)
 2225                 sc->sc_rctl |= RCTL_BAM;
 2226         if (ifp->if_flags & IFF_PROMISC) {
 2227                 sc->sc_rctl |= RCTL_UPE;
 2228                 goto allmulti;
 2229         }
 2230 
 2231         /*
 2232          * Set the station address in the first RAL slot, and
 2233          * clear the remaining slots.
 2234          */
 2235         dge_set_ral(sc, LLADDR(ifp->if_sadl), 0);
 2236         for (i = 1; i < RA_TABSIZE; i++)
 2237                 dge_set_ral(sc, NULL, i);
 2238 
 2239         /* Clear out the multicast table. */
 2240         for (i = 0; i < MC_TABSIZE; i++)
 2241                 CSR_WRITE(sc, DGE_MTA + (i << 2), 0);
 2242 
 2243         ETHER_FIRST_MULTI(step, ec, enm);
 2244         while (enm != NULL) {
 2245                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 2246                         /*
 2247                          * We must listen to a range of multicast addresses.
 2248                          * For now, just accept all multicasts, rather than
 2249                          * trying to set only those filter bits needed to match
 2250                          * the range.  (At this time, the only use of address
 2251                          * ranges is for IP multicast routing, for which the
 2252                          * range is big enough to require all bits set.)
 2253                          */
 2254                         goto allmulti;
 2255                 }
 2256 
 2257                 hash = dge_mchash(sc, enm->enm_addrlo);
 2258 
 2259                 reg = (hash >> 5) & 0x7f;
 2260                 bit = hash & 0x1f;
 2261 
 2262                 hash = CSR_READ(sc, DGE_MTA + (reg << 2));
 2263                 hash |= 1U << bit;
 2264 
 2265                 CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);
 2266 
 2267                 ETHER_NEXT_MULTI(step, enm);
 2268         }
 2269 
 2270         ifp->if_flags &= ~IFF_ALLMULTI;
 2271         goto setit;
 2272 
 2273  allmulti:
 2274         ifp->if_flags |= IFF_ALLMULTI;
 2275         sc->sc_rctl |= RCTL_MPE;
 2276 
 2277  setit:
 2278         CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
 2279 }
 2280 
 2281 /*
 2282  * Read in the EEPROM info and verify the checksum: the 16-bit sum
       * of all EEPROM words must equal EEPROM_CKSUM.
 2283  */
 2284 int
 2285 dge_read_eeprom(struct dge_softc *sc)
 2286 {
 2287         uint16_t cksum;
 2288         int i;
 2289 
 2290         cksum = 0;
 2291         for (i = 0; i < EEPROM_SIZE; i++) {
 2292                 sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
 2293                 cksum += sc->sc_eeprom[i];
 2294         }
 2295         return cksum != EEPROM_CKSUM;
 2296 }
 2297 
 2298 
 2299 /*
 2300  * Read a 16-bit word from address addr in the serial EEPROM.
 2301  */
 2302 uint16_t
 2303 dge_eeprom_word(struct dge_softc *sc, int addr)
 2304 {
 2305         uint32_t reg;
 2306         uint16_t rval = 0;
 2307         int i;
 2308 
 2309         reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK|EECD_DI|EECD_CS);
 2310 
 2311         /* Lower clock pulse (and data in to chip) */
 2312         CSR_WRITE(sc, DGE_EECD, reg);
 2313         /* Select chip */
 2314         CSR_WRITE(sc, DGE_EECD, reg|EECD_CS);
 2315 
 2316         /* Send read command (start bit plus READ opcode: 110) */
 2317         dge_eeprom_clockout(sc, 1);
 2318         dge_eeprom_clockout(sc, 1);
 2319         dge_eeprom_clockout(sc, 0);
 2320 
 2321         /* Send address */
 2322         for (i = 5; i >= 0; i--)
 2323                 dge_eeprom_clockout(sc, (addr >> i) & 1);
 2324 
 2325         /* Read data */
 2326         for (i = 0; i < 16; i++) {
 2327                 rval <<= 1;
 2328                 rval |= dge_eeprom_clockin(sc);
 2329         }
 2330 
 2331         /* Deselect chip */
 2332         CSR_WRITE(sc, DGE_EECD, reg);
 2333 
 2334         return rval;
 2335 }
 2336 
 2337 /*
 2338  * Clock out a single bit to the EEPROM.
 2339  */
 2340 void
 2341 dge_eeprom_clockout(struct dge_softc *sc, int bit)
 2342 {
 2343         int reg;
 2344 
 2345         reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_SK);
 2346         if (bit)
 2347                 reg |= EECD_DI;
 2348         
 2349         CSR_WRITE(sc, DGE_EECD, reg);
 2350         delay(2);
 2351         CSR_WRITE(sc, DGE_EECD, reg|EECD_SK);
 2352         delay(2);
 2353         CSR_WRITE(sc, DGE_EECD, reg);
 2354         delay(2);
 2355 }
 2356 
 2357 /*
 2358  * Clock in a single bit from the EEPROM.
 2359  */
 2360 int
 2361 dge_eeprom_clockin(struct dge_softc *sc)
 2362 {
 2363         int reg, rv;
 2364 
 2365         reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_DO|EECD_SK);
 2366 
 2367         CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); /* Raise clock */
 2368         delay(2);
 2369         rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */
 2370         CSR_WRITE(sc, DGE_EECD, reg); /* Lower clock */
 2371         delay(2);
 2372 
 2373         return rv;
 2374 }
 2375 
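      /*
       * dge_xgmii_mediastatus:       [ifmedia interface function]
       *
       *      Get the current interface media status.
       */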
 2376 static void
 2377 dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2378 {
 2379         struct dge_softc *sc = ifp->if_softc;
 2380 
 2381         ifmr->ifm_status = IFM_AVALID;
 2382         ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;
 2383 
 2384         if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
 2385                 ifmr->ifm_status |= IFM_ACTIVE;
 2386 }
 2387 
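      /*
       * phwait:
       *
       *      Helper; issue an MDIO command and busy-wait for the chip
       *      to clear the command bit, then return the MDIO register
       *      contents.
       */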
 2388 static inline int
 2389 phwait(struct dge_softc *sc, int p, int r, int d, int type)
 2390 {
 2391         int i, mdic;
 2392 
 2393         CSR_WRITE(sc, DGE_MDIO,
 2394             MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
 2395         for (i = 0; i < 10; i++) { 
 2396                 delay(10);
 2397                 if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
 2398                         break;
 2399         }
 2400         return mdic;
 2401 }
 2402 
 2403 
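      /*
       * dge_xgmii_writereg:
       *
       *      Write a PHY register over MDIO: an address cycle followed
       *      by a write cycle, each checked for completion.
       */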
 2404 static void
 2405 dge_xgmii_writereg(struct device *self, int phy, int reg, int val)
 2406 {
 2407         struct dge_softc *sc = (void *) self;
 2408         int mdic;
 2409 
 2410         CSR_WRITE(sc, DGE_MDIRW, val);
 2411         if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) {
 2412                 printf("%s: address cycle timeout; phy %d reg %d\n",
 2413                     sc->sc_dev.dv_xname, phy, reg);
 2414                 return;
 2415         }
 2416         if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) {
 2417                 printf("%s: write cycle timeout; phy %d reg %d\n",
 2418                     sc->sc_dev.dv_xname, phy, reg);
 2419                 return;
 2420         }
 2421 }
 2422 
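      /*
       * dge_xgmii_reset:
       *
       *      Reset the PHY by writing BMCR_RESET to register 0.
       */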
 2423 static void
 2424 dge_xgmii_reset(struct dge_softc *sc)
 2425 {
 2426         dge_xgmii_writereg((void *)sc, 0, 0, BMCR_RESET);
 2427 }
 2428 
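      /*
       * dge_xgmii_mediachange:       [ifmedia interface function]
       *
       *      Set media options; the XGMII media is fixed, so there is
       *      nothing to do here.
       */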
 2429 static int
 2430 dge_xgmii_mediachange(struct ifnet *ifp)
 2431 {
 2432         return 0;
 2433 }
 2434 
