FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_bge.c

    1 /*      $NetBSD: if_bge.c,v 1.66.2.3 2004/05/29 09:00:24 tron Exp $     */
    2 
    3 /*
    4  * Copyright (c) 2001 Wind River Systems
    5  * Copyright (c) 1997, 1998, 1999, 2001
    6  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Bill Paul.
   19  * 4. Neither the name of the author nor the names of any co-contributors
   20  *    may be used to endorse or promote products derived from this software
   21  *    without specific prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   33  * THE POSSIBILITY OF SUCH DAMAGE.
   34  *
   35  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
   36  */
   37 
   38 /*
   39  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
   40  *
   41  * NetBSD version by:
   42  *
   43  *      Frank van der Linden <fvdl@wasabisystems.com>
   44  *      Jason Thorpe <thorpej@wasabisystems.com>
   45  *      Jonathan Stone <jonathan@dsg.stanford.edu>
   46  *
   47  * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
   48  * Senior Engineer, Wind River Systems
   49  */
   50 
   51 /*
   52  * The Broadcom BCM5700 is based on technology originally developed by
   53  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
   54  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
   55  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
   56  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
   57  * frames, highly configurable RX filtering, and 16 RX and TX queues
   58  * (which, along with RX filter rules, can be used for QOS applications).
   59  * Other features, such as TCP segmentation, may be available as part
   60  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
   61  * firmware images can be stored in hardware and need not be compiled
   62  * into the driver.
   63  *
   64  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
   65  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
   66  *
   67  * The BCM5701 is a single-chip solution incorporating both the BCM5700
   68  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
   69  * does not support external SSRAM.
   70  *
   71  * Broadcom also produces a variation of the BCM5700 under the "Altima"
   72  * brand name, which is functionally similar but lacks PCI-X support.
   73  *
   74  * Without external SSRAM, you can have at most 4 TX rings,
   75  * and the use of the mini RX ring is disabled. This seems to imply
   76  * that these features are simply not available on the BCM5701. As a
   77  * result, this driver does not implement any support for the mini RX
   78  * ring.
   79  */
   80 
   81 #include <sys/cdefs.h>
   82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.66.2.3 2004/05/29 09:00:24 tron Exp $");
   83 
   84 #include "bpfilter.h"
   85 #include "vlan.h"
   86 
   87 #include <sys/param.h>
   88 #include <sys/systm.h>
   89 #include <sys/callout.h>
   90 #include <sys/sockio.h>
   91 #include <sys/mbuf.h>
   92 #include <sys/malloc.h>
   93 #include <sys/kernel.h>
   94 #include <sys/device.h>
   95 #include <sys/socket.h>
   96 #include <sys/sysctl.h>
   97 
   98 #include <net/if.h>
   99 #include <net/if_dl.h>
  100 #include <net/if_media.h>
  101 #include <net/if_ether.h>
  102 
  103 #ifdef INET
  104 #include <netinet/in.h>
  105 #include <netinet/in_systm.h>
  106 #include <netinet/in_var.h>
  107 #include <netinet/ip.h>
  108 #endif
  109 
  110 #if NBPFILTER > 0
  111 #include <net/bpf.h>
  112 #endif
  113 
  114 #include <dev/pci/pcireg.h>
  115 #include <dev/pci/pcivar.h>
  116 #include <dev/pci/pcidevs.h>
  117 
  118 #include <dev/mii/mii.h>
  119 #include <dev/mii/miivar.h>
  120 #include <dev/mii/miidevs.h>
  121 #include <dev/mii/brgphyreg.h>
  122 
  123 #include <dev/pci/if_bgereg.h>
  124 
  125 #include <uvm/uvm_extern.h>
  126 
  127 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
  128 
  129 
  130 /*
  131  * Tunable thresholds for rx-side bge interrupt mitigation.
  132  */
  133 
  134 /*
  135  * The pairs of values below were obtained from empirical measurement
  136  * on bcm5700 rev B2; they are designed to give roughly 1 receive
  137  * interrupt for every N packets received, where N is, approximately,
  138  * the second value (rx_max_bds) in each pair.  The values are chosen
  139  * such that moving from one pair to the succeeding pair was observed
  140  * to roughly halve interrupt rate under sustained input packet load.
  141  * The values were empirically chosen to avoid overflowing internal
  142  * limits on the bcm5700: increasing rx_ticks much beyond 600
  143  * results in internal wrapping and higher interrupt rates.
  144  * The limit of 46 frames was chosen to match NFS workloads.
  145  * 
  146  * These values also work well on bcm5701, bcm5704C, and (less
  147  * tested) bcm5703.  On other chipsets, (including the Altima chip
  148  * family), the larger values may overflow internal chip limits,
  149  * leading to increasing interrupt rates rather than lower interrupt
  150  * rates.
  151  *
  152  * Applications using heavy interrupt mitigation (interrupting every
  153  * 32 or 46 frames) in both directions may need to increase the TCP
  154  * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
  155  * full link bandwidth, due to ACKs and window updates lingering 
  156  * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
  157  */
  158 struct bge_load_rx_thresh {
  159         int rx_ticks;
  160         int rx_max_bds;
  161 } bge_rx_threshes[] = {
  162         { 32,   2 },
  163         { 50,   4 },
  164         { 100,  8 },
  165         { 192, 16 },
  166         { 416, 32 },
  167         { 598, 46 }
  168 };
  169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
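
/*
 * Editorial sketch (not part of the original driver): given the table
 * above, a hypothetical helper could map a desired packets-per-interrupt
 * target onto a mitigation level by scanning bge_rx_threshes.  The
 * function name is illustrative only; the result would be handed to
 * bge_set_thresh() below.
 */
#ifdef notdef
static int
bge_pick_thresh_lvl(int pkts_per_intr)
{
        int lvl;

        /* Stop at the first slot whose rx_max_bds meets the target. */
        for (lvl = 0; lvl < (int)NBGE_RX_THRESH - 1; lvl++)
                if (bge_rx_threshes[lvl].rx_max_bds >= pkts_per_intr)
                        break;
        return (lvl);
}
#endif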
  170 
  171 /* XXX patchable; should be sysctl'able */
  172 static int      bge_auto_thresh = 1;
  173 static int      bge_rx_thresh_lvl;
  174 
  175 #ifdef __NetBSD__
  176 static int bge_rxthresh_nodenum;
  177 #endif /* __NetBSD__ */
  178 
  179 int bge_probe(struct device *, struct cfdata *, void *);
  180 void bge_attach(struct device *, struct device *, void *);
  181 void bge_release_resources(struct bge_softc *);
  182 void bge_txeof(struct bge_softc *);
  183 void bge_rxeof(struct bge_softc *);
  184 
  185 void bge_tick(void *);
  186 void bge_stats_update(struct bge_softc *);
  187 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
  188 static __inline int bge_cksum_pad(struct mbuf *pkt);
  189 static __inline int bge_compact_dma_runt(struct mbuf *pkt);
  190 
  191 int bge_intr(void *);
  192 void bge_start(struct ifnet *);
  193 int bge_ioctl(struct ifnet *, u_long, caddr_t);
  194 int bge_init(struct ifnet *);
  195 void bge_stop(struct bge_softc *);
  196 void bge_watchdog(struct ifnet *);
  197 void bge_shutdown(void *);
  198 int bge_ifmedia_upd(struct ifnet *);
  199 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  200 
  201 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
  202 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
  203 
  204 void bge_setmulti(struct bge_softc *);
  205 
  206 void bge_handle_events(struct bge_softc *);
  207 int bge_alloc_jumbo_mem(struct bge_softc *);
  208 void bge_free_jumbo_mem(struct bge_softc *);
  209 void *bge_jalloc(struct bge_softc *);
  210 void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
  211 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
  212 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
  213 int bge_init_rx_ring_std(struct bge_softc *);
  214 void bge_free_rx_ring_std(struct bge_softc *);
  215 int bge_init_rx_ring_jumbo(struct bge_softc *);
  216 void bge_free_rx_ring_jumbo(struct bge_softc *);
  217 void bge_free_tx_ring(struct bge_softc *);
  218 int bge_init_tx_ring(struct bge_softc *);
  219 
  220 int bge_chipinit(struct bge_softc *);
  221 int bge_blockinit(struct bge_softc *);
  222 int bge_setpowerstate(struct bge_softc *, int);
  223 
  224 #ifdef notdef
  225 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
  226 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
  227 void bge_vpd_read(struct bge_softc *);
  228 #endif
  229 
  230 u_int32_t bge_readmem_ind(struct bge_softc *, int);
  231 void bge_writemem_ind(struct bge_softc *, int, int);
  232 #ifdef notdef
  233 u_int32_t bge_readreg_ind(struct bge_softc *, int);
  234 #endif
  235 void bge_writereg_ind(struct bge_softc *, int, int);
  236 
  237 int bge_miibus_readreg(struct device *, int, int);
  238 void bge_miibus_writereg(struct device *, int, int, int);
  239 void bge_miibus_statchg(struct device *);
  240 
  241 void bge_reset(struct bge_softc *);
  242 
  243 void    bge_set_thresh(struct ifnet *  /*ifp*/, int /*lvl*/);
  244 void    bge_update_all_threshes(int /*lvl*/);
  245 
  246 void bge_dump_status(struct bge_softc *);
  247 void bge_dump_rxbd(struct bge_rx_bd *);
  248 
  249 #define BGE_DEBUG
  250 #ifdef BGE_DEBUG
  251 #define DPRINTF(x)      if (bgedebug) printf x
  252 #define DPRINTFN(n,x)   if (bgedebug >= (n)) printf x
  253 int     bgedebug = 0;
  254 #else
  255 #define DPRINTF(x)
  256 #define DPRINTFN(n,x)
  257 #endif
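
/*
 * Editorial note: DPRINTF and DPRINTFN take a parenthesized argument
 * list, so calls use double parentheses, e.g. (illustrative only):
 *
 *      DPRINTF(("%s: link state change\n", sc->bge_dev.dv_xname));
 *      DPRINTFN(4, ("%s: rxeof\n", sc->bge_dev.dv_xname));
 */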
  258 
  259 /* Various chip quirks. */
  260 #define BGE_QUIRK_LINK_STATE_BROKEN     0x00000001
  261 #define BGE_QUIRK_CSUM_BROKEN           0x00000002
  262 #define BGE_QUIRK_ONLY_PHY_1            0x00000004
  263 #define BGE_QUIRK_5700_SMALLDMA         0x00000008
  264 #define BGE_QUIRK_5700_PCIX_REG_BUG     0x00000010
  265 #define BGE_QUIRK_PRODUCER_BUG          0x00000020
  266 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG    0x00000040
  267 #define BGE_QUIRK_5705_CORE             0x00000080
  268 #define BGE_QUIRK_FEWER_MBUFS           0x00000100
  269 
  270 /* following bugs are common to bcm5700 rev B, all flavours */
  271 #define BGE_QUIRK_5700_COMMON \
  272         (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
  273 
  274 CFATTACH_DECL(bge, sizeof(struct bge_softc),
  275     bge_probe, bge_attach, NULL, NULL);
  276 
  277 u_int32_t
  278 bge_readmem_ind(sc, off)
  279         struct bge_softc *sc;
  280         int off;
  281 {
  282         struct pci_attach_args  *pa = &(sc->bge_pa);
  283         pcireg_t val;
  284 
  285         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
  286         val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
  287         return val;
  288 }
  289 
  290 void
  291 bge_writemem_ind(sc, off, val)
  292         struct bge_softc *sc;
  293         int off, val;
  294 {
  295         struct pci_attach_args  *pa = &(sc->bge_pa);
  296 
  297         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
  298         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
  299 }
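
/*
 * Editorial note: the two helpers above implement the usual "window
 * register" idiom: one PCI config register (BGE_PCI_MEMWIN_BASEADDR)
 * selects the target offset in NIC-internal memory, and a second one
 * (BGE_PCI_MEMWIN_DATA) moves the data.  A hypothetical use, e.g. to
 * read and then clear a word of the NIC statistics block
 * (illustrative only):
 *
 *      u_int32_t val;
 *
 *      val = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 *      bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);
 */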
  300 
  301 #ifdef notdef
  302 u_int32_t
  303 bge_readreg_ind(sc, off)
  304         struct bge_softc *sc;
  305         int off;
  306 {
  307         struct pci_attach_args  *pa = &(sc->bge_pa);
  308 
  309         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
  310         return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
  311 }
  312 #endif
  313 
  314 void
  315 bge_writereg_ind(sc, off, val)
  316         struct bge_softc *sc;
  317         int off, val;
  318 {
  319         struct pci_attach_args  *pa = &(sc->bge_pa);
  320 
  321         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
  322         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
  323 }
  324 
  325 #ifdef notdef
  326 u_int8_t
  327 bge_vpd_readbyte(sc, addr)
  328         struct bge_softc *sc;
  329         int addr;
  330 {
  331         int i;
  332         u_int32_t val;
  333         struct pci_attach_args  *pa = &(sc->bge_pa);
  334 
  335         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
  336         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  337                 DELAY(10);
  338                 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
  339                     BGE_VPD_FLAG)
  340                         break;
  341         }
  342 
  343         if (i == BGE_TIMEOUT * 10) {
  344                 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
  345                 return(0);
  346         }
  347 
  348         val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
  349 
  350         return((val >> ((addr % 4) * 8)) & 0xFF);
  351 }
  352 
  353 void
  354 bge_vpd_read_res(sc, res, addr)
  355         struct bge_softc *sc;
  356         struct vpd_res *res;
  357         int addr;
  358 {
  359         int i;
  360         u_int8_t *ptr;
  361 
  362         ptr = (u_int8_t *)res;
  363         for (i = 0; i < sizeof(struct vpd_res); i++)
  364                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
  365 }
  366 
  367 void
  368 bge_vpd_read(sc)
  369         struct bge_softc *sc;
  370 {
  371         int pos = 0, i;
  372         struct vpd_res res;
  373 
  374         if (sc->bge_vpd_prodname != NULL)
  375                 free(sc->bge_vpd_prodname, M_DEVBUF);
  376         if (sc->bge_vpd_readonly != NULL)
  377                 free(sc->bge_vpd_readonly, M_DEVBUF);
  378         sc->bge_vpd_prodname = NULL;
  379         sc->bge_vpd_readonly = NULL;
  380 
  381         bge_vpd_read_res(sc, &res, pos);
  382 
  383         if (res.vr_id != VPD_RES_ID) {
  384                 printf("%s: bad VPD resource id: expected %x got %x\n",
  385                         sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
  386                 return;
  387         }
  388 
  389         pos += sizeof(res);
  390         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
  391         if (sc->bge_vpd_prodname == NULL)
  392                 panic("bge_vpd_read");
  393         for (i = 0; i < res.vr_len; i++)
  394                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
  395         sc->bge_vpd_prodname[i] = '\0';
  396         pos += i;
  397 
  398         bge_vpd_read_res(sc, &res, pos);
  399 
  400         if (res.vr_id != VPD_RES_READ) {
  401                 printf("%s: bad VPD resource id: expected %x got %x\n",
  402                     sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
  403                 return;
  404         }
  405 
  406         pos += sizeof(res);
  407         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
  408         if (sc->bge_vpd_readonly == NULL)
  409                 panic("bge_vpd_read");
  410         for (i = 0; i < res.vr_len; i++)
  411                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
  412 }
  413 #endif
  414 
  415 /*
  416  * Read a byte of data stored in the EEPROM at address 'addr.' The
  417  * BCM570x supports both the traditional bitbang interface and an
  418  * auto access interface for reading the EEPROM. We use the auto
  419  * access method.
  420  */
  421 u_int8_t
  422 bge_eeprom_getbyte(sc, addr, dest)
  423         struct bge_softc *sc;
  424         int addr;
  425         u_int8_t *dest;
  426 {
  427         int i;
  428         u_int32_t byte = 0;
  429 
  430         /*
  431          * Enable use of auto EEPROM access so we can avoid
  432          * having to use the bitbang method.
  433          */
  434         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
  435 
  436         /* Reset the EEPROM, load the clock period. */
  437         CSR_WRITE_4(sc, BGE_EE_ADDR,
  438             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
  439         DELAY(20);
  440 
  441         /* Issue the read EEPROM command. */
  442         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
  443 
  444         /* Wait for completion */
  445         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
  446                 DELAY(10);
  447                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
  448                         break;
  449         }
  450 
  451         if (i == BGE_TIMEOUT * 10) {
  452                 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
  453                 return(0);
  454         }
  455 
  456         /* Get result. */
  457         byte = CSR_READ_4(sc, BGE_EE_DATA);
  458 
  459         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
  460 
  461         return(0);
  462 }
  463 
  464 /*
  465  * Read a sequence of bytes from the EEPROM.
  466  */
  467 int
  468 bge_read_eeprom(sc, dest, off, cnt)
  469         struct bge_softc *sc;
  470         caddr_t dest;
  471         int off;
  472         int cnt;
  473 {
  474         int err = 0, i;
  475         u_int8_t byte = 0;
  476 
  477         for (i = 0; i < cnt; i++) {
  478                 err = bge_eeprom_getbyte(sc, off + i, &byte);
  479                 if (err)
  480                         break;
  481                 *(dest + i) = byte;
  482         }
  483 
  484         return(err ? 1 : 0);
  485 }
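
/*
 * Editorial sketch: bge_read_eeprom() copies 'cnt' bytes starting at
 * EEPROM offset 'off' into 'dest', returning 0 on success and 1 on
 * failure.  Hypothetical use; the offset below is illustrative, not a
 * documented EEPROM layout:
 *
 *      u_int8_t buf[ETHER_ADDR_LEN];
 *
 *      if (bge_read_eeprom(sc, (caddr_t)buf, 0x0c, sizeof(buf)))
 *              printf("%s: EEPROM read failed\n", sc->bge_dev.dv_xname);
 */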
  486 
  487 int
  488 bge_miibus_readreg(dev, phy, reg)
  489         struct device *dev;
  490         int phy, reg;
  491 {
  492         struct bge_softc *sc = (struct bge_softc *)dev;
  493         u_int32_t val;
  494         u_int32_t saved_autopoll;
  495         int i;
  496 
  497         /*
  498          * Several chips with builtin PHYs will incorrectly answer to
  499          * other PHY instances than the builtin PHY at id 1.
  500          */
  501         if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
  502                 return(0);
  503 
  504         /* Reading with autopolling on may trigger PCI errors */
  505         saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  506         if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
  507                 CSR_WRITE_4(sc, BGE_MI_MODE,
  508                     saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
  509                 DELAY(40);
  510         }
  511 
  512         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
  513             BGE_MIPHY(phy)|BGE_MIREG(reg));
  514 
  515         for (i = 0; i < BGE_TIMEOUT; i++) {
  516                 val = CSR_READ_4(sc, BGE_MI_COMM);
  517                 if (!(val & BGE_MICOMM_BUSY))
  518                         break;
  519                 delay(10);
  520         }
  521 
  522         if (i == BGE_TIMEOUT) {
  523                 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
  524                 val = 0;
  525                 goto done;
  526         }
  527 
  528         val = CSR_READ_4(sc, BGE_MI_COMM);
  529 
  530 done:
  531         if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
  532                 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
  533                 DELAY(40);
  534         }
  535 
  536         if (val & BGE_MICOMM_READFAIL)
  537                 return(0);
  538 
  539         return(val & 0xFFFF);
  540 }
  541 
  542 void
  543 bge_miibus_writereg(dev, phy, reg, val)
  544         struct device *dev;
  545         int phy, reg, val;
  546 {
  547         struct bge_softc *sc = (struct bge_softc *)dev;
  548         u_int32_t saved_autopoll;
  549         int i;
  550 
  551         /* Touching the PHY while autopolling is on may trigger PCI errors */
  552         saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  553         if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
  554                 delay(40);
  555                 CSR_WRITE_4(sc, BGE_MI_MODE,
  556                     saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
  557                 delay(10); /* 40 usec is supposed to be adequate */
  558         }
  559 
  560         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
  561             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
  562 
  563         for (i = 0; i < BGE_TIMEOUT; i++) {
  564                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
  565                         break;
  566                 delay(10);
  567         }
  568 
  569         if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
  570                 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
  571                 delay(40);
  572         }
  573 
  574         if (i == BGE_TIMEOUT) {
  575                 printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
  576         }
  577 }
  578 
  579 void
  580 bge_miibus_statchg(dev)
  581         struct device *dev;
  582 {
  583         struct bge_softc *sc = (struct bge_softc *)dev;
  584         struct mii_data *mii = &sc->bge_mii;
  585 
  586         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
  587         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
  588                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
  589         } else {
  590                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
  591         }
  592 
  593         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  594                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  595         } else {
  596                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  597         }
  598 }
  599 
  600 /*
  601  * Update rx threshold levels to values in a particular slot
  602  * of the interrupt-mitigation table bge_rx_threshes.
  603  */
  604 void
  605 bge_set_thresh(struct ifnet *ifp, int lvl)
  606 {
  607         struct bge_softc *sc = ifp->if_softc;
  608         int s;
  609 
  610         /* For now, just save the new Rx-intr thresholds and record
  611          * that a threshold update is pending.  Updating the hardware
  612          * registers here (even at splhigh()) is observed to
  613          * occasionally cause glitches where Rx-interrupts are not
  614          * honoured for up to 10 seconds. jonathan@netbsd.org, 2003-04-05
  615          */
  616         s = splnet();
  617         sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
  618         sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
  619         sc->bge_pending_rxintr_change = 1;
  620         splx(s);
  621 
  622         return;
  623 }
  624 
  625 
  626 /*
  627  * Update Rx thresholds of all bge devices
  628  */
  629 void
  630 bge_update_all_threshes(int lvl)
  631 {
  632         struct ifnet *ifp;
  633         const char * const namebuf = "bge";
  634         int namelen;
  635 
  636         if (lvl < 0)
  637                 lvl = 0;
  638         else if (lvl >= NBGE_RX_THRESH)
  639                 lvl = NBGE_RX_THRESH - 1;
  640     
  641         namelen = strlen(namebuf);
  642         /*
  643          * Now search all the interfaces for this name/number
  644          */
  645         TAILQ_FOREACH(ifp, &ifnet, if_list) {
  646                 if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
  647                       continue;
  648                 /* We got a match: update if doing auto-threshold-tuning */
  649                 if (bge_auto_thresh)
  650                         bge_set_thresh(ifp, lvl);
  651         }
  652 }
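
/*
 * Editorial sketch addressing the "XXX patchable; should be
 * sysctl'able" note near the top of this file: a NetBSD-style
 * sysctl_lookup() handler that validates a new level and pushes it to
 * all interfaces via bge_update_all_threshes().  Untested illustration;
 * the sysctl node creation is omitted and the name is hypothetical.
 */
#ifdef notdef
static int
bge_sysctl_rxthresh(SYSCTLFN_ARGS)
{
        int error, t;
        struct sysctlnode node;

        node = *rnode;
        t = bge_rx_thresh_lvl;
        node.sysctl_data = &t;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        if (t < 0 || t >= (int)NBGE_RX_THRESH)
                return (EINVAL);

        bge_rx_thresh_lvl = t;
        bge_update_all_threshes(t);

        return (0);
}
#endif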
  653 
  654 /*
  655  * Handle events that have triggered interrupts.
  656  */
  657 void
  658 bge_handle_events(sc)
  659         struct bge_softc                *sc;
  660 {
  661 
  662         return;
  663 }
  664 
  665 /*
  666  * Memory management for jumbo frames.
  667  */
  668 
  669 int
  670 bge_alloc_jumbo_mem(sc)
  671         struct bge_softc                *sc;
  672 {
  673         caddr_t                 ptr, kva;
  674         bus_dma_segment_t       seg;
  675         int             i, rseg, state, error;
  676         struct bge_jpool_entry   *entry;
  677 
  678         state = error = 0;
  679 
  680         /* Grab a big chunk o' storage. */
  681         if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
  682              &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
  683                 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
  684                 return ENOBUFS;
  685         }
  686 
  687         state = 1;
  688         if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
  689             BUS_DMA_NOWAIT)) {
  690                 printf("%s: can't map DMA buffers (%d bytes)\n",
  691                     sc->bge_dev.dv_xname, (int)BGE_JMEM);
  692                 error = ENOBUFS;
  693                 goto out;
  694         }
  695 
  696         state = 2;
  697         if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
  698             BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
  699                 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
  700                 error = ENOBUFS;
  701                 goto out;
  702         }
  703 
  704         state = 3;
  705         if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
  706             kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
  707                 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
  708                 error = ENOBUFS;
  709                 goto out;
  710         }
  711 
  712         state = 4;
  713         sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
  714         DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
  715 
  716         SLIST_INIT(&sc->bge_jfree_listhead);
  717         SLIST_INIT(&sc->bge_jinuse_listhead);
  718 
  719         /*
  720          * Now divide it up into 9K pieces and save the addresses
  721          * in an array.
  722          */
  723         ptr = sc->bge_cdata.bge_jumbo_buf;
  724         for (i = 0; i < BGE_JSLOTS; i++) {
  725                 sc->bge_cdata.bge_jslots[i] = ptr;
  726                 ptr += BGE_JLEN;
  727                 entry = malloc(sizeof(struct bge_jpool_entry),
  728                     M_DEVBUF, M_NOWAIT);
  729                 if (entry == NULL) {
  730                         printf("%s: no memory for jumbo buffer queue!\n",
  731                             sc->bge_dev.dv_xname);
  732                         error = ENOBUFS;
  733                         goto out;
  734                 }
  735                 entry->slot = i;
  736                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
  737                                  entry, jpool_entries);
  738         }
  739 out:
  740         if (error != 0) {
  741                 switch (state) {
  742                 case 4:
  743                         bus_dmamap_unload(sc->bge_dmatag,
  744                             sc->bge_cdata.bge_rx_jumbo_map);
  745                 case 3:
  746                         bus_dmamap_destroy(sc->bge_dmatag,
  747                             sc->bge_cdata.bge_rx_jumbo_map);
  748                 case 2:
  749                         bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
  750                 case 1:
  751                         bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
  752                         break;
  753                 default:
  754                         break;
  755                 }
  756         }
  757 
  758         return error;
  759 }
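
/*
 * Editorial note: the error path above uses a "state counter" so the
 * switch can fall through and undo exactly the steps that completed,
 * in reverse order.  A minimal, generic illustration of the idiom;
 * step_a/step_b and their undo_* counterparts are hypothetical.
 */
#ifdef notdef
static int
example_staged_alloc(void)
{
        int state = 0, error = 0;

        if (step_a()) { error = ENOBUFS; goto out; }
        state = 1;
        if (step_b()) { error = ENOBUFS; goto out; }
        state = 2;
out:
        if (error != 0) {
                switch (state) {
                case 2:
                        undo_b();
                        /* FALLTHROUGH */
                case 1:
                        undo_a();
                        break;
                default:
                        break;
                }
        }
        return (error);
}
#endif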
  760 
  761 /*
  762  * Allocate a jumbo buffer.
  763  */
  764 void *
  765 bge_jalloc(sc)
  766         struct bge_softc                *sc;
  767 {
  768         struct bge_jpool_entry   *entry;
  769 
  770         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  771 
  772         if (entry == NULL) {
  773                 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
  774                 return(NULL);
  775         }
  776 
  777         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  778         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
  779         return(sc->bge_cdata.bge_jslots[entry->slot]);
  780 }
  781 
  782 /*
  783  * Release a jumbo buffer.
  784  */
  785 void
  786 bge_jfree(m, buf, size, arg)
  787         struct mbuf     *m;
  788         caddr_t         buf;
  789         size_t          size;
  790         void            *arg;
  791 {
  792         struct bge_jpool_entry *entry;
  793         struct bge_softc *sc;
  794         int i, s;
  795 
  796         /* Extract the softc struct pointer. */
  797         sc = (struct bge_softc *)arg;
  798 
  799         if (sc == NULL)
  800                 panic("bge_jfree: can't find softc pointer!");
  801 
  802         /* calculate the slot this buffer belongs to */
  803 
  804         i = ((caddr_t)buf
  805              - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
  806 
  807         if ((i < 0) || (i >= BGE_JSLOTS))
  808                 panic("bge_jfree: asked to free buffer that we don't manage!");
  809 
  810         s = splvm();
  811         entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
  812         if (entry == NULL)
  813                 panic("bge_jfree: buffer not in use!");
  814         entry->slot = i;
  815         SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
  816         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
  817 
  818         if (__predict_true(m != NULL))
  819                 pool_cache_put(&mbpool_cache, m);
  820         splx(s);
  821 }
  822 
  823 
  824 /*
  825  * Initialize a standard receive ring descriptor.
  826  */
  827 int
  828 bge_newbuf_std(sc, i, m, dmamap)
  829         struct bge_softc        *sc;
  830         int                     i;
  831         struct mbuf             *m;
  832         bus_dmamap_t dmamap;
  833 {
  834         struct mbuf             *m_new = NULL;
  835         struct bge_rx_bd        *r;
  836         int                     error;
  837 
  838         if (dmamap == NULL) {
  839                 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
  840                     MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
  841                 if (error != 0)
  842                         return error;
  843         }
  844 
  845         sc->bge_cdata.bge_rx_std_map[i] = dmamap;
  846 
  847         if (m == NULL) {
  848                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  849                 if (m_new == NULL) {
  850                         return(ENOBUFS);
  851                 }
  852 
  853                 MCLGET(m_new, M_DONTWAIT);
  854                 if (!(m_new->m_flags & M_EXT)) {
  855                         m_freem(m_new);
  856                         return(ENOBUFS);
  857                 }
  858                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  859                 if (!sc->bge_rx_alignment_bug)
  860                     m_adj(m_new, ETHER_ALIGN);
  861 
  862                 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
  863                     BUS_DMA_READ|BUS_DMA_NOWAIT))
  864                         return(ENOBUFS);
  865         } else {
  866                 m_new = m;
  867                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  868                 m_new->m_data = m_new->m_ext.ext_buf;
  869                 if (!sc->bge_rx_alignment_bug)
  870                     m_adj(m_new, ETHER_ALIGN);
  871         }
  872 
  873         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
  874         r = &sc->bge_rdata->bge_rx_std_ring[i];
  875         bge_set_hostaddr(&r->bge_addr,
  876             dmamap->dm_segs[0].ds_addr);
  877         r->bge_flags = BGE_RXBDFLAG_END;
  878         r->bge_len = m_new->m_len;
  879         r->bge_idx = i;
  880 
  881         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
  882             offsetof(struct bge_ring_data, bge_rx_std_ring) +
  883                 i * sizeof (struct bge_rx_bd),
  884             sizeof (struct bge_rx_bd),
  885             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
  886 
  887         return(0);
  888 }
  889 
  890 /*
  891  * Initialize a jumbo receive ring descriptor. This allocates
  892  * a jumbo buffer from the pool managed internally by the driver.
  893  */
  894 int
  895 bge_newbuf_jumbo(sc, i, m)
  896         struct bge_softc *sc;
  897         int i;
  898         struct mbuf *m;
  899 {
  900         struct mbuf *m_new = NULL;
  901         struct bge_rx_bd *r;
  902 
  903         if (m == NULL) {
  904                 caddr_t                 *buf = NULL;
  905 
  906                 /* Allocate the mbuf. */
  907                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  908                 if (m_new == NULL) {
  909                         return(ENOBUFS);
  910                 }
  911 
  912                 /* Allocate the jumbo buffer */
  913                 buf = bge_jalloc(sc);
  914                 if (buf == NULL) {
  915                         m_freem(m_new);
  916                         printf("%s: jumbo allocation failed "
  917                             "-- packet dropped!\n", sc->bge_dev.dv_xname);
  918                         return(ENOBUFS);
  919                 }
  920 
  921                 /* Attach the buffer to the mbuf. */
  922                 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
  923                 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
  924                     bge_jfree, sc);
  925         } else {
  926                 m_new = m;
  927                 m_new->m_data = m_new->m_ext.ext_buf;
  928                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
  929         }
  930 
  931         if (!sc->bge_rx_alignment_bug)
  932             m_adj(m_new, ETHER_ALIGN);
  933         /* Set up the descriptor. */
  934         r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
  935         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
  936         bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
  937         r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
  938         r->bge_len = m_new->m_len;
  939         r->bge_idx = i;
  940 
  941         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
  942             offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
  943                 i * sizeof (struct bge_rx_bd),
  944             sizeof (struct bge_rx_bd),
  945             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
  946 
  947         return(0);
  948 }
  949 
  950 /*
  951  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
  952  * that's 1MB of memory, which is a lot. For now, we fill only the first
  953  * 256 ring entries and hope that our CPU is fast enough to keep up with
  954  * the NIC.
  955  */
  956 int
  957 bge_init_rx_ring_std(sc)
  958         struct bge_softc *sc;
  959 {
  960         int i;
  961 
  962         if (sc->bge_flags & BGE_RXRING_VALID)
  963                 return 0;
  964 
  965         for (i = 0; i < BGE_SSLOTS; i++) {
  966                 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
  967                         return(ENOBUFS);
  968         }
  969 
  970         sc->bge_std = i - 1;
  971         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
  972 
  973         sc->bge_flags |= BGE_RXRING_VALID;
  974 
  975         return(0);
  976 }
  977 
  978 void
  979 bge_free_rx_ring_std(sc)
  980         struct bge_softc *sc;
  981 {
  982         int i;
  983 
  984         if (!(sc->bge_flags & BGE_RXRING_VALID))
  985                 return;
  986 
  987         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
  988                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
  989                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
  990                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
  991                         bus_dmamap_destroy(sc->bge_dmatag, 
  992                             sc->bge_cdata.bge_rx_std_map[i]);
  993                 }
  994                 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
  995                     sizeof(struct bge_rx_bd));
  996         }
  997 
  998         sc->bge_flags &= ~BGE_RXRING_VALID;
  999 }
 1000 
 1001 int
 1002 bge_init_rx_ring_jumbo(sc)
 1003         struct bge_softc *sc;
 1004 {
 1005         int i;
 1006         volatile struct bge_rcb *rcb;
 1007 
 1008         if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
 1009                 return 0;
 1010 
 1011         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1012                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
 1013                         return(ENOBUFS);
 1014         }
 1015 
 1016         sc->bge_jumbo = i - 1;
 1017         sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
 1018 
 1019         rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
 1020         rcb->bge_maxlen_flags = 0;
 1021         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1022 
 1023         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 1024 
 1025         return(0);
 1026 }
 1027 
 1028 void
 1029 bge_free_rx_ring_jumbo(sc)
 1030         struct bge_softc *sc;
 1031 {
 1032         int i;
 1033 
 1034         if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
 1035                 return;
 1036 
 1037         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1038                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
 1039                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
 1040                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
 1041                 }
 1042                 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
 1043                     sizeof(struct bge_rx_bd));
 1044         }
 1045 
 1046         sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
 1047 }
 1048 
 1049 void
 1050 bge_free_tx_ring(sc)
 1051         struct bge_softc *sc;
 1052 {
 1053         int i, freed;
 1054         struct txdmamap_pool_entry *dma;
 1055 
 1056         if (!(sc->bge_flags & BGE_TXRING_VALID))
 1057                 return;
 1058 
 1059         freed = 0;
 1060 
 1061         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1062                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
 1063                         freed++;
 1064                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
 1065                         sc->bge_cdata.bge_tx_chain[i] = NULL;
 1066                         SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
 1067                                             link);
 1068                         sc->txdma[i] = 0;
 1069                 }
 1070                 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
 1071                     sizeof(struct bge_tx_bd));
 1072         }
 1073 
 1074         while ((dma = SLIST_FIRST(&sc->txdma_list))) {
 1075                 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
 1076                 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
 1077                 free(dma, M_DEVBUF);
 1078         }
 1079 
 1080         sc->bge_flags &= ~BGE_TXRING_VALID;
 1081 }
 1082 
 1083 int
 1084 bge_init_tx_ring(sc)
 1085         struct bge_softc *sc;
 1086 {
 1087         int i;
 1088         bus_dmamap_t dmamap;
 1089         struct txdmamap_pool_entry *dma;
 1090 
 1091         if (sc->bge_flags & BGE_TXRING_VALID)
 1092                 return 0;
 1093 
 1094         sc->bge_txcnt = 0;
 1095         sc->bge_tx_saved_considx = 0;
 1096         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1097         if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
 1098                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1099 
 1100         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1101         if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
 1102                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1103 
 1104         SLIST_INIT(&sc->txdma_list);
 1105         for (i = 0; i < BGE_RSLOTS; i++) {
 1106                 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
 1107                     BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
 1108                     &dmamap))
 1109                         return(ENOBUFS);
 1110                 if (dmamap == NULL)
 1111                         panic("dmamap NULL in bge_init_tx_ring");
 1112                 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
 1113                 if (dma == NULL) {
 1114                         printf("%s: can't alloc txdmamap_pool_entry\n",
 1115                             sc->bge_dev.dv_xname);
 1116                         bus_dmamap_destroy(sc->bge_dmatag, dmamap);
 1117                         return (ENOMEM);
 1118                 }
 1119                 dma->dmamap = dmamap;
 1120                 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
 1121         }
 1122 
 1123         sc->bge_flags |= BGE_TXRING_VALID;
 1124 
 1125         return(0);
 1126 }
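
/*
 * Editorial note: the maps created above form a free pool.  The
 * transmit path borrows one per packet and the completion path returns
 * it; roughly (illustrative fragment only):
 *
 *      dma = SLIST_FIRST(&sc->txdma_list);
 *      if (dma == NULL)
 *              return (ENOBUFS);
 *      SLIST_REMOVE_HEAD(&sc->txdma_list, link);
 *      ... load the mbuf chain with bus_dmamap_load_mbuf(sc->bge_dmatag,
 *          dma->dmamap, ...) and record 'dma' so the completion path can
 *          put it back on txdma_list ...
 */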
 1127 
 1128 void
 1129 bge_setmulti(sc)
 1130         struct bge_softc *sc;
 1131 {
 1132         struct ethercom         *ac = &sc->ethercom;
 1133         struct ifnet            *ifp = &ac->ec_if;
 1134         struct ether_multi      *enm;
 1135         struct ether_multistep  step;
 1136         u_int32_t               hashes[4] = { 0, 0, 0, 0 };
 1137         u_int32_t               h;
 1138         int                     i;
 1139 
 1140         if (ifp->if_flags & IFF_PROMISC)
 1141                 goto allmulti;
 1142 
 1143         /* Now program new ones. */
 1144         ETHER_FIRST_MULTI(step, ac, enm);
 1145         while (enm != NULL) {
 1146                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 1147                         /*
 1148                          * We must listen to a range of multicast addresses.
 1149                          * For now, just accept all multicasts, rather than
 1150                          * trying to set only those filter bits needed to match
 1151                          * the range.  (At this time, the only use of address
 1152                          * ranges is for IP multicast routing, for which the
 1153                          * range is big enough to require all bits set.)
 1154                          */
 1155                         goto allmulti;
 1156                 }
 1157 
 1158                 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
 1159 
 1160                 /* Just want the 7 least-significant bits. */
 1161                 h &= 0x7f;
 1162 
 1163                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 1164                 ETHER_NEXT_MULTI(step, enm);
 1165         }
 1166 
 1167         ifp->if_flags &= ~IFF_ALLMULTI;
 1168         goto setit;
 1169 
 1170  allmulti:
 1171         ifp->if_flags |= IFF_ALLMULTI;
 1172         hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
 1173 
 1174  setit:
 1175         for (i = 0; i < 4; i++)
 1176                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
 1177 }
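
/*
 * Editorial sketch: how the hash above selects a filter bit.  The low
 * 7 bits of the little-endian (reflected) CRC-32 of the address pick
 * bit (h & 0x1f) of register BGE_MAR0 + 4 * ((h & 0x60) >> 5).  The
 * standalone CRC below is believed equivalent to the kernel's
 * ether_crc32_le() for this use; the name is illustrative only.
 */
#ifdef notdef
static u_int32_t
example_crc32_le(const u_int8_t *buf, size_t len)
{
        u_int32_t crc = 0xffffffff;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return (crc);
}
#endif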
 1178 
 1179 const int bge_swapbits[] = {
 1180         0,
 1181         BGE_MODECTL_BYTESWAP_DATA,
 1182         BGE_MODECTL_WORDSWAP_DATA,
 1183         BGE_MODECTL_BYTESWAP_NONFRAME,
 1184         BGE_MODECTL_WORDSWAP_NONFRAME,
 1185 
 1186         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
 1187         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
 1188         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
 1189 
 1190         BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
 1191         BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
 1192 
 1193         BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
 1194 
 1195         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1196             BGE_MODECTL_BYTESWAP_NONFRAME,
 1197         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1198             BGE_MODECTL_WORDSWAP_NONFRAME,
 1199         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
 1200             BGE_MODECTL_WORDSWAP_NONFRAME,
 1201         BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
 1202             BGE_MODECTL_WORDSWAP_NONFRAME,
 1203 
 1204         BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1205             BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
 1206 };
 1207 
 1208 int bge_swapindex = 0;
 1209 
 1210 /*
 1211  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 1212  * self-test results.
 1213  */
 1214 int
 1215 bge_chipinit(sc)
 1216         struct bge_softc *sc;
 1217 {
 1218         u_int32_t               cachesize;
 1219         int                     i;
 1220         u_int32_t               dma_rw_ctl;
 1221         struct pci_attach_args  *pa = &(sc->bge_pa);
 1222 
 1223 
 1224         /* Set endianness before we access any non-PCI registers. */
 1225         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
 1226             BGE_INIT);
 1227 
 1228         /* Set power state to D0. */
 1229         bge_setpowerstate(sc, 0);
 1230         
 1231         /*
 1232          * Check the 'ROM failed' bit on the RX CPU to see if
 1233          * self-tests passed.
 1234          */
 1235         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
 1236                 printf("%s: RX CPU self-diagnostics failed!\n",
 1237                     sc->bge_dev.dv_xname);
 1238                 return(ENODEV);
 1239         }
 1240 
 1241         /* Clear the MAC control register */
 1242         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 1243 
 1244         /*
 1245          * Clear the MAC statistics block in the NIC's
 1246          * internal memory.
 1247          */
 1248         for (i = BGE_STATS_BLOCK;
 1249             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1250                 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
 1251 
 1252         for (i = BGE_STATUS_BLOCK;
 1253             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1254                 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
 1255 
 1256         /* Set up the PCI DMA control register. */
 1257         if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
 1258             BGE_PCISTATE_PCI_BUSMODE) {
 1259                 /* Conventional PCI bus */
 1260                 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname));
 1261                 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
 1262                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1263                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
 1264                 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1265                         dma_rw_ctl |= 0x0F;
 1266                 }
 1267         } else {
 1268                 DPRINTFN(4, ("(%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname));
 1269                 /* PCI-X bus */
 1270                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1271                     (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1272                     (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1273                     (0x0F);
 1274                 /*
 1275                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
 1276                  * for hardware bugs, which means we should also clear
 1277                  * the low-order MINDMA bits.  In addition, the 5704
 1278                  * uses a different encoding of read/write watermarks.
 1279                  */
 1280                 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
 1281                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1282                           /* should be 0x1f0000 */
 1283                           (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1284                           (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1285                         dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1286                 }
 1287                 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
 1288                         dma_rw_ctl &=  0xfffffff0;
 1289                         dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1290                 }
 1291         }
 1292 
 1293         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
 1294 
 1295         /*
 1296          * Set up general mode register.
 1297          */
 1298         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
 1299                     BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
 1300                     BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
 1301 
 1302         /* Get cache line size. */
 1303         cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
 1304 
 1305         /*
 1306          * Avoid violating PCI spec on certain chip revs.
 1307          */
 1308         if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
 1309             PCIM_CMD_MWIEN) {
 1310                 switch(cachesize) {
 1311                 case 1:
 1312                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1313                                    BGE_PCI_WRITE_BNDRY_16BYTES);
 1314                         break;
 1315                 case 2:
 1316                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1317                                    BGE_PCI_WRITE_BNDRY_32BYTES);
 1318                         break;
 1319                 case 4:
 1320                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1321                                    BGE_PCI_WRITE_BNDRY_64BYTES);
 1322                         break;
 1323                 case 8:
 1324                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1325                                    BGE_PCI_WRITE_BNDRY_128BYTES);
 1326                         break;
 1327                 case 16:
 1328                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1329                                    BGE_PCI_WRITE_BNDRY_256BYTES);
 1330                         break;
 1331                 case 32:
 1332                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1333                                    BGE_PCI_WRITE_BNDRY_512BYTES);
 1334                         break;
 1335                 case 64:
 1336                         PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
 1337                                    BGE_PCI_WRITE_BNDRY_1024BYTES);
 1338                         break;
 1339                 default:
 1340                 /* Disable PCI memory write and invalidate. */
 1341 #if 0
 1342                         if (bootverbose)
 1343                                 printf("%s: cache line size %d not "
 1344                                     "supported; disabling PCI MWI\n",
 1345                                     sc->bge_dev.dv_xname, cachesize);
 1346 #endif
 1347                         PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
 1348                             PCIM_CMD_MWIEN);
 1349                         break;
 1350                 }
 1351         }
 1352 
 1353         /*
 1354          * Disable memory write invalidate.  Apparently it is not supported
 1355          * properly by these devices.
 1356          */
 1357         PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
 1358 
 1359 
 1360 #ifdef __brokenalpha__
 1361         /*
 1362          * Must ensure that we do not cross an 8K (bytes) boundary
 1363          * for DMA reads.  Our highest limit is 1K bytes.  This is a
 1364          * restriction on some ALPHA platforms with early revision
 1365          * 21174 PCI chipsets, such as the AlphaPC 164lx
 1366          */
 1367         PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
 1368 #endif
 1369 
 1370         /* Set the timer prescaler (always 66MHz) */
 1371         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 1372 
 1373         return(0);
 1374 }
 1375 
 1376 int
 1377 bge_blockinit(sc)
 1378         struct bge_softc *sc;
 1379 {
 1380         volatile struct bge_rcb         *rcb;
 1381         bus_size_t              rcb_addr;
 1382         int                     i;
 1383         struct ifnet            *ifp = &sc->ethercom.ec_if;
 1384         bge_hostaddr            taddr;
 1385 
 1386         /*
 1387          * Initialize the memory window pointer register so that
 1388          * we can access the first 32K of internal NIC RAM. This will
 1389          * allow us to set up the TX send ring RCBs and the RX return
 1390          * ring RCBs, plus other things which live in NIC memory.
 1391          */
 1392 
 1393         pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
 1394             BGE_PCI_MEMWIN_BASEADDR, 0);
 1395 
 1396         /* Configure mbuf memory pool */
 1397         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1398                 if (sc->bge_extram) {
 1399                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1400                             BGE_EXT_SSRAM);
 1401                         if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
 1402                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1403                         else
 1404                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1405                 } else {
 1406                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1407                             BGE_BUFFPOOL_1);
 1408                         if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
 1409                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1410                         else
 1411                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1412                 }
 1413 
 1414                 /* Configure DMA resource pool */
 1415                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1416                     BGE_DMA_DESCRIPTORS);
 1417                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1418         }
 1419 
 1420         /* Configure mbuf pool watermarks */
 1421 #ifdef ORIG_WPAUL_VALUES
 1422         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
 1423         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
 1424         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
 1425 #else
 1426         /* new broadcom docs strongly recommend these: */
 1427         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1428                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1429                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1430         } else {
 1431                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1432                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1433         }
 1434         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1435 #endif
 1436 
 1437         /* Configure DMA resource watermarks */
 1438         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1439         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1440 
 1441         /* Enable buffer manager */
 1442         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1443                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1444                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1445 
 1446                 /* Poll for buffer manager start indication */
 1447                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1448                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1449                                 break;
 1450                         DELAY(10);
 1451                 }
 1452 
 1453                 if (i == BGE_TIMEOUT) {
 1454                         printf("%s: buffer manager failed to start\n",
 1455                             sc->bge_dev.dv_xname);
 1456                         return(ENXIO);
 1457                 }
 1458         }
 1459 
 1460         /* Enable flow-through queues */
 1461         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1462         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1463 
 1464         /* Wait until queue initialization is complete */
 1465         for (i = 0; i < BGE_TIMEOUT; i++) {
 1466                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1467                         break;
 1468                 DELAY(10);
 1469         }
 1470 
 1471         if (i == BGE_TIMEOUT) {
 1472                 printf("%s: flow-through queue init failed\n",
 1473                     sc->bge_dev.dv_xname);
 1474                 return(ENXIO);
 1475         }
 1476 
 1477         /* Initialize the standard RX ring control block */
 1478         rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
 1479         bge_set_hostaddr(&rcb->bge_hostaddr,
 1480             BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
 1481         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1482                 rcb->bge_maxlen_flags =
 1483                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1484         } else {
 1485                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1486         }
 1487         if (sc->bge_extram)
 1488                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1489         else
 1490                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1491         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1492         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1493         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1494         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1495 
 1496         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1497                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 1498         } else {
 1499                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 1500         }
 1501 
 1502         /*
 1503          * Initialize the jumbo RX ring control block.
 1504          * We set the 'ring disabled' bit in the flags
 1505          * field until we're actually ready to start
 1506          * using this ring (i.e. once we set the MTU
 1507          * high enough to require it).
 1508          */
 1509         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1510                 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
 1511                 bge_set_hostaddr(&rcb->bge_hostaddr,
 1512                     BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
 1513                 rcb->bge_maxlen_flags = 
 1514                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
 1515                         BGE_RCB_FLAG_RING_DISABLED);
 1516                 if (sc->bge_extram)
 1517                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1518                 else
 1519                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1520         
 1521                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1522                     rcb->bge_hostaddr.bge_addr_hi);
 1523                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1524                     rcb->bge_hostaddr.bge_addr_lo);
 1525                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1526                     rcb->bge_maxlen_flags);
 1527                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1528 
 1529                 /* Set up dummy disabled mini ring RCB */
 1530                 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
 1531                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
 1532                     BGE_RCB_FLAG_RING_DISABLED);
 1533                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1534                     rcb->bge_maxlen_flags);
 1535 
 1536                 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 1537                     offsetof(struct bge_ring_data, bge_info),
 1538                     sizeof (struct bge_gib),
 1539                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1540         }
 1541 
 1542         /*
 1543          * Set the BD ring replenish thresholds. The recommended
 1544          * values are 1/8th the number of descriptors allocated to
 1545          * each ring.
 1546          */
 1547         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1548         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
 1549 
 1550         /*
 1551          * Disable all unused send rings by setting the 'ring disabled'
 1552          * bit in the flags field of all the TX send ring control blocks.
 1553          * These are located in NIC memory.
 1554          */
 1555         rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
 1556         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1557                 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
 1558                     BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
 1559                 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
 1560                 rcb_addr += sizeof(struct bge_rcb);
 1561         }
 1562 
 1563         /* Configure TX RCB 0 (we use only the first ring) */
 1564         rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
 1565         bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
 1566         RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
 1567         RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
 1568         RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
 1569                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
 1570         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1571                 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 
 1572                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
 1573         }
 1574 
 1575         /* Disable all unused RX return rings */
 1576         rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
 1577         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1578                 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
 1579                 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
 1580                 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 
 1581                             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1582                                      BGE_RCB_FLAG_RING_DISABLED));
 1583                 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
 1584                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1585                     (i * (sizeof(u_int64_t))), 0);
 1586                 rcb_addr += sizeof(struct bge_rcb);
 1587         }
 1588 
 1589         /* Initialize RX ring indexes */
 1590         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1591         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1592         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1593 
 1594         /*
 1595          * Set up RX return ring 0
 1596          * Note that the NIC address for RX return rings is 0x00000000.
 1597          * The return rings live entirely within the host, so the
 1598          * nicaddr field in the RCB isn't used.
 1599          */
 1600         rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
 1601         bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
 1602         RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
 1603         RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
 1604         RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
 1605         RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
 1606             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
 1607 
 1608         /* Set random backoff seed for TX */
 1609         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1610             LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
 1611             LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
 1612             LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
 1613             BGE_TX_BACKOFF_SEED_MASK);
 1614 
 1615         /* Set inter-packet gap */
 1616         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
 1617 
 1618         /*
 1619          * Specify which ring to use for packets that don't match
 1620          * any RX rules.
 1621          */
 1622         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1623 
 1624         /*
 1625          * Configure number of RX lists. One interrupt distribution
 1626          * list, sixteen active lists, one bad frames class.
 1627          */
 1628         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1629 
 1630         /* Initialize RX list placement stats mask. */
 1631         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1632         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1633 
 1634         /* Disable host coalescing until we get it set up */
 1635         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1636 
 1637         /* Poll to make sure it's shut down. */
 1638         for (i = 0; i < BGE_TIMEOUT; i++) {
 1639                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1640                         break;
 1641                 DELAY(10);
 1642         }
 1643 
 1644         if (i == BGE_TIMEOUT) {
 1645                 printf("%s: host coalescing engine failed to idle\n",
 1646                     sc->bge_dev.dv_xname);
 1647                 return(ENXIO);
 1648         }
 1649 
 1650         /* Set up host coalescing defaults */
 1651         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1652         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1653         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1654         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1655         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1656                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1657                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1658         }
 1659         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1660         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
 1661 
 1662         /* Set up address of statistics block */
 1663         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1664                 bge_set_hostaddr(&taddr,
 1665                     BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
 1666                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1667                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1668                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
 1669                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
 1670         }
 1671 
 1672         /* Set up address of status block */
 1673         bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
 1674         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1675         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
 1676         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
 1677         sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
 1678         sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
 1679 
 1680         /* Turn on host coalescing state machine */
 1681         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1682 
 1683         /* Turn on RX BD completion state machine and enable attentions */
 1684         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1685             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1686 
 1687         /* Turn on RX list placement state machine */
 1688         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1689 
 1690         /* Turn on RX list selector state machine. */
 1691         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1692                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1693         }
 1694 
 1695         /* Turn on DMA, clear stats */
 1696         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1697             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1698             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1699             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1700             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1701 
 1702         /* Set misc. local control, enable interrupts on attentions */
 1703         sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
 1704 
 1705 #ifdef notdef
 1706         /* Assert GPIO pins for PHY reset */
 1707         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1708             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1709         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1710             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1711 #endif
 1712 
 1713 #if defined(not_quite_yet)
 1714         /* The Linux driver enables GPIO pin #1 on 5700s */
 1715         if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
 1716                 sc->bge_local_ctrl_reg |= 
 1717                   (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
 1718         }
 1719 #endif  
 1720         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
 1721 
 1722         /* Turn on DMA completion state machine */
 1723         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1724                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1725         }
 1726 
 1727         /* Turn on write DMA state machine */
 1728         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1729             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1730 
 1731         /* Turn on read DMA state machine */
 1732         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1733             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1734 
 1735         /* Turn on RX data completion state machine */
 1736         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1737 
 1738         /* Turn on RX BD initiator state machine */
 1739         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1740 
 1741         /* Turn on RX data and RX BD initiator state machine */
 1742         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1743 
 1744         /* Turn on Mbuf cluster free state machine */
 1745         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 1746                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1747         }
 1748 
 1749         /* Turn on send BD completion state machine */
 1750         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1751 
 1752         /* Turn on send data completion state machine */
 1753         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1754 
 1755         /* Turn on send data initiator state machine */
 1756         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1757 
 1758         /* Turn on send BD initiator state machine */
 1759         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1760 
 1761         /* Turn on send BD selector state machine */
 1762         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1763 
 1764         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1765         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1766             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1767 
 1768         /* ack/clear link change events */
 1769         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1770             BGE_MACSTAT_CFG_CHANGED);
 1771         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1772 
 1773         /* Enable PHY auto polling (for MII/GMII only) */
 1774         if (sc->bge_tbi) {
 1775                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1776         } else {
 1777                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
 1778                 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
 1779                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1780                             BGE_EVTENB_MI_INTERRUPT);
 1781         }
 1782 
 1783         /* Enable link state change attentions. */
 1784         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1785 
 1786         return(0);
 1787 }
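
/*
 * Illustrative sketch, not part of the driver: bge_blockinit() repeats
 * the same enable-and-poll idiom for several hardware blocks (buffer
 * manager, flow-through queues, host coalescing engine).  A
 * hypothetical helper capturing that idiom, assuming only the CSR
 * accessors and DELAY() used above:
 */
#ifdef BGE_EXAMPLE
static int
bge_poll_bit(struct bge_softc *sc, bus_size_t reg, u_int32_t bit, int set)
{
        int i;

        for (i = 0; i < BGE_TIMEOUT; i++) {
                u_int32_t val = CSR_READ_4(sc, reg);

                /* Wait for the bit to read back set (or clear). */
                if (set ? (val & bit) != 0 : (val & bit) == 0)
                        return (0);
                DELAY(10);
        }
        return (ENXIO);         /* timed out, as the callers above report */
}
#endif /* BGE_EXAMPLE */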
 1788 
 1789 static const struct bge_revision {
 1790         uint32_t                br_chipid;
 1791         uint32_t                br_quirks;
 1792         const char              *br_name;
 1793 } bge_revisions[] = {
 1794         { BGE_CHIPID_BCM5700_A0,
 1795           BGE_QUIRK_LINK_STATE_BROKEN,
 1796           "BCM5700 A0" },
 1797 
 1798         { BGE_CHIPID_BCM5700_A1,
 1799           BGE_QUIRK_LINK_STATE_BROKEN,
 1800           "BCM5700 A1" },
 1801 
 1802         { BGE_CHIPID_BCM5700_B0,
 1803           BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
 1804           "BCM5700 B0" },
 1805 
 1806         { BGE_CHIPID_BCM5700_B1,
 1807           BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
 1808           "BCM5700 B1" },
 1809 
 1810         { BGE_CHIPID_BCM5700_B2,
 1811           BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
 1812           "BCM5700 B2" },
 1813 
 1814         /* This is treated like a BCM5700 Bx */
 1815         { BGE_CHIPID_BCM5700_ALTIMA,
 1816           BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
 1817           "BCM5700 Altima" },
 1818 
 1819         { BGE_CHIPID_BCM5700_C0,
 1820           0,
 1821           "BCM5700 C0" },
 1822 
 1823         { BGE_CHIPID_BCM5701_A0,
 1824           0, /*XXX really, just not known */
 1825           "BCM5701 A0" },
 1826 
 1827         { BGE_CHIPID_BCM5701_B0,
 1828           BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
 1829           "BCM5701 B0" },
 1830 
 1831         { BGE_CHIPID_BCM5701_B2,
 1832           BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
 1833           "BCM5701 B2" },
 1834 
 1835         { BGE_CHIPID_BCM5701_B5,
 1836           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
 1837           "BCM5701 B5" },
 1838 
 1839         { BGE_CHIPID_BCM5703_A0,
 1840           0,
 1841           "BCM5703 A0" },
 1842 
 1843         { BGE_CHIPID_BCM5703_A1,
 1844           0,
 1845           "BCM5703 A1" },
 1846 
 1847         { BGE_CHIPID_BCM5703_A2,
 1848           BGE_QUIRK_ONLY_PHY_1,
 1849           "BCM5703 A2" },
 1850 
 1851         { BGE_CHIPID_BCM5703_A3,
 1852           BGE_QUIRK_ONLY_PHY_1,
 1853           "BCM5703 A3" },
 1854 
 1855         { BGE_CHIPID_BCM5704_A0,
 1856           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
 1857           "BCM5704 A0" },
 1858 
 1859         { BGE_CHIPID_BCM5704_A1,
 1860           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
 1861           "BCM5704 A1" },
 1862 
 1863         { BGE_CHIPID_BCM5704_A2,
 1864           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
 1865           "BCM5704 A2" },
 1866 
 1867         { BGE_CHIPID_BCM5704_A3,
 1868           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
 1869           "BCM5704 A3" },
 1870 
 1871         { BGE_CHIPID_BCM5705_A0,
 1872           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
 1873           "BCM5705 A0" },
 1874 
 1875         { BGE_CHIPID_BCM5705_A1,
 1876           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
 1877           "BCM5705 A1" },
 1878 
 1879         { BGE_CHIPID_BCM5705_A2,
 1880           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
 1881           "BCM5705 A2" },
 1882 
 1883         { BGE_CHIPID_BCM5705_A3,
 1884           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
 1885           "BCM5705 A3" },
 1886 
 1887         { 0, 0, NULL }
 1888 };
 1889 
 1890 /*
 1891  * Some defaults for major revisions, so that newer steppings
 1892  * that we don't know about have a shot at working.
 1893  */
 1894 static const struct bge_revision bge_majorrevs[] = {
 1895         { BGE_ASICREV_BCM5700,
 1896           BGE_QUIRK_LINK_STATE_BROKEN,
 1897           "unknown BCM5700" },
 1898 
 1899         { BGE_ASICREV_BCM5701,
 1900           BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
 1901           "unknown BCM5701" },
 1902 
 1903         { BGE_ASICREV_BCM5703,
 1904           0,
 1905           "unknown BCM5703" },
 1906 
 1907         { BGE_ASICREV_BCM5704,
 1908           BGE_QUIRK_ONLY_PHY_1,
 1909           "unknown BCM5704" },
 1910 
 1911         { BGE_ASICREV_BCM5705,
 1912           BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
 1913           "unknown BCM5705" },
 1914 
 1915         { 0,
 1916           0,
 1917           NULL }
 1918 };
 1919 
 1920 
 1921 static const struct bge_revision *
 1922 bge_lookup_rev(uint32_t chipid)
 1923 {
 1924         const struct bge_revision *br;
 1925 
 1926         for (br = bge_revisions; br->br_name != NULL; br++) {
 1927                 if (br->br_chipid == chipid)
 1928                         return (br);
 1929         }
 1930 
 1931         for (br = bge_majorrevs; br->br_name != NULL; br++) {
 1932                 if (br->br_chipid == BGE_ASICREV(chipid))
 1933                         return (br);
 1934         }
 1935 
 1936         return (NULL);
 1937 }
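
/*
 * Usage sketch, not part of the driver: bge_lookup_rev() prefers an
 * exact stepping match from bge_revisions[], then falls back to the
 * ASIC major revision in bge_majorrevs[], so unknown steppings still
 * inherit their family's quirks.  The function below is hypothetical.
 */
#ifdef BGE_EXAMPLE
static void
bge_print_rev(uint32_t chipid)
{
        const struct bge_revision *br = bge_lookup_rev(chipid);

        if (br == NULL)
                printf("unknown ASIC (0x%04x)\n", chipid >> 16);
        else
                printf("ASIC %s, quirks 0x%08x\n", br->br_name,
                    br->br_quirks);
}
#endif /* BGE_EXAMPLE */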
 1938 
 1939 static const struct bge_product {
 1940         pci_vendor_id_t         bp_vendor;
 1941         pci_product_id_t        bp_product;
 1942         const char              *bp_name;
 1943 } bge_products[] = {
 1944         /*
 1945          * The BCM5700 documentation seems to indicate that the hardware
 1946          * still has the Alteon vendor ID burned into it, though it
 1947          * should always be overridden by the value in the EEPROM.  We'll
 1948          * check for it anyway.
 1949          */
 1950         { PCI_VENDOR_ALTEON,
 1951           PCI_PRODUCT_ALTEON_BCM5700,
 1952           "Broadcom BCM5700 Gigabit Ethernet",
 1953           },
 1954         { PCI_VENDOR_ALTEON,
 1955           PCI_PRODUCT_ALTEON_BCM5701,
 1956           "Broadcom BCM5701 Gigabit Ethernet",
 1957           },
 1958 
 1959         { PCI_VENDOR_ALTIMA,
 1960           PCI_PRODUCT_ALTIMA_AC1000,
 1961           "Altima AC1000 Gigabit Ethernet",
 1962           },
 1963         { PCI_VENDOR_ALTIMA,
 1964           PCI_PRODUCT_ALTIMA_AC1001,
 1965           "Altima AC1001 Gigabit Ethernet",
 1966            },
 1967         { PCI_VENDOR_ALTIMA,
 1968           PCI_PRODUCT_ALTIMA_AC9100,
 1969           "Altima AC9100 Gigabit Ethernet",
 1970           },
 1971 
 1972         { PCI_VENDOR_BROADCOM,
 1973           PCI_PRODUCT_BROADCOM_BCM5700,
 1974           "Broadcom BCM5700 Gigabit Ethernet",
 1975           },
 1976         { PCI_VENDOR_BROADCOM,
 1977           PCI_PRODUCT_BROADCOM_BCM5701,
 1978           "Broadcom BCM5701 Gigabit Ethernet",
 1979           },
 1980         { PCI_VENDOR_BROADCOM,
 1981           PCI_PRODUCT_BROADCOM_BCM5702,
 1982           "Broadcom BCM5702 Gigabit Ethernet",
 1983           },
 1984         { PCI_VENDOR_BROADCOM,
 1985           PCI_PRODUCT_BROADCOM_BCM5702X,
 1986           "Broadcom BCM5702X Gigabit Ethernet" },
 1987 
 1988         { PCI_VENDOR_BROADCOM,
 1989           PCI_PRODUCT_BROADCOM_BCM5703,
 1990           "Broadcom BCM5703 Gigabit Ethernet",
 1991           },
 1992         { PCI_VENDOR_BROADCOM,
 1993           PCI_PRODUCT_BROADCOM_BCM5703X,
 1994           "Broadcom BCM5703X Gigabit Ethernet",
 1995           },
 1996         { PCI_VENDOR_BROADCOM,
 1997           PCI_PRODUCT_BROADCOM_BCM5703A3,
 1998           "Broadcom BCM5703A3 Gigabit Ethernet",
 1999           },
 2000 
 2001         { PCI_VENDOR_BROADCOM,
 2002           PCI_PRODUCT_BROADCOM_BCM5704C,
 2003           "Broadcom BCM5704C Dual Gigabit Ethernet",
 2004           },
 2005         { PCI_VENDOR_BROADCOM,
 2006           PCI_PRODUCT_BROADCOM_BCM5704S,
 2007           "Broadcom BCM5704S Dual Gigabit Ethernet",
 2008           },
 2009 
 2010         { PCI_VENDOR_BROADCOM,
 2011           PCI_PRODUCT_BROADCOM_BCM5705,
 2012           "Broadcom BCM5705 Gigabit Ethernet",
 2013           },
 2014         { PCI_VENDOR_BROADCOM,
 2015           PCI_PRODUCT_BROADCOM_BCM5705_ALT,
 2016           "Broadcom BCM5705 Gigabit Ethernet",
 2017           },
 2018         { PCI_VENDOR_BROADCOM,
 2019           PCI_PRODUCT_BROADCOM_BCM5705M,
 2020           "Broadcom BCM5705M Gigabit Ethernet",
 2021           },
 2022 
 2023         { PCI_VENDOR_BROADCOM,
 2024           PCI_PRODUCT_BROADCOM_BCM5782,
 2025           "Broadcom BCM5782 Gigabit Ethernet",
 2026           },
 2027         { PCI_VENDOR_BROADCOM,
 2028           PCI_PRODUCT_BROADCOM_BCM5788,
 2029           "Broadcom BCM5788 Gigabit Ethernet",
 2030           },
 2031 
 2032         { PCI_VENDOR_BROADCOM,
 2033           PCI_PRODUCT_BROADCOM_BCM5901,
 2034           "Broadcom BCM5901 Fast Ethernet",
 2035           },
 2036         { PCI_VENDOR_BROADCOM,
 2037           PCI_PRODUCT_BROADCOM_BCM5901A2,
 2038           "Broadcom BCM5901A2 Fast Ethernet",
 2039           },
 2040 
 2041         { PCI_VENDOR_SCHNEIDERKOCH,
 2042           PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
 2043           "SysKonnect SK-9Dx1 Gigabit Ethernet",
 2044           },
 2045 
 2046         { PCI_VENDOR_3COM,
 2047           PCI_PRODUCT_3COM_3C996,
 2048           "3Com 3c996 Gigabit Ethernet",
 2049           },
 2050 
 2051         { 0,
 2052           0,
 2053           NULL },
 2054 };
 2055 
 2056 static const struct bge_product *
 2057 bge_lookup(const struct pci_attach_args *pa)
 2058 {
 2059         const struct bge_product *bp;
 2060 
 2061         for (bp = bge_products; bp->bp_name != NULL; bp++) {
 2062                 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
 2063                     PCI_PRODUCT(pa->pa_id) == bp->bp_product)
 2064                         return (bp);
 2065         }
 2066 
 2067         return (NULL);
 2068 }
 2069 
 2070 int
 2071 bge_setpowerstate(sc, powerlevel)
 2072         struct bge_softc *sc;
 2073         int powerlevel;
 2074 {
 2075 #ifdef NOTYET
 2076         u_int32_t pm_ctl = 0;
 2077 
 2078         /* XXX FIXME: make sure indirect accesses enabled? */
 2079         pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
 2080         pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
 2081         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
 2082 
 2083         /* clear the PME_assert bit and power state bits, enable PME */
 2084         pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
 2085         pm_ctl &= ~PCIM_PSTAT_DMASK;
 2086         pm_ctl |= (1 << 8);
 2087 
 2088         if (powerlevel == 0) {
 2089                 pm_ctl |= PCIM_PSTAT_D0;
 2090                 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
 2091                     pm_ctl, 2);
 2092                 DELAY(10000);
 2093                 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
 2094                 DELAY(10000);
 2095 
 2096 #ifdef NOTYET
 2097                 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
 2098                 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
 2099 #endif
 2100                 DELAY(40); DELAY(40); DELAY(40);
 2101                 DELAY(10000);   /* above not quite adequate on 5700 */
 2102                 return 0;
 2103         }
 2104 
 2105 
 2106         /*
 2107          * Entering ACPI power states D1-D3 is achieved by wiggling
 2108          * GMII GPIO pins. Example code assumes all hardware vendors
 2109          * followed Broadcom's sample PCB layout. Until we verify that
 2110          * for all supported OEM cards, states D1-D3 are unsupported.
 2111          */
 2112         printf("%s: power state %d unimplemented; check GPIO pins\n",
 2113                sc->bge_dev.dv_xname, powerlevel);
 2114 #endif
 2115         return EOPNOTSUPP;
 2116 }
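
/*
 * Illustrative sketch, not part of the driver: in the NOTYET block
 * above, the write to BGE_PCI_PWRMGMT_CMD clears the power-state
 * field (PCIM_PSTAT_DMASK), selects state D0, and sets bit 8, which
 * reads as the PME-enable bit of the standard PCI PMCSR register.
 * The helper name is hypothetical; the register layout is assumed
 * from the NOTYET code and from bge_attach() below.
 */
#ifdef BGE_EXAMPLE
static void
bge_force_d0(struct bge_softc *sc)
{
        u_int32_t pm_ctl;

        pm_ctl = pci_conf_read(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
            BGE_PCI_PWRMGMT_CMD);
        pm_ctl &= ~PCIM_PSTAT_DMASK;            /* clear power-state field */
        pm_ctl |= (1 << 8) | PCIM_PSTAT_D0;     /* PME enable + D0 */
        pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
            BGE_PCI_PWRMGMT_CMD, pm_ctl);
        DELAY(1000);
}
#endif /* BGE_EXAMPLE */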
 2117 
 2118 
 2119 /*
 2120  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 2121  * against our list and report a match if we find one. Note that
 2122  * although the Broadcom controller contains VPD support, and the
 2123  * device name string could be read (slowly) from the controller
 2124  * itself, this driver announces the product name from its
 2125  * compiled-in table.
 2126  */
 2127 int
 2128 bge_probe(parent, match, aux)
 2129         struct device *parent;
 2130         struct cfdata *match;
 2131         void *aux;
 2132 {
 2133         struct pci_attach_args *pa = (struct pci_attach_args *)aux;
 2134 
 2135         if (bge_lookup(pa) != NULL)
 2136                 return (1);
 2137 
 2138         return (0);
 2139 }
 2140 
 2141 void
 2142 bge_attach(parent, self, aux)
 2143         struct device *parent, *self;
 2144         void *aux;
 2145 {
 2146         struct bge_softc        *sc = (struct bge_softc *)self;
 2147         struct pci_attach_args  *pa = aux;
 2148         const struct bge_product *bp;
 2149         const struct bge_revision *br;
 2150         pci_chipset_tag_t       pc = pa->pa_pc;
 2151         pci_intr_handle_t       ih;
 2152         const char              *intrstr = NULL;
 2153         bus_dma_segment_t       seg;
 2154         int                     rseg;
 2155         u_int32_t               hwcfg = 0;
 2156         u_int32_t               mac_addr = 0;
 2157         u_int32_t               command;
 2158         struct ifnet            *ifp;
 2159         caddr_t                 kva;
 2160         u_char                  eaddr[ETHER_ADDR_LEN];
 2161         pcireg_t                memtype;
 2162         bus_addr_t              memaddr;
 2163         bus_size_t              memsize;
 2164         u_int32_t               pm_ctl;
 2165         
 2166         bp = bge_lookup(pa);
 2167         KASSERT(bp != NULL);
 2168 
 2169         sc->bge_pa = *pa;
 2170 
 2171         aprint_naive(": Ethernet controller\n");
 2172         aprint_normal(": %s\n", bp->bp_name);
 2173 
 2174         /*
 2175          * Map control/status registers.
 2176          */
 2177         DPRINTFN(5, ("Map control/status regs\n"));
 2178         command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
 2179         command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
 2180         pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
 2181         command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
 2182 
 2183         if (!(command & PCI_COMMAND_MEM_ENABLE)) {
 2184                 aprint_error("%s: failed to enable memory mapping!\n",
 2185                     sc->bge_dev.dv_xname);
 2186                 return;
 2187         }
 2188 
 2189         DPRINTFN(5, ("pci_mem_find\n"));
 2190         memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
 2191         switch (memtype) {
 2192         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
 2193         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
 2194                 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
 2195                     memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
 2196                     &memaddr, &memsize) == 0)
 2197                         break;
 2198         default:
 2199                 aprint_error("%s: can't find mem space\n",
 2200                     sc->bge_dev.dv_xname);
 2201                 return;
 2202         }
 2203 
 2204         DPRINTFN(5, ("pci_intr_map\n"));
 2205         if (pci_intr_map(pa, &ih)) {
 2206                 aprint_error("%s: couldn't map interrupt\n",
 2207                     sc->bge_dev.dv_xname);
 2208                 return;
 2209         }
 2210 
 2211         DPRINTFN(5, ("pci_intr_string\n"));
 2212         intrstr = pci_intr_string(pc, ih);
 2213 
 2214         DPRINTFN(5, ("pci_intr_establish\n"));
 2215         sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
 2216 
 2217         if (sc->bge_intrhand == NULL) {
 2218                 aprint_error("%s: couldn't establish interrupt",
 2219                     sc->bge_dev.dv_xname);
 2220                 if (intrstr != NULL)
 2221                         aprint_normal(" at %s", intrstr);
 2222                 aprint_normal("\n");
 2223                 return;
 2224         }
 2225         aprint_normal("%s: interrupting at %s\n",
 2226             sc->bge_dev.dv_xname, intrstr);
 2227 
 2228         /*
 2229          * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
 2230          * can clobber the chip's PCI config-space power control registers,
 2231          * leaving the card in D3 powersave state.
 2232          * We do not have memory-mapped registers in this state,
 2233          * so force device into D0 state before starting initialization.
 2234          */
 2235         pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
 2236         pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
 2237         pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
 2238         pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
 2239         DELAY(1000);    /* 27 usec is allegedly sufficient */
 2240 
 2241         /* Try to reset the chip. */
 2242         DPRINTFN(5, ("bge_reset\n"));
 2243         bge_reset(sc);
 2244 
 2245         if (bge_chipinit(sc)) {
 2246                 aprint_error("%s: chip initialization failed\n",
 2247                     sc->bge_dev.dv_xname);
 2248                 bge_release_resources(sc);
 2249                 return;
 2250         }
 2251 
 2252         /*
 2253          * Get station address from the EEPROM.
 2254          */
 2255         mac_addr = bge_readmem_ind(sc, 0x0c14);
 2256         if ((mac_addr >> 16) == 0x484b) {
 2257                 eaddr[0] = (u_char)(mac_addr >> 8);
 2258                 eaddr[1] = (u_char)(mac_addr >> 0);
 2259                 mac_addr = bge_readmem_ind(sc, 0x0c18);
 2260                 eaddr[2] = (u_char)(mac_addr >> 24);
 2261                 eaddr[3] = (u_char)(mac_addr >> 16);
 2262                 eaddr[4] = (u_char)(mac_addr >> 8);
 2263                 eaddr[5] = (u_char)(mac_addr >> 0);
 2264         } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
 2265             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2266                 aprint_error("%s: failed to read station address\n",
 2267                     sc->bge_dev.dv_xname);
 2268                 bge_release_resources(sc);
 2269                 return;
 2270         }
 2271 
 2272         /*
 2273          * Save ASIC rev.  Look up any quirks associated with this
 2274          * ASIC.
 2275          */
 2276         sc->bge_chipid =
 2277             pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
 2278             BGE_PCIMISCCTL_ASICREV;
 2279         br = bge_lookup_rev(sc->bge_chipid);
 2280 
 2281         aprint_normal("%s: ", sc->bge_dev.dv_xname);
 2282 
 2283         if (br == NULL) {
 2284                 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
 2285                 sc->bge_quirks = 0;
 2286         } else {
 2287                 aprint_normal("ASIC %s (0x%04x)",
 2288                     br->br_name, sc->bge_chipid >> 16);
 2289                 sc->bge_quirks |= br->br_quirks;
 2290         }
 2291         aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
 2292 
 2293         /* Allocate the general information block and ring buffers. */
 2294         if (pci_dma64_available(pa))
 2295                 sc->bge_dmatag = pa->pa_dmat64;
 2296         else
 2297                 sc->bge_dmatag = pa->pa_dmat;
 2298         DPRINTFN(5, ("bus_dmamem_alloc\n"));
 2299         if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
 2300                              PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
 2301                 aprint_error("%s: can't alloc rx buffers\n",
 2302                     sc->bge_dev.dv_xname);
 2303                 return;
 2304         }
 2305         DPRINTFN(5, ("bus_dmamem_map\n"));
 2306         if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
 2307                            sizeof(struct bge_ring_data), &kva,
 2308                            BUS_DMA_NOWAIT)) {
 2309                 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
 2310                     sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
 2311                 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
 2312                 return;
 2313         }
 2314         DPRINTFN(5, ("bus_dmamem_create\n"));
 2315         if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
 2316             sizeof(struct bge_ring_data), 0,
 2317             BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
 2318                 aprint_error("%s: can't create DMA map\n",
 2319                     sc->bge_dev.dv_xname);
 2320                 bus_dmamem_unmap(sc->bge_dmatag, kva,
 2321                                  sizeof(struct bge_ring_data));
 2322                 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
 2323                 return;
 2324         }
 2325         DPRINTFN(5, ("bus_dmamem_load\n"));
 2326         if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
 2327                             sizeof(struct bge_ring_data), NULL,
 2328                             BUS_DMA_NOWAIT)) {
 2329                 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
 2330                 bus_dmamem_unmap(sc->bge_dmatag, kva,
 2331                                  sizeof(struct bge_ring_data));
 2332                 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
 2333                 return;
 2334         }
 2335 
 2336         DPRINTFN(5, ("bzero\n"));
 2337         sc->bge_rdata = (struct bge_ring_data *)kva;
 2338 
 2339         memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
 2340 
 2341         /* Try to allocate memory for jumbo buffers. */
 2342         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 2343                 if (bge_alloc_jumbo_mem(sc)) {
 2344                         aprint_error("%s: jumbo buffer allocation failed\n",
 2345                             sc->bge_dev.dv_xname);
 2346                 } else
 2347                         sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
 2348         }
 2349 
 2350         /* Set default tuneable values. */
 2351         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2352         sc->bge_rx_coal_ticks = 150;
 2353         sc->bge_rx_max_coal_bds = 64;
 2354 #ifdef ORIG_WPAUL_VALUES
 2355         sc->bge_tx_coal_ticks = 150;
 2356         sc->bge_tx_max_coal_bds = 128;
 2357 #else
 2358         sc->bge_tx_coal_ticks = 300;
 2359         sc->bge_tx_max_coal_bds = 400;
 2360 #endif
 2361 
 2362         /* Set up ifnet structure */
 2363         ifp = &sc->ethercom.ec_if;
 2364         ifp->if_softc = sc;
 2365         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2366         ifp->if_ioctl = bge_ioctl;
 2367         ifp->if_start = bge_start;
 2368         ifp->if_init = bge_init;
 2369         ifp->if_watchdog = bge_watchdog;
 2370         IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
 2371         IFQ_SET_READY(&ifp->if_snd);
 2372         DPRINTFN(5, ("bcopy\n"));
 2373         strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
 2374 
 2375         if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
 2376                 sc->ethercom.ec_if.if_capabilities |=
 2377                     IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
 2378         sc->ethercom.ec_capabilities |= 
 2379             ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
 2380 
 2381         /*
 2382          * Do MII setup.
 2383          */
 2384         DPRINTFN(5, ("mii setup\n"));
 2385         sc->bge_mii.mii_ifp = ifp;
 2386         sc->bge_mii.mii_readreg = bge_miibus_readreg;
 2387         sc->bge_mii.mii_writereg = bge_miibus_writereg;
 2388         sc->bge_mii.mii_statchg = bge_miibus_statchg;
 2389 
 2390         /*
 2391          * Figure out what sort of media we have by checking the
 2392          * hardware config word in the first 32k of NIC internal memory,
 2393          * or fall back to the config word in the EEPROM. Note: on some BCM5700
 2394          * cards, this value appears to be unset. If that's the
 2395          * case, we have to rely on identifying the NIC by its PCI
 2396          * subsystem ID, as we do below for the SysKonnect SK-9D41.
 2397          */
 2398         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
 2399                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2400         } else {
 2401                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
 2402                     BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
 2403                 hwcfg = be32toh(hwcfg);
 2404         }
 2405         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2406                 sc->bge_tbi = 1;
 2407 
 2408         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2409         if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
 2410             SK_SUBSYSID_9D41)
 2411                 sc->bge_tbi = 1;
 2412 
 2413         if (sc->bge_tbi) {
 2414                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
 2415                     bge_ifmedia_sts);
 2416                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2417                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
 2418                             0, NULL);
 2419                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2420                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2421         } else {
 2422                 /*
 2423                  * Do transceiver setup.
 2424                  */
 2425                 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
 2426                              bge_ifmedia_sts);
 2427                 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
 2428                            MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
 2429                 
 2430                 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
 2431                         printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
 2432                         ifmedia_add(&sc->bge_mii.mii_media,
 2433                                     IFM_ETHER|IFM_MANUAL, 0, NULL);
 2434                         ifmedia_set(&sc->bge_mii.mii_media,
 2435                                     IFM_ETHER|IFM_MANUAL);
 2436                 } else
 2437                         ifmedia_set(&sc->bge_mii.mii_media,
 2438                                     IFM_ETHER|IFM_AUTO);
 2439         }
 2440 
 2441         /*
 2442          * When using the BCM5701 in PCI-X mode, data corruption has
 2443          * been observed in the first few bytes of some received packets.
 2444          * Aligning the packet buffer in memory eliminates the corruption.
 2445          * Unfortunately, this misaligns the packet payloads.  On platforms
 2446          * which do not support unaligned accesses, we will realign the
 2447          * payloads by copying the received packets.
 2448          */
 2449         if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
 2450                 /* If in PCI-X mode, work around the alignment bug. */
 2451                 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
 2452                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2453                          BGE_PCISTATE_PCI_BUSSPEED)
 2454                         sc->bge_rx_alignment_bug = 1;
 2455         }
 2456 
 2457         /*
 2458          * Call MI attach routine.
 2459          */
 2460         DPRINTFN(5, ("if_attach\n"));
 2461         if_attach(ifp);
 2462         DPRINTFN(5, ("ether_ifattach\n"));
 2463         ether_ifattach(ifp, eaddr);
 2464         DPRINTFN(5, ("callout_init\n"));
 2465         callout_init(&sc->bge_timeout);
 2466 }
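
/*
 * Illustrative sketch, not part of the driver: the ring-buffer setup
 * in bge_attach() follows the canonical four-step bus_dma(9) sequence
 * (alloc, map, create, load), and each failure path unwinds only the
 * steps already completed, in reverse order.  A condensed, hypothetical
 * rendering of that sequence with full unwinding:
 */
#ifdef BGE_EXAMPLE
static int
bge_ring_dma_example(struct bge_softc *sc, bus_dma_segment_t *seg,
    caddr_t *kvap)
{
        int rseg;

        if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
            PAGE_SIZE, 0, seg, 1, &rseg, BUS_DMA_NOWAIT))
                return (ENOMEM);
        if (bus_dmamem_map(sc->bge_dmatag, seg, rseg,
            sizeof(struct bge_ring_data), kvap, BUS_DMA_NOWAIT))
                goto free;
        if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data),
            1, sizeof(struct bge_ring_data), 0, BUS_DMA_NOWAIT,
            &sc->bge_ring_map))
                goto unmap;
        if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, *kvap,
            sizeof(struct bge_ring_data), NULL, BUS_DMA_NOWAIT))
                goto destroy;
        return (0);

destroy:
        bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
unmap:
        bus_dmamem_unmap(sc->bge_dmatag, *kvap,
            sizeof(struct bge_ring_data));
free:
        bus_dmamem_free(sc->bge_dmatag, seg, rseg);
        return (ENOMEM);
}
#endif /* BGE_EXAMPLE */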
 2467 
 2468 void
 2469 bge_release_resources(sc)
 2470         struct bge_softc *sc;
 2471 {
 2472         if (sc->bge_vpd_prodname != NULL)
 2473                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2474 
 2475         if (sc->bge_vpd_readonly != NULL)
 2476                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2477 }
 2478 
 2479 void
 2480 bge_reset(sc)
 2481         struct bge_softc *sc;
 2482 {
 2483         struct pci_attach_args *pa = &sc->bge_pa;
 2484         u_int32_t cachesize, command, pcistate, new_pcistate;
 2485         int i, val = 0;
 2486 
 2487         /* Save some important PCI state. */
 2488         cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
 2489         command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
 2490         pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
 2491 
 2492         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
 2493             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2494             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
 2495 
 2496         /* Issue global reset */
 2497         bge_writereg_ind(sc, BGE_MISC_CFG,
 2498             BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
 2499 
 2500         DELAY(1000);
 2501 
 2502         /* Reset some of the PCI state that got zapped by reset */
 2503         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
 2504             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2505             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
 2506         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
 2507         pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
 2508         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2509 
 2510         /* Enable memory arbiter. */
 2511         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 2512                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2513         }
 2514 
 2515         /*
 2516          * Prevent PXE restart: write a magic number to the
 2517          * general communications memory at 0xB50.
 2518          */
 2519         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2520 
 2521         /*
 2522          * Poll the value location we just wrote until
 2523          * we see the 1's complement of the magic number.
 2524          * This indicates that the firmware initialization
 2525          * is complete.
 2526          */
 2527         for (i = 0; i < 750; i++) {
 2528                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2529                 if (val == ~BGE_MAGIC_NUMBER)
 2530                         break;
 2531                 DELAY(1000);
 2532         }
 2533 
 2534         if (i == 750) {
 2535                 printf("%s: firmware handshake timed out, val = %x\n",
 2536                     sc->bge_dev.dv_xname, val);
 2537                 return;
 2538         }
 2539 
 2540         /*
 2541          * XXX Wait for the value of the PCISTATE register to
 2542          * return to its original pre-reset state. This is a
 2543          * fairly good indicator of reset completion. If we don't
 2544          * wait for the reset to fully complete, trying to read
 2545          * from the device's non-PCI registers may yield garbage
 2546          * results.
 2547          */
 2548         for (i = 0; i < BGE_TIMEOUT; i++) {
 2549                 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
 2550                     BGE_PCI_PCISTATE);
 2551                 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 
 2552                     (pcistate & ~BGE_PCISTATE_RESERVED))
 2553                         break;
 2554                 DELAY(10);
 2555         }
 2556         if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 
 2557             (pcistate & ~BGE_PCISTATE_RESERVED)) {
 2558                 printf("%s: pcistate failed to revert\n",
 2559                     sc->bge_dev.dv_xname);
 2560         }
 2561 
 2562         /* Enable memory arbiter. */
 2563         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 2564                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2565         }
 2566 
 2567         /* Fix up byte swapping */
 2568         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
 2569 
 2570         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2571 
 2572         DELAY(10000);
 2573 }
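
/*
 * Illustrative sketch, not part of the driver: the firmware handshake
 * in bge_reset() writes BGE_MAGIC_NUMBER to general communications
 * memory, and the bootcode overwrites it with the one's complement
 * once its own initialization completes, so polling for
 * ~BGE_MAGIC_NUMBER bounds the wait at roughly 750ms.  The helper
 * name and ETIMEDOUT return are hypothetical.
 */
#ifdef BGE_EXAMPLE
static int
bge_fw_handshake(struct bge_softc *sc)
{
        int i;

        bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
        for (i = 0; i < 750; i++) {
                if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM) ==
                    ~BGE_MAGIC_NUMBER)
                        return (0);     /* firmware init complete */
                DELAY(1000);
        }
        return (ETIMEDOUT);
}
#endif /* BGE_EXAMPLE */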
 2574 
 2575 /*
 2576  * Frame reception handling. This is called if there's a frame
 2577  * on the receive return list.
 2578  *
 2579  * Note: we have to be able to handle two possibilities here:
 2580  * 1) the frame is from the jumbo receive ring
 2581  * 2) the frame is from the standard receive ring
 2582  */
 2583 
 2584 void
 2585 bge_rxeof(sc)
 2586         struct bge_softc *sc;
 2587 {
 2588         struct ifnet *ifp;
 2589         int stdcnt = 0, jumbocnt = 0;
 2590         int have_tag = 0;
 2591         u_int16_t vlan_tag = 0;
 2592         bus_dmamap_t dmamap;
 2593         bus_addr_t offset, toff;
 2594         bus_size_t tlen;
 2595         int tosync;
 2596 
 2597         ifp = &sc->ethercom.ec_if;
 2598 
 2599         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2600             offsetof(struct bge_ring_data, bge_status_block),
 2601             sizeof (struct bge_status_block),
 2602             BUS_DMASYNC_POSTREAD);
 2603 
 2604         offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
 2605         tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 
 2606             sc->bge_rx_saved_considx;
 2607 
 2608         toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
 2609 
 2610         if (tosync < 0) {
 2611                 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
 2612                     sizeof (struct bge_rx_bd);
 2613                 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2614                     toff, tlen, BUS_DMASYNC_POSTREAD);
 2615                 tosync = -tosync;
 2616         }
 2617 
 2618         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2619             offset, tosync * sizeof (struct bge_rx_bd),
 2620             BUS_DMASYNC_POSTREAD);
 2621 
 2622         while (sc->bge_rx_saved_considx !=
 2623             sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
 2624                 struct bge_rx_bd        *cur_rx;
 2625                 u_int32_t               rxidx;
 2626                 struct mbuf             *m = NULL;
 2627 
 2628                 cur_rx = &sc->bge_rdata->
 2629                         bge_rx_return_ring[sc->bge_rx_saved_considx];
 2630 
 2631                 rxidx = cur_rx->bge_idx;
 2632                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2633 
 2634                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2635                         have_tag = 1;
 2636                         vlan_tag = cur_rx->bge_vlan_tag;
 2637                 }
 2638 
 2639                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2640                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2641                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2642                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2643                         jumbocnt++;
 2644                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2645                                 ifp->if_ierrors++;
 2646                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2647                                 continue;
 2648                         }
 2649                         if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
 2650                                              NULL)== ENOBUFS) {
 2651                                 ifp->if_ierrors++;
 2652                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2653                                 continue;
 2654                         }
 2655                 } else {
 2656                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2657                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2658                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2659                         stdcnt++;
 2660                         dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
 2661                         sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
 2662                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2663                                 ifp->if_ierrors++;
 2664                                 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
 2665                                 continue;
 2666                         }
 2667                         if (bge_newbuf_std(sc, sc->bge_std,
 2668                             NULL, dmamap) == ENOBUFS) {
 2669                                 ifp->if_ierrors++;
 2670                                 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
 2671                                 continue;
 2672                         }
 2673                 }
 2674 
 2675                 ifp->if_ipackets++;
 2676 #ifndef __NO_STRICT_ALIGNMENT
 2677                 /*
 2678                  * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
 2679                  * the Rx buffer has the layer-2 header unaligned.
 2680                  * If our CPU requires alignment, re-align by copying.
 2681                  */
 2682                 if (sc->bge_rx_alignment_bug) {
 2683                         memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
 2684                                 cur_rx->bge_len);
 2685                         m->m_data += ETHER_ALIGN;
 2686                 }
 2687 #endif
 2688                 
 2689                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2690                 m->m_pkthdr.rcvif = ifp;
 2691 
 2692 #if NBPFILTER > 0
 2693                 /*
 2694                  * Handle BPF listeners. Let the BPF user see the packet.
 2695                  */
 2696                 if (ifp->if_bpf)
 2697                         bpf_mtap(ifp->if_bpf, m);
 2698 #endif
 2699 
 2700                 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
 2701 
 2702                 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
 2703                         m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
 2704                 /*
 2705                  * Rx transport checksum-offload may also
 2706                  * have bugs with packets which, when transmitted,
 2707                  * were `runts' requiring padding.
 2708                  */
 2709                 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
 2710                     (/* (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || */
 2711                      m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
 2712                         m->m_pkthdr.csum_data =
 2713                             cur_rx->bge_tcp_udp_csum;
 2714                         m->m_pkthdr.csum_flags |=
 2715                             (M_CSUM_TCPv4|M_CSUM_UDPv4|
 2716                              M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
 2717                 }
 2718 
 2719                 /*
 2720                  * If we received a packet with a vlan tag, pass it
 2721                  * to vlan_input() instead of ether_input().
 2722                  */
 2723                 if (have_tag) {
 2724                         struct m_tag *mtag;
 2725 
 2726                         mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
 2727                             M_NOWAIT);
 2728                         if (mtag != NULL) {
 2729                                 *(u_int *)(mtag + 1) = vlan_tag;
 2730                                 m_tag_prepend(m, mtag);
 2731                                 have_tag = vlan_tag = 0;
 2732                         } else {
 2733                                 printf("%s: no mbuf for tag\n", ifp->if_xname);
 2734                                 m_freem(m);
 2735                                 have_tag = vlan_tag = 0;
 2736                                 continue;
 2737                         }
 2738                 }
 2739                 (*ifp->if_input)(ifp, m);
 2740         }
 2741 
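              /*
               * Tell the chip how far we have consumed from the return ring
               * and, if we replenished any standard or jumbo buffers, where
               * the new producer indices stand.
               */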
 2742         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2743         if (stdcnt)
 2744                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2745         if (jumbocnt)
 2746                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2747 }
 2748 
 2749 void
 2750 bge_txeof(sc)
 2751         struct bge_softc *sc;
 2752 {
 2753         struct bge_tx_bd *cur_tx = NULL;
 2754         struct ifnet *ifp;
 2755         struct txdmamap_pool_entry *dma;
 2756         bus_addr_t offset, toff;
 2757         bus_size_t tlen;
 2758         int tosync;
 2759         struct mbuf *m;
 2760 
 2761         ifp = &sc->ethercom.ec_if;
 2762 
 2763         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2764             offsetof(struct bge_ring_data, bge_status_block),
 2765             sizeof (struct bge_status_block),
 2766             BUS_DMASYNC_POSTREAD);
 2767 
 2768         offset = offsetof(struct bge_ring_data, bge_tx_ring);
 2769         tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 
 2770             sc->bge_tx_saved_considx;
 2771 
 2772         toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
 2773 
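              /*
               * The consumer index wrapped: sync from the saved index to the
               * end of the ring here; the stretch from the ring base is
               * synced just below.
               */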
 2774         if (tosync < 0) {
 2775                 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
 2776                     sizeof (struct bge_tx_bd);
 2777                 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2778                     toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 2779                 tosync = -tosync;
 2780         }
 2781 
 2782         bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
 2783             offset, tosync * sizeof (struct bge_tx_bd),
 2784             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 2785 
 2786         /*
 2787          * Go through our tx ring and free mbufs for those
 2788          * frames that have been sent.
 2789          */
 2790         while (sc->bge_tx_saved_considx !=
 2791             sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
 2792                 u_int32_t               idx = 0;
 2793 
 2794                 idx = sc->bge_tx_saved_considx;
 2795                 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
 2796                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2797                         ifp->if_opackets++;
 2798                 m = sc->bge_cdata.bge_tx_chain[idx];
 2799                 if (m != NULL) {
 2800                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2801                         dma = sc->txdma[idx];
 2802                         bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
 2803                             dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 2804                         bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
 2805                         SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
 2806                         sc->txdma[idx] = NULL;
 2807 
 2808                         m_freem(m);
 2809                 }
 2810                 sc->bge_txcnt--;
 2811                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2812                 ifp->if_timer = 0;
 2813         }
 2814 
 2815         if (cur_tx != NULL)
 2816                 ifp->if_flags &= ~IFF_OACTIVE;
 2817 }
 2818 
 2819 int
 2820 bge_intr(xsc)
 2821         void *xsc;
 2822 {
 2823         struct bge_softc *sc;
 2824         struct ifnet *ifp;
 2825 
 2826         sc = xsc;
 2827         ifp = &sc->ethercom.ec_if;
 2828 
 2829 #ifdef notdef
 2830         /* Avoid this for now -- checking this register is expensive. */
 2831         /* Make sure this is really our interrupt. */
 2832         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2833                 return (0);
 2834 #endif
 2835         /* Ack interrupt and stop others from occurring. */
 2836         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2837 
 2838         /*
 2839          * Process link state changes.
 2840          * Grrr. The link status word in the status block does
 2841          * not work correctly on the BCM5700 rev AX and BX chips,
 2842          * according to all available information. Hence, we have
 2843          * to enable MII interrupts in order to properly obtain
 2844          * async link changes. Unfortunately, this also means that
 2845          * we have to read the MAC status register to detect link
 2846          * changes, thereby adding an additional register access to
 2847          * the interrupt handler.
 2848          */
 2849 
 2850         if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
 2851                 u_int32_t               status;
 2852 
 2853                 status = CSR_READ_4(sc, BGE_MAC_STS);
 2854                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 2855                         sc->bge_link = 0;
 2856                         callout_stop(&sc->bge_timeout);
 2857                         bge_tick(sc);
 2858                         /* Clear the interrupt */
 2859                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 2860                             BGE_EVTENB_MI_INTERRUPT);
 2861                         bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
 2862                         bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
 2863                             BRGPHY_INTRS);
 2864                 }
 2865         } else {
 2866                 if (sc->bge_rdata->bge_status_block.bge_status &
 2867                     BGE_STATFLAG_LINKSTATE_CHANGED) {
 2868                         sc->bge_link = 0;
 2869                         callout_stop(&sc->bge_timeout);
 2870                         bge_tick(sc);
 2871                         /* Clear the interrupt */
 2872                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 2873                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 2874                             BGE_MACSTAT_LINK_CHANGED);
 2875                 }
 2876         }
 2877 
 2878         if (ifp->if_flags & IFF_RUNNING) {
 2879                 /* Check RX return ring producer/consumer */
 2880                 bge_rxeof(sc);
 2881 
 2882                 /* Check TX ring producer/consumer */
 2883                 bge_txeof(sc);
 2884         }
 2885 
 2886         if (sc->bge_pending_rxintr_change) {
 2887                 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
 2888                 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
 2889                 uint32_t junk;
 2890 
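                      /*
                       * Post the new coalescing parameters; each read-back
                       * after a short delay is here to make sure the
                       * preceding write has reached the chip before we
                       * move on.
                       */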
 2891                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
 2892                 DELAY(10);
 2893                 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
 2894                 
 2895                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
 2896                 DELAY(10);
 2897                 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
 2898 
 2899                 sc->bge_pending_rxintr_change = 0;
 2900         }
 2901         bge_handle_events(sc);
 2902 
 2903         /* Re-enable interrupts. */
 2904         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 2905 
 2906         if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
 2907                 bge_start(ifp);
 2908 
 2909         return (1);
 2910 }
 2911 
 2912 void
 2913 bge_tick(xsc)
 2914         void *xsc;
 2915 {
 2916         struct bge_softc *sc = xsc;
 2917         struct mii_data *mii = &sc->bge_mii;
 2918         struct ifmedia *ifm = NULL;
 2919         struct ifnet *ifp = &sc->ethercom.ec_if;
 2920         int s;
 2921 
 2922         s = splnet();
 2923 
 2924         bge_stats_update(sc);
 2925         callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
 2926         if (sc->bge_link) {
 2927                 splx(s);
 2928                 return;
 2929         }
 2930 
 2931         if (sc->bge_tbi) {
 2932                 ifm = &sc->bge_ifmedia;
 2933                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 2934                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
 2935                         sc->bge_link++;
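                              /*
                               * Writing all ones here is assumed to clear any
                               * latched MAC status attention bits.
                               */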
 2936                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 2937                         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 2938                                 bge_start(ifp);
 2939                 }
 2940                 splx(s);
 2941                 return;
 2942         }
 2943 
 2944         mii_tick(mii);
 2945 
 2946         if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
 2947             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 2948                 sc->bge_link++;
 2949                 if (!IFQ_IS_EMPTY(&ifp->if_snd))
 2950                         bge_start(ifp);
 2951         }
 2952 
 2953         splx(s);
 2954 }
 2955 
 2956 void
 2957 bge_stats_update(sc)
 2958         struct bge_softc *sc;
 2959 {
 2960         struct ifnet *ifp = &sc->ethercom.ec_if;
 2961         bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
 2962         bus_size_t rstats = BGE_RX_STATS;
 2963 
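      /*
       * 5705-class chips keep MAC statistics in on-chip registers;
       * older chips export a statistics block that is read through
       * the memory window instead.
       */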
 2964 #define READ_RSTAT(sc, stats, stat) \
 2965           CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
 2966 
 2967         if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
 2968                 ifp->if_collisions +=
 2969                     READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
 2970                     READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
 2971                     READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
 2972                     READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
 2973                 return;
 2974         }
 2975 
 2976 #undef READ_RSTAT
 2977 #define READ_STAT(sc, stats, stat) \
 2978           CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
 2979 
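      /*
       * The chip counters are cumulative, so adding (chip total -
       * if_collisions) effectively replaces our count with the
       * chip's running total.
       */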
 2980         ifp->if_collisions +=
 2981           (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
 2982            READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
 2983            READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
 2984            READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
 2985           ifp->if_collisions;
 2986 
 2987 #undef READ_STAT
 2988 
 2989 #ifdef notdef
 2990         ifp->if_collisions +=
 2991            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
 2992            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
 2993            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
 2994            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
 2995            ifp->if_collisions;
 2996 #endif
 2997 }
 2998 
 2999 /*
 3000  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 3001  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 3002  * but when such padded frames employ the bge IP/TCP checksum offload,
 3003  * the hardware checksum assist gives incorrect results (possibly
 3004  * from incorporating its own padding into the UDP/TCP checksum; who knows).
 3005  * If we pad such runts with zeros, the onboard checksum comes out correct.
 3006  */
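/*
 * Worked example (assuming the conventional ETHER_MIN_NOPAD of 60):
 * a 42-byte frame needs padlen = 60 - 42 = 18 zero bytes appended
 * before the checksum offload can be trusted.
 */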
 3007 static __inline int
 3008 bge_cksum_pad(struct mbuf *pkt)
 3009 {
 3010         struct mbuf *last = NULL;
 3011         int padlen;
 3012 
 3013         padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
 3014 
 3015         /* if there's only the packet-header and we can pad there, use it. */
 3016         if (pkt->m_pkthdr.len == pkt->m_len &&
 3017             !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
 3018                 last = pkt;
 3019         } else {
 3020                 /*
 3021                  * Walk packet chain to find last mbuf. We will either
 3022                  * pad there, or append a new mbuf and pad it 
 3023                  * (thus perhaps avoiding the bcm5700 dma-min bug).
 3024                  */
 3025                 for (last = pkt; last->m_next != NULL; last = last->m_next) {
 3026                        (void) 0; /* do nothing */
 3027                 }
 3028 
 3029                 /* `last' now points to last in chain. */
 3030                 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
 3031                         (void) 0; /* we can pad here, in-place. */
 3032                 } else {
 3033                         /* Allocate new empty mbuf, pad it. Compact later. */
 3034                         struct mbuf *n;
 3035                         MGET(n, M_DONTWAIT, MT_DATA);
                              if (n == NULL)
                                      return ENOBUFS; /* MGET can fail; avoid a NULL dereference */
 3036                         n->m_len = 0;
 3037                         last->m_next = n;
 3038                         last = n;
 3039                 }
 3040         }
 3041 
 3042 #ifdef DEBUG
 3043           /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
 3044           KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
 3045 #endif
 3046         /* Now zero the pad area, to avoid the bge cksum-assist bug */
 3047         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
 3048         last->m_len += padlen;
 3049         pkt->m_pkthdr.len += padlen;
 3050         return 0;
 3051 }
 3052 
 3053 /*
 3054  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
 3055  */
 3056 static __inline int
 3057 bge_compact_dma_runt(struct mbuf *pkt)
 3058 {
 3059         struct mbuf     *m, *prev;
 3060         int             totlen, prevlen;
 3061 
 3062         prev = NULL;
 3063         totlen = 0;
 3064         prevlen = -1;
 3065 
 3066         for (m = pkt; m != NULL; prev = m,m = m->m_next) {
 3067                 int mlen = m->m_len;
 3068                 int shortfall = 8 - mlen;
 3069 
 3070                 totlen += mlen;
 3071                 if (mlen == 0) {
 3072                         continue;
 3073                 }
 3074                 if (mlen >= 8)
 3075                         continue;
 3076 
 3077                 /* If we get here, the mbuf data is too small for the DMA engine.
 3078                  * Try to fix by shuffling data to prev or next in chain.
 3079                  * If that fails, do a compacting deep-copy of the whole chain.
 3080                  */
 3081 
 3082                 /* Internal frag. If fits in prev, copy it there. */
 3083                 if (prev && !M_READONLY(prev) &&
 3084                       M_TRAILINGSPACE(prev) >= m->m_len) {
 3085                         bcopy(m->m_data,
 3086                               prev->m_data+prev->m_len,
 3087                               mlen);
 3088                         prev->m_len += mlen;
 3089                         m->m_len = 0;
 3090                         /* XXX stitch chain */
 3091                         prev->m_next = m_free(m);
 3092                         m = prev;
 3093                         continue;
 3094                 }
 3095                 else if (m->m_next != NULL && !M_READONLY(m) &&
 3096                              M_TRAILINGSPACE(m) >= shortfall &&
 3097                              m->m_next->m_len >= (8 + shortfall)) {
 3098                     /* m is writable and the next mbuf has enough data; pull it up. */
 3099 
 3100                         bcopy(m->m_next->m_data,
 3101                               m->m_data+m->m_len,
 3102                               shortfall);
 3103                         m->m_len += shortfall;
 3104                         m->m_next->m_len -= shortfall;
 3105                         m->m_next->m_data += shortfall;
 3106                 }
 3107                 else if (m->m_next == NULL || 1) {
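                              /*
                               * The "|| 1" deliberately makes this the
                               * catch-all branch, so every runt not repaired
                               * above is rebuilt by the safe copy path below.
                               */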
 3108                         /* Got a runt at the very end of the packet.
 3109                          * borrow data from the tail of the preceding mbuf and
 3110                          * update its length in-place. (The original data is still
 3111                          * valid, so we can do this even if prev is not writable.)
 3112                          */
 3113 
 3114                         /* if we'd make prev a runt, just move all of its data. */
 3115 #ifdef DEBUG
 3116                         KASSERT(prev != NULL /*, ("runt but null PREV")*/);
 3117                         KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
 3118 #endif
 3119                         if ((prev->m_len - shortfall) < 8)
 3120                                 shortfall = prev->m_len;
 3121                         
 3122 #ifdef notyet   /* just do the safe slow thing for now */
 3123                         if (!M_READONLY(m)) {
 3124                                 if (M_LEADINGSPACE(m) < shortfall) {
 3125                                         void *m_dat;
 3126                                         m_dat = (m->m_flags & M_PKTHDR) ?
 3127                                           m->m_pktdat : m->m_dat;
 3128                                         memmove(m_dat, mtod(m, void*), m->m_len);
 3129                                         m->m_data = m_dat;
 3130                                 }
 3131                         } else
 3132 #endif  /* just do the safe slow thing */
 3133                         {
 3134                                 struct mbuf * n = NULL;
 3135                                 int newprevlen = prev->m_len - shortfall;
 3136 
 3137                                 MGET(n, M_DONTWAIT, MT_DATA);
 3138                                 if (n == NULL)
 3139                                         return ENOBUFS;
 3140                                 KASSERT(m->m_len + shortfall < MLEN
 3141                                         /*,
 3142                                           ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
 3143 
 3144                                 /* first copy the data we're stealing from prev */
 3145                                 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
 3146 
 3147                                 /* update prev->m_len accordingly */
 3148                                 prev->m_len -= shortfall;
 3149 
 3150                                 /* copy data from runt m */
 3151                                 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
 3152 
 3153                                 /* n holds what we stole from prev, plus m */
 3154                                 n->m_len = shortfall + m->m_len;
 3155 
 3156                                 /* stitch n into chain and free m */
 3157                                 n->m_next = m->m_next;
 3158                                 prev->m_next = n;
 3159                                 /* KASSERT(m->m_next == NULL); */
 3160                                 m->m_next = NULL;
 3161                                 m_free(m);
 3162                                 m = n;  /* for continuing loop */
 3163                         }
 3164                 }
 3165                 prevlen = m->m_len;
 3166         }
 3167         return 0;
 3168 }
 3169 
 3170 /*
 3171  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3172  * pointers to descriptors.
 3173  */
 3174 int
 3175 bge_encap(sc, m_head, txidx)
 3176         struct bge_softc *sc;
 3177         struct mbuf *m_head;
 3178         u_int32_t *txidx;
 3179 {
 3180         struct bge_tx_bd        *f = NULL;
 3181         u_int32_t               frag, cur, cnt = 0;
 3182         u_int16_t               csum_flags = 0;
 3183         struct txdmamap_pool_entry *dma;
 3184         bus_dmamap_t dmamap;
 3185         int                     i = 0;
 3186         struct m_tag            *mtag;
 3187 
 3188         cur = frag = *txidx;
 3189 
 3190         if (m_head->m_pkthdr.csum_flags) {
 3191                 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
 3192                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3193                 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
 3194                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3195         }
 3196 
 3197         /* 
 3198          * If we were asked to do an outboard checksum, and the NIC
 3199          * has the bug where it sometimes adds in the Ethernet padding,
 3200          * explicitly pad with zeros so the cksum will be correct either way.
 3201          * (For now, do this for all chip versions, until newer
 3202          * are confirmed to not require the workaround.)
 3203          */
 3204         if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
 3205 #ifdef notyet
 3206             (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
 3207 #endif      
 3208             m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
 3209                 goto check_dma_bug;
 3210 
 3211         if (bge_cksum_pad(m_head) != 0)
 3212             return ENOBUFS;
 3213 
 3214 check_dma_bug:
 3215         if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
 3216                 goto doit;
 3217         /*
 3218          * bcm5700 Revision B silicon cannot handle DMA descriptors with
 3219          * less than eight bytes.  If we encounter a teeny mbuf 
 3220          * at the end of a chain, we can pad.  Otherwise, copy.
 3221          */
 3222         if (bge_compact_dma_runt(m_head) != 0)
 3223                 return ENOBUFS;
 3224 
 3225 doit:
 3226         dma = SLIST_FIRST(&sc->txdma_list);
 3227         if (dma == NULL)
 3228                 return ENOBUFS;
 3229         dmamap = dma->dmamap;
 3230 
 3231         /*
 3232          * Start packing the mbufs in this chain into
 3233          * the fragment pointers. Stop when we run out
 3234          * of fragments or hit the end of the mbuf chain.
 3235          */
 3236         if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
 3237             BUS_DMA_NOWAIT))
 3238                 return(ENOBUFS);
 3239 
 3240         mtag = sc->ethercom.ec_nvlans ?
 3241             m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
 3242 
 3243         for (i = 0; i < dmamap->dm_nsegs; i++) {
 3244                 f = &sc->bge_rdata->bge_tx_ring[frag];
 3245                 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
 3246                         break;
 3247                 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
 3248                 f->bge_len = dmamap->dm_segs[i].ds_len;
 3249                 f->bge_flags = csum_flags;
 3250 
 3251                 if (mtag != NULL) {
 3252                         f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
 3253                         f->bge_vlan_tag = *(u_int *)(mtag + 1);
 3254                 } else {
 3255                         f->bge_vlan_tag = 0;
 3256                 }
 3257                 /*
 3258                  * Sanity check: avoid coming within 16 descriptors
 3259                  * of the end of the ring.
 3260                  */
 3261                 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
 3262                         return(ENOBUFS);
 3263                 cur = frag;
 3264                 BGE_INC(frag, BGE_TX_RING_CNT);
 3265                 cnt++;
 3266         }
 3267 
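              /* An early break above means a ring slot was still in use. */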
 3268         if (i < dmamap->dm_nsegs)
 3269                 return ENOBUFS;
 3270 
 3271         bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
 3272             BUS_DMASYNC_PREWRITE);
 3273 
 3274         if (frag == sc->bge_tx_saved_considx)
 3275                 return(ENOBUFS);
 3276 
 3277         sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
 3278         sc->bge_cdata.bge_tx_chain[cur] = m_head;
 3279         SLIST_REMOVE_HEAD(&sc->txdma_list, link);
 3280         sc->txdma[cur] = dma;
 3281         sc->bge_txcnt += cnt;
 3282 
 3283         *txidx = frag;
 3284 
 3285         return(0);
 3286 }
 3287 
 3288 /*
 3289  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3290  * to the mbuf data regions directly in the transmit descriptors.
 3291  */
 3292 void
 3293 bge_start(ifp)
 3294         struct ifnet *ifp;
 3295 {
 3296         struct bge_softc *sc;
 3297         struct mbuf *m_head = NULL;
 3298         u_int32_t prodidx = 0;
 3299         int pkts = 0;
 3300 
 3301         sc = ifp->if_softc;
 3302 
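              /*
               * With no link yet, defer transmission unless the send queue
               * has already backed up past a small threshold.
               */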
 3303         if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
 3304                 return;
 3305 
 3306         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
 3307 
 3308         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3309                 IFQ_POLL(&ifp->if_snd, m_head);
 3310                 if (m_head == NULL)
 3311                         break;
 3312 
 3313 #if 0
 3314                 /*
 3315                  * XXX
 3316                  * safety overkill.  If this is a fragmented packet chain
 3317                  * with delayed TCP/UDP checksums, then only encapsulate
 3318                  * it if we have enough descriptors to handle the entire
 3319                  * chain at once.
 3320                  * (paranoia -- may not actually be needed)
 3321                  */
 3322                 if (m_head->m_flags & M_FIRSTFRAG &&
 3323                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3324                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3325                             m_head->m_pkthdr.csum_data + 16) {
 3326                                 ifp->if_flags |= IFF_OACTIVE;
 3327                                 break;
 3328                         }
 3329                 }
 3330 #endif
 3331 
 3332                 /*
 3333                  * Pack the data into the transmit ring. If we
 3334                  * don't have room, set the OACTIVE flag and wait
 3335                  * for the NIC to drain the ring.
 3336                  */
 3337                 if (bge_encap(sc, m_head, &prodidx)) {
 3338                         ifp->if_flags |= IFF_OACTIVE;
 3339                         break;
 3340                 }
 3341 
 3342                 /* Now we are committed to transmitting the packet. */
 3343                 IFQ_DEQUEUE(&ifp->if_snd, m_head);
 3344                 pkts++;
 3345 
 3346 #if NBPFILTER > 0
 3347                 /*
 3348                  * If there's a BPF listener, bounce a copy of this frame
 3349                  * to him.
 3350                  */
 3351                 if (ifp->if_bpf)
 3352                         bpf_mtap(ifp->if_bpf, m_head);
 3353 #endif
 3354         }
 3355         if (pkts == 0)
 3356                 return;
 3357 
 3358         /* Transmit */
 3359         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3360         if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
 3361                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3362 
 3363         /*
 3364          * Set a timeout in case the chip goes out to lunch.
 3365          */
 3366         ifp->if_timer = 5;
 3367 }
 3368 
 3369 int
 3370 bge_init(ifp)
 3371         struct ifnet *ifp;
 3372 {
 3373         struct bge_softc *sc = ifp->if_softc;
 3374         u_int16_t *m;
 3375         int s, error;
 3376 
 3377         s = splnet();
 3378 
 3379         ifp = &sc->ethercom.ec_if;
 3380 
 3381         /* Cancel pending I/O and flush buffers. */
 3382         bge_stop(sc);
 3383         bge_reset(sc);
 3384         bge_chipinit(sc);
 3385 
 3386         /*
 3387          * Init the various state machines, ring
 3388          * control blocks and firmware.
 3389          */
 3390         error = bge_blockinit(sc);
 3391         if (error != 0) {
 3392                 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
 3393                     error);
 3394                 splx(s);
 3395                 return error;
 3396         }
 3397 
 3400         /* Specify MTU. */
 3401         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3402             ETHER_HDR_LEN + ETHER_CRC_LEN);
 3403 
 3404         /* Load our MAC address. */
 3405         m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
 3406         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3407         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3408 
 3409         /* Enable or disable promiscuous mode as needed. */
 3410         if (ifp->if_flags & IFF_PROMISC) {
 3411                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3412         } else {
 3413                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3414         }
 3415 
 3416         /* Program multicast filter. */
 3417         bge_setmulti(sc);
 3418 
 3419         /* Init RX ring. */
 3420         bge_init_rx_ring_std(sc);
 3421 
 3422         /* Init jumbo RX ring. */
 3423         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3424                 bge_init_rx_ring_jumbo(sc);
 3425 
 3426         /* Init our RX return ring index */
 3427         sc->bge_rx_saved_considx = 0;
 3428 
 3429         /* Init TX ring. */
 3430         bge_init_tx_ring(sc);
 3431 
 3432         /* Turn on transmitter */
 3433         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3434 
 3435         /* Turn on receiver */
 3436         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3437 
 3438         /* Tell firmware we're alive. */
 3439         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3440 
 3441         /* Enable host interrupts. */
 3442         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
 3443         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3444         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3445 
 3446         bge_ifmedia_upd(ifp);
 3447 
 3448         ifp->if_flags |= IFF_RUNNING;
 3449         ifp->if_flags &= ~IFF_OACTIVE;
 3450 
 3451         splx(s);
 3452 
 3453         callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
 3454 
 3455         return 0;
 3456 }
 3457 
 3458 /*
 3459  * Set media options.
 3460  */
 3461 int
 3462 bge_ifmedia_upd(ifp)
 3463         struct ifnet *ifp;
 3464 {
 3465         struct bge_softc *sc = ifp->if_softc;
 3466         struct mii_data *mii = &sc->bge_mii;
 3467         struct ifmedia *ifm = &sc->bge_ifmedia;
 3468 
 3469         /* If this is a 1000baseX NIC, enable the TBI port. */
 3470         if (sc->bge_tbi) {
 3471                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3472                         return(EINVAL);
 3473                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
 3474                 case IFM_AUTO:
 3475                         break;
 3476                 case IFM_1000_SX:
 3477                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3478                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3479                                     BGE_MACMODE_HALF_DUPLEX);
 3480                         } else {
 3481                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3482                                     BGE_MACMODE_HALF_DUPLEX);
 3483                         }
 3484                         break;
 3485                 default:
 3486                         return(EINVAL);
 3487                 }
 3488                 return(0);
 3489         }
 3490 
 3491         sc->bge_link = 0;
 3492         mii_mediachg(mii);
 3493 
 3494         return(0);
 3495 }
 3496 
 3497 /*
 3498  * Report current media status.
 3499  */
 3500 void
 3501 bge_ifmedia_sts(ifp, ifmr)
 3502         struct ifnet *ifp;
 3503         struct ifmediareq *ifmr;
 3504 {
 3505         struct bge_softc *sc = ifp->if_softc;
 3506         struct mii_data *mii = &sc->bge_mii;
 3507 
 3508         if (sc->bge_tbi) {
 3509                 ifmr->ifm_status = IFM_AVALID;
 3510                 ifmr->ifm_active = IFM_ETHER;
 3511                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3512                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3513                         ifmr->ifm_status |= IFM_ACTIVE;
 3514                 ifmr->ifm_active |= IFM_1000_SX;
 3515                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3516                         ifmr->ifm_active |= IFM_HDX;
 3517                 else
 3518                         ifmr->ifm_active |= IFM_FDX;
 3519                 return;
 3520         }
 3521 
 3522         mii_pollstat(mii);
 3523         ifmr->ifm_active = mii->mii_media_active;
 3524         ifmr->ifm_status = mii->mii_media_status;
 3525 }
 3526 
 3527 int
 3528 bge_ioctl(ifp, command, data)
 3529         struct ifnet *ifp;
 3530         u_long command;
 3531         caddr_t data;
 3532 {
 3533         struct bge_softc *sc = ifp->if_softc;
 3534         struct ifreq *ifr = (struct ifreq *) data;
 3535         int s, error = 0;
 3536         struct mii_data *mii;
 3537 
 3538         s = splnet();
 3539 
 3540         switch(command) {
 3541         case SIOCSIFFLAGS:
 3542                 if (ifp->if_flags & IFF_UP) {
 3543                         /*
 3544                          * If only the state of the PROMISC flag changed,
 3545                          * then just use the 'set promisc mode' command
 3546                          * instead of reinitializing the entire NIC. Doing
 3547                          * a full re-init means reloading the firmware and
 3548                          * waiting for it to start up, which may take a
 3549                          * second or two.
 3550                          */
 3551                         if (ifp->if_flags & IFF_RUNNING &&
 3552                             ifp->if_flags & IFF_PROMISC &&
 3553                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3554                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3555                                     BGE_RXMODE_RX_PROMISC);
 3556                         } else if (ifp->if_flags & IFF_RUNNING &&
 3557                             !(ifp->if_flags & IFF_PROMISC) &&
 3558                             sc->bge_if_flags & IFF_PROMISC) {
 3559                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3560                                     BGE_RXMODE_RX_PROMISC);
 3561                         } else
 3562                                 bge_init(ifp);
 3563                 } else {
 3564                         if (ifp->if_flags & IFF_RUNNING) {
 3565                                 bge_stop(sc);
 3566                         }
 3567                 }
 3568                 sc->bge_if_flags = ifp->if_flags;
 3569                 error = 0;
 3570                 break;
 3571         case SIOCSIFMEDIA:
 3572         case SIOCGIFMEDIA:
 3573                 if (sc->bge_tbi) {
 3574                         error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
 3575                             command);
 3576                 } else {
 3577                         mii = &sc->bge_mii;
 3578                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
 3579                             command);
 3580                 }
 3582                 break;
 3583         default:
 3584                 error = ether_ioctl(ifp, command, data);
 3585                 if (error == ENETRESET) {
 3586                         bge_setmulti(sc);
 3587                         error = 0;
 3588                 }
 3589                 break;
 3590         }
 3591 
 3592         splx(s);
 3593 
 3594         return(error);
 3595 }
 3596 
 3597 void
 3598 bge_watchdog(ifp)
 3599         struct ifnet *ifp;
 3600 {
 3601         struct bge_softc *sc;
 3602 
 3603         sc = ifp->if_softc;
 3604 
 3605         printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
 3606 
 3607         ifp->if_flags &= ~IFF_RUNNING;
 3608         bge_init(ifp);
 3609 
 3610         ifp->if_oerrors++;
 3611 }
 3612 
 3613 static void
 3614 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
 3615 {
 3616         int i;
 3617 
 3618         BGE_CLRBIT(sc, reg, bit);
 3619 
 3620         for (i = 0; i < BGE_TIMEOUT; i++) {
 3621                 if ((CSR_READ_4(sc, reg) & bit) == 0)
 3622                         return;
 3623                 delay(100);
 3624         }
 3625 
 3626         printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
 3627             sc->bge_dev.dv_xname, (u_long) reg, bit);
 3628 }
 3629 
 3630 /*
 3631  * Stop the adapter and free any mbufs allocated to the
 3632  * RX and TX lists.
 3633  */
 3634 void
 3635 bge_stop(sc)
 3636         struct bge_softc *sc;
 3637 {
 3638         struct ifnet *ifp = &sc->ethercom.ec_if;
 3639 
 3640         callout_stop(&sc->bge_timeout);
 3641 
 3642         /*
 3643          * Disable all of the receiver blocks
 3644          */
 3645         bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3646         bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3647         bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3648         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 3649                 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3650         }
 3651         bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3652         bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3653         bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3654 
 3655         /*
 3656          * Disable all of the transmit blocks
 3657          */
 3658         bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3659         bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3660         bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3661         bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3662         bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3663         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 3664                 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3665         }
 3666         bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3667 
 3668         /*
 3669          * Shut down all of the memory managers and related
 3670          * state machines.
 3671          */
 3672         bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3673         bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3674         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 3675                 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3676         }
 3677 
 3678         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3679         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3680 
 3681         if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
 3682                 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3683                 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3684         }
 3685 
 3686         /* Disable host interrupts. */
 3687         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3688         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3689 
 3690         /*
 3691          * Tell firmware we're shutting down.
 3692          */
 3693         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3694 
 3695         /* Free the RX lists. */
 3696         bge_free_rx_ring_std(sc);
 3697 
 3698         /* Free jumbo RX list. */
 3699         bge_free_rx_ring_jumbo(sc);
 3700 
 3701         /* Free TX buffers. */
 3702         bge_free_tx_ring(sc);
 3703 
 3704         /*
 3705          * Isolate/power down the PHY.
 3706          */
 3707         if (!sc->bge_tbi)
 3708                 mii_down(&sc->bge_mii);
 3709 
 3710         sc->bge_link = 0;
 3711 
 3712         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3713 
 3714         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 3715 }
 3716 
 3717 /*
 3718  * Stop all chip I/O so that the kernel's probe routines don't
 3719  * get confused by errant DMAs when rebooting.
 3720  */
 3721 void
 3722 bge_shutdown(xsc)
 3723         void *xsc;
 3724 {
 3725         struct bge_softc *sc = (struct bge_softc *)xsc;
 3726 
 3727         bge_stop(sc);
 3728         bge_reset(sc);
 3729 }
 3730 
 3731 
 3732 static int
 3733 sysctl_bge_verify(SYSCTLFN_ARGS)
 3734 {
 3735         int error, t;
 3736         struct sysctlnode node;
 3737 
 3738         node = *rnode;
 3739         t = *(int*)rnode->sysctl_data;
 3740         node.sysctl_data = &t;
 3741         error = sysctl_lookup(SYSCTLFN_CALL(&node));
 3742         if (error || newp == NULL)
 3743                 return (error);
 3744 
 3745 #if 0
 3746         DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
 3747             node.sysctl_num, rnode->sysctl_num));
 3748 #endif
 3749 
 3750         if (node.sysctl_num == bge_rxthresh_nodenum) {
 3751                 if (t < 0 || t >= NBGE_RX_THRESH)
 3752                         return (EINVAL);
 3753                 bge_update_all_threshes(t);
 3754         } else
 3755                 return (EINVAL);
 3756 
 3757         *(int*)rnode->sysctl_data = t;
 3758 
 3759         return (0);
 3760 }
 3761 
 3762 /*
 3763  * Set up sysctl(3) MIB, hw.bge.*.
 3764  *
 3765  * TBD condition SYSCTL_PERMANENT on being an LKM or not
 3766  */
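/*
 * Usage sketch (an assumption, not taken from this file): once the
 * node exists, the level should be settable from userland with
 * something like "sysctl -w hw.bge.rx_lvl=2", where valid levels
 * are 0 .. NBGE_RX_THRESH - 1 (enforced by sysctl_bge_verify above).
 */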
 3767 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
 3768 {
 3769         int rc, bge_root_num;
 3770         struct sysctlnode *node;
 3771 
 3772         if ((rc = sysctl_createv(clog, 0, NULL, NULL,
 3773             CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
 3774             NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
 3775                 goto err;
 3776         }
 3777 
 3778         if ((rc = sysctl_createv(clog, 0, NULL, &node,
 3779             CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
 3780             SYSCTL_DESCR("BGE interface controls"),
 3781             NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
 3782                 goto err;
 3783         }
 3784 
 3785         bge_root_num = node->sysctl_num;
 3786 
 3787         /* BGE Rx interrupt mitigation level */
 3788         if ((rc = sysctl_createv(clog, 0, NULL, &node, 
 3789             CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
 3790             CTLTYPE_INT, "rx_lvl",
 3791             SYSCTL_DESCR("BGE receive interrupt mitigation level"),
 3792             sysctl_bge_verify, 0,
 3793             &bge_rx_thresh_lvl,
 3794             0, CTL_HW, bge_root_num, CTL_CREATE,
 3795             CTL_EOL)) != 0) {
 3796                 goto err;
 3797         }
 3798 
 3799         bge_rxthresh_nodenum = node->sysctl_num;
 3800 
 3801         return;
 3802 
 3803 err:
 3804         printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
 3805 }
