FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c


    1 /*-
    2  * Copyright (c) 2001 Wind River Systems
    3  * Copyright (c) 1997, 1998, 1999, 2001
    4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. All advertising materials mentioning features or use of this software
   15  *    must display the following acknowledgement:
   16  *      This product includes software developed by Bill Paul.
   17  * 4. Neither the name of the author nor the names of any co-contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   31  * THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: src/sys/dev/bge/if_bge.c,v 1.72.2.16 2006/01/29 15:39:03 emaste Exp $");
   36 
   37 /*
   38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
   39  *
   40  * The Broadcom BCM5700 is based on technology originally developed by
   41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
   42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
   43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
   44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
   45  * frames, highly configurable RX filtering, and 16 RX and TX queues
   46  * (which, along with RX filter rules, can be used for QOS applications).
   47  * Other features, such as TCP segmentation, may be available as part
   48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
   49  * firmware images can be stored in hardware and need not be compiled
   50  * into the driver.
   51  *
   52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
   53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
   54  *
   55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
   56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
   57  * does not support external SSRAM.
   58  *
   59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
   60  * brand name, which is functionally similar but lacks PCI-X support.
   61  *
   62  * Without external SSRAM, you can have at most 4 TX rings,
   63  * and the use of the mini RX ring is disabled. This seems to imply
   64  * that these features are simply not available on the BCM5701. As a
   65  * result, this driver does not implement any support for the mini RX
   66  * ring.
   67  */
   68 
   69 #include <sys/param.h>
   70 #include <sys/endian.h>
   71 #include <sys/systm.h>
   72 #include <sys/sockio.h>
   73 #include <sys/mbuf.h>
   74 #include <sys/malloc.h>
   75 #include <sys/kernel.h>
   76 #include <sys/module.h>
   77 #include <sys/socket.h>
   78 #include <sys/queue.h>
   79 
   80 #include <net/if.h>
   81 #include <net/if_arp.h>
   82 #include <net/ethernet.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 
   86 #include <net/bpf.h>
   87 
   88 #include <net/if_types.h>
   89 #include <net/if_vlan_var.h>
   90 
   91 #include <netinet/in_systm.h>
   92 #include <netinet/in.h>
   93 #include <netinet/ip.h>
   94 
   95 #include <machine/clock.h>      /* for DELAY */
   96 #include <machine/bus_memio.h>
   97 #include <machine/bus.h>
   98 #include <machine/resource.h>
   99 #include <sys/bus.h>
  100 #include <sys/rman.h>
  101 
  102 #include <dev/mii/mii.h>
  103 #include <dev/mii/miivar.h>
  104 #include "miidevs.h"
  105 #include <dev/mii/brgphyreg.h>
  106 
  107 #include <dev/pci/pcireg.h>
  108 #include <dev/pci/pcivar.h>
  109 
  110 #include <dev/bge/if_bgereg.h>
  111 
  112 #include "opt_bge.h"
  113 
  114 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
  115 
  116 MODULE_DEPEND(bge, pci, 1, 1, 1);
  117 MODULE_DEPEND(bge, ether, 1, 1, 1);
  118 MODULE_DEPEND(bge, miibus, 1, 1, 1);
  119 
  120 /* "controller miibus0" required.  See GENERIC if you get errors here. */
  121 #include "miibus_if.h"
  122 
  123 /*
  124  * Various supported device vendors/types and their names. Note: the
  125  * spec seems to indicate that the hardware still has Alteon's vendor
  126  * ID burned into it, though it will always be overridden by the vendor
  127  * ID in the EEPROM. Just to be safe, we cover all possibilities.
  128  */
  129 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
  130 
  131 static struct bge_type bge_devs[] = {
  132         { ALT_VENDORID, ALT_DEVICEID_BCM5700,
  133                 "Broadcom BCM5700 Gigabit Ethernet" },
  134         { ALT_VENDORID, ALT_DEVICEID_BCM5701,
  135                 "Broadcom BCM5701 Gigabit Ethernet" },
  136         { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
  137                 "Broadcom BCM5700 Gigabit Ethernet" },
  138         { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
  139                 "Broadcom BCM5701 Gigabit Ethernet" },
  140         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
  141                 "Broadcom BCM5702 Gigabit Ethernet" },
  142         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
  143                 "Broadcom BCM5702X Gigabit Ethernet" },
  144         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
  145                 "Broadcom BCM5703 Gigabit Ethernet" },
  146         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
  147                 "Broadcom BCM5703X Gigabit Ethernet" },
  148         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
  149                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
  150         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
  151                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
  152         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
  153                 "Broadcom BCM5705 Gigabit Ethernet" },
  154         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
  155                 "Broadcom BCM5705K Gigabit Ethernet" },
  156         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
  157                 "Broadcom BCM5705M Gigabit Ethernet" },
  158         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
  159                 "Broadcom BCM5705M Gigabit Ethernet" },
  160         { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
  161                 "Broadcom BCM5714C Gigabit Ethernet" },
  162         { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
  163                 "Broadcom BCM5721 Gigabit Ethernet" },
  164         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
  165                 "Broadcom BCM5750 Gigabit Ethernet" },
  166         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
  167                 "Broadcom BCM5750M Gigabit Ethernet" },
  168         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
  169                 "Broadcom BCM5751 Gigabit Ethernet" },
  170         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
  171                 "Broadcom BCM5751M Gigabit Ethernet" },
  172         { BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
  173                 "Broadcom BCM5752 Gigabit Ethernet" },
  174         { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
  175                 "Broadcom BCM5782 Gigabit Ethernet" },
  176         { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
  177                 "Broadcom BCM5788 Gigabit Ethernet" },
  178         { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
  179                 "Broadcom BCM5789 Gigabit Ethernet" },
  180         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
  181                 "Broadcom BCM5901 Fast Ethernet" },
  182         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
  183                 "Broadcom BCM5901A2 Fast Ethernet" },
  184         { SK_VENDORID, SK_DEVICEID_ALTIMA,
  185                 "SysKonnect Gigabit Ethernet" },
  186         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
  187                 "Altima AC1000 Gigabit Ethernet" },
  188         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
  189                 "Altima AC1002 Gigabit Ethernet" },
  190         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
  191                 "Altima AC9100 Gigabit Ethernet" },
  192         { 0, 0, NULL }
  193 };
  194 
  195 static int bge_probe            (device_t);
  196 static int bge_attach           (device_t);
  197 static int bge_detach           (device_t);
  198 static void bge_release_resources
  199                                 (struct bge_softc *);
  200 static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
  201 static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
  202                                     bus_size_t, int);
  203 static int bge_dma_alloc        (device_t);
  204 static void bge_dma_free        (struct bge_softc *);
  205 
  206 static void bge_txeof           (struct bge_softc *);
  207 static void bge_rxeof           (struct bge_softc *);
  208 
  209 static void bge_tick_locked     (struct bge_softc *);
  210 static void bge_tick            (void *);
  211 static void bge_stats_update    (struct bge_softc *);
  212 static void bge_stats_update_regs
  213                                 (struct bge_softc *);
  214 static int bge_encap            (struct bge_softc *, struct mbuf *,
  215                                         u_int32_t *);
  216 
  217 static void bge_intr            (void *);
  218 static void bge_start_locked    (struct ifnet *);
  219 static void bge_start           (struct ifnet *);
  220 static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
  221 static void bge_init_locked     (struct bge_softc *);
  222 static void bge_init            (void *);
  223 static void bge_stop            (struct bge_softc *);
  224 static void bge_watchdog                (struct ifnet *);
  225 static void bge_shutdown                (device_t);
  226 static int bge_ifmedia_upd      (struct ifnet *);
  227 static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
  228 
  229 static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
  230 static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);
  231 
  232 static void bge_setmulti        (struct bge_softc *);
  233 
  234 static void bge_handle_events   (struct bge_softc *);
  235 static int bge_alloc_jumbo_mem  (struct bge_softc *);
  236 static void bge_free_jumbo_mem  (struct bge_softc *);
  237 static void *bge_jalloc         (struct bge_softc *);
  238 static void bge_jfree           (void *, void *);
  239 static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
  240 static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
  241 static int bge_init_rx_ring_std (struct bge_softc *);
  242 static void bge_free_rx_ring_std        (struct bge_softc *);
  243 static int bge_init_rx_ring_jumbo       (struct bge_softc *);
  244 static void bge_free_rx_ring_jumbo      (struct bge_softc *);
  245 static void bge_free_tx_ring    (struct bge_softc *);
  246 static int bge_init_tx_ring     (struct bge_softc *);
  247 
  248 static int bge_chipinit         (struct bge_softc *);
  249 static int bge_blockinit        (struct bge_softc *);
  250 
  251 #ifdef notdef
  252 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
  253 static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
  254 static void bge_vpd_read        (struct bge_softc *);
  255 #endif
  256 
  257 static u_int32_t bge_readmem_ind
  258                                 (struct bge_softc *, int);
  259 static void bge_writemem_ind    (struct bge_softc *, int, int);
  260 #ifdef notdef
  261 static u_int32_t bge_readreg_ind
  262                                 (struct bge_softc *, int);
  263 #endif
  264 static void bge_writereg_ind    (struct bge_softc *, int, int);
  265 
  266 static int bge_miibus_readreg   (device_t, int, int);
  267 static int bge_miibus_writereg  (device_t, int, int, int);
  268 static void bge_miibus_statchg  (device_t);
  269 
  270 static void bge_reset           (struct bge_softc *);
  271 
  272 static device_method_t bge_methods[] = {
  273         /* Device interface */
  274         DEVMETHOD(device_probe,         bge_probe),
  275         DEVMETHOD(device_attach,        bge_attach),
  276         DEVMETHOD(device_detach,        bge_detach),
  277         DEVMETHOD(device_shutdown,      bge_shutdown),
  278 
  279         /* bus interface */
  280         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  281         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  282 
  283         /* MII interface */
  284         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
  285         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
  286         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
  287 
  288         { 0, 0 }
  289 };
  290 
  291 static driver_t bge_driver = {
  292         "bge",
  293         bge_methods,
  294         sizeof(struct bge_softc)
  295 };
  296 
  297 static devclass_t bge_devclass;
  298 
  299 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
  300 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
  301 
  302 static u_int32_t
  303 bge_readmem_ind(sc, off)
  304         struct bge_softc *sc;
  305         int off;
  306 {
  307         device_t dev;
  308 
  309         dev = sc->bge_dev;
  310 
  311         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  312         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
  313 }
  314 
  315 static void
  316 bge_writemem_ind(sc, off, val)
  317         struct bge_softc *sc;
  318         int off, val;
  319 {
  320         device_t dev;
  321 
  322         dev = sc->bge_dev;
  323 
  324         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  325         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
  326 
  327         return;
  328 }
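
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: minimal use of the
 * indirect memory-window accessors above.  The window base/data registers
 * let the host read and write NIC-internal RAM through PCI config space;
 * bge_chipinit() below leans on the same window (via BGE_MEMWIN_WRITE)
 * to clear the statistics and status blocks.  BGE_STATS_BLOCK is the
 * NIC-memory offset defined in if_bgereg.h.
 */
static void
bge_memwin_sketch(struct bge_softc *sc)
{
        bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);
        if (bge_readmem_ind(sc, BGE_STATS_BLOCK) != 0)
                printf("bge%d: memory window readback failed\n",
                    sc->bge_unit);
}
#endif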
  329 
  330 #ifdef notdef
  331 static u_int32_t
  332 bge_readreg_ind(sc, off)
  333         struct bge_softc *sc;
  334         int off;
  335 {
  336         device_t dev;
  337 
  338         dev = sc->bge_dev;
  339 
  340         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  341         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
  342 }
  343 #endif
  344 
  345 static void
  346 bge_writereg_ind(sc, off, val)
  347         struct bge_softc *sc;
  348         int off, val;
  349 {
  350         device_t dev;
  351 
  352         dev = sc->bge_dev;
  353 
  354         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  355         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
  356 
  357         return;
  358 }
  359 
  360 /*
  361  * Map a single buffer address.
  362  */
  363 
  364 static void
  365 bge_dma_map_addr(arg, segs, nseg, error)
  366         void *arg;
  367         bus_dma_segment_t *segs;
  368         int nseg;
  369         int error;
  370 {
  371         struct bge_dmamap_arg *ctx;
  372 
  373         if (error)
  374                 return;
  375 
  376         ctx = arg;
  377 
  378         if (nseg > ctx->bge_maxsegs) {
  379                 ctx->bge_maxsegs = 0;
  380                 return;
  381         }
  382 
  383         ctx->bge_busaddr = segs->ds_addr;
  384 
  385         return;
  386 }
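
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: how the
 * bge_dma_map_addr() callback above is consumed.  bus_dmamap_load()
 * resolves the buffer into bus segments and invokes the callback with
 * the segment list; the bge_dmamap_arg lets the callback hand the bus
 * address back and signal a too-many-segments condition by zeroing
 * bge_maxsegs.  bge_newbuf_std() below follows exactly this pattern.
 */
static int
bge_load_buf_sketch(struct bge_softc *sc, bus_dmamap_t map, void *buf,
    bus_size_t len, bus_addr_t *paddr)
{
        struct bge_dmamap_arg ctx;
        int error;

        ctx.sc = sc;
        ctx.bge_maxsegs = 1;    /* accept a single segment only */
        error = bus_dmamap_load(sc->bge_cdata.bge_mtag, map, buf, len,
            bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0)
                return(ENOMEM);
        *paddr = ctx.bge_busaddr;

        return(0);
}
#endif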
  387 
  388 /*
  389  * Map an mbuf chain into an TX ring.
  390  */
  391 
  392 static void
  393 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
  394         void *arg;
  395         bus_dma_segment_t *segs;
  396         int nseg;
  397         bus_size_t mapsize;
  398         int error;
  399 {
  400         struct bge_dmamap_arg *ctx;
  401         struct bge_tx_bd *d = NULL;
  402         int i = 0, idx;
  403 
  404         if (error)
  405                 return;
  406 
  407         ctx = arg;
  408 
   409         /* Signal error to caller if there are too many segments */
  410         if (nseg > ctx->bge_maxsegs) {
  411                 ctx->bge_maxsegs = 0;
  412                 return;
  413         }
  414 
  415         idx = ctx->bge_idx;
  416         while(1) {
  417                 d = &ctx->bge_ring[idx];
  418                 d->bge_addr.bge_addr_lo =
  419                     htole32(BGE_ADDR_LO(segs[i].ds_addr));
  420                 d->bge_addr.bge_addr_hi =
  421                     htole32(BGE_ADDR_HI(segs[i].ds_addr));
  422                 d->bge_len = htole16(segs[i].ds_len);
  423                 d->bge_flags = htole16(ctx->bge_flags);
  424                 i++;
  425                 if (i == nseg)
  426                         break;
  427                 BGE_INC(idx, BGE_TX_RING_CNT);
  428         }
  429 
  430         d->bge_flags |= htole16(BGE_TXBDFLAG_END);
  431         ctx->bge_maxsegs = nseg;
  432         ctx->bge_idx = idx;
  433 
  434         return;
  435 }
  436 
  437 
  438 #ifdef notdef
  439 static u_int8_t
  440 bge_vpd_readbyte(sc, addr)
  441         struct bge_softc *sc;
  442         int addr;
  443 {
  444         int i;
  445         device_t dev;
  446         u_int32_t val;
  447 
  448         dev = sc->bge_dev;
  449         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
  450         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  451                 DELAY(10);
  452                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
  453                         break;
  454         }
  455 
   456         if (i == BGE_TIMEOUT * 10) {
  457                 printf("bge%d: VPD read timed out\n", sc->bge_unit);
  458                 return(0);
  459         }
  460 
  461         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
  462 
  463         return((val >> ((addr % 4) * 8)) & 0xFF);
  464 }
  465 
  466 static void
  467 bge_vpd_read_res(sc, res, addr)
  468         struct bge_softc *sc;
  469         struct vpd_res *res;
  470         int addr;
  471 {
  472         int i;
  473         u_int8_t *ptr;
  474 
  475         ptr = (u_int8_t *)res;
  476         for (i = 0; i < sizeof(struct vpd_res); i++)
  477                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
  478 
  479         return;
  480 }
  481 
  482 static void
  483 bge_vpd_read(sc)
  484         struct bge_softc *sc;
  485 {
  486         int pos = 0, i;
  487         struct vpd_res res;
  488 
  489         if (sc->bge_vpd_prodname != NULL)
  490                 free(sc->bge_vpd_prodname, M_DEVBUF);
  491         if (sc->bge_vpd_readonly != NULL)
  492                 free(sc->bge_vpd_readonly, M_DEVBUF);
  493         sc->bge_vpd_prodname = NULL;
  494         sc->bge_vpd_readonly = NULL;
  495 
  496         bge_vpd_read_res(sc, &res, pos);
  497 
  498         if (res.vr_id != VPD_RES_ID) {
  499                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  500                         sc->bge_unit, VPD_RES_ID, res.vr_id);
  501                 return;
  502         }
  503 
  504         pos += sizeof(res);
  505         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
  506         for (i = 0; i < res.vr_len; i++)
  507                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
  508         sc->bge_vpd_prodname[i] = '\0';
  509         pos += i;
  510 
  511         bge_vpd_read_res(sc, &res, pos);
  512 
  513         if (res.vr_id != VPD_RES_READ) {
  514                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  515                     sc->bge_unit, VPD_RES_READ, res.vr_id);
  516                 return;
  517         }
  518 
  519         pos += sizeof(res);
  520         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
   521         for (i = 0; i < res.vr_len; i++)
  522                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
  523 
  524         return;
  525 }
  526 #endif
  527 
  528 /*
  529  * Read a byte of data stored in the EEPROM at address 'addr.' The
  530  * BCM570x supports both the traditional bitbang interface and an
  531  * auto access interface for reading the EEPROM. We use the auto
  532  * access method.
  533  */
  534 static u_int8_t
  535 bge_eeprom_getbyte(sc, addr, dest)
  536         struct bge_softc *sc;
  537         int addr;
  538         u_int8_t *dest;
  539 {
  540         int i;
  541         u_int32_t byte = 0;
  542 
  543         /*
  544          * Enable use of auto EEPROM access so we can avoid
  545          * having to use the bitbang method.
  546          */
  547         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
  548 
  549         /* Reset the EEPROM, load the clock period. */
  550         CSR_WRITE_4(sc, BGE_EE_ADDR,
  551             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
  552         DELAY(20);
  553 
  554         /* Issue the read EEPROM command. */
  555         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
  556 
  557         /* Wait for completion */
  558         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
  559                 DELAY(10);
  560                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
  561                         break;
  562         }
  563 
   564         if (i == BGE_TIMEOUT * 10) {
  565                 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
  566                 return(0);
  567         }
  568 
  569         /* Get result. */
  570         byte = CSR_READ_4(sc, BGE_EE_DATA);
  571 
  572         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
  573 
  574         return(0);
  575 }
  576 
  577 /*
  578  * Read a sequence of bytes from the EEPROM.
  579  */
  580 static int
  581 bge_read_eeprom(sc, dest, off, cnt)
  582         struct bge_softc *sc;
  583         caddr_t dest;
  584         int off;
  585         int cnt;
  586 {
  587         int err = 0, i;
  588         u_int8_t byte = 0;
  589 
  590         for (i = 0; i < cnt; i++) {
  591                 err = bge_eeprom_getbyte(sc, off + i, &byte);
  592                 if (err)
  593                         break;
  594                 *(dest + i) = byte;
  595         }
  596 
  597         return(err ? 1 : 0);
  598 }
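
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: typical use of
 * bge_read_eeprom() above.  bge_attach() (later in this file) reads the
 * factory station address this way when it cannot be found in NIC
 * memory; BGE_EE_MAC_OFFSET is the EEPROM offset defined in
 * if_bgereg.h.
 */
static int
bge_get_eaddr_sketch(struct bge_softc *sc, u_char eaddr[ETHER_ADDR_LEN])
{
        if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
            ETHER_ADDR_LEN)) {
                printf("bge%d: failed to read station address\n",
                    sc->bge_unit);
                return(EIO);
        }

        return(0);
}
#endif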
  599 
  600 static int
  601 bge_miibus_readreg(dev, phy, reg)
  602         device_t dev;
  603         int phy, reg;
  604 {
  605         struct bge_softc *sc;
  606         u_int32_t val, autopoll;
  607         int i;
  608 
  609         sc = device_get_softc(dev);
  610 
  611         /*
  612          * Broadcom's own driver always assumes the internal
  613          * PHY is at GMII address 1. On some chips, the PHY responds
  614          * to accesses at all addresses, which could cause us to
   615  * bogusly attach the PHY 32 times at probe time. Always
   616  * restricting the lookup to address 1 is simpler than
   617  * trying to figure out which chip revisions should be
  618          * special-cased.
  619          */
  620         if (phy != 1)
  621                 return(0);
  622 
  623         /* Reading with autopolling on may trigger PCI errors */
  624         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  625         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  626                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  627                 DELAY(40);
  628         }
  629 
  630         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
  631             BGE_MIPHY(phy)|BGE_MIREG(reg));
  632 
  633         for (i = 0; i < BGE_TIMEOUT; i++) {
  634                 val = CSR_READ_4(sc, BGE_MI_COMM);
  635                 if (!(val & BGE_MICOMM_BUSY))
  636                         break;
  637         }
  638 
  639         if (i == BGE_TIMEOUT) {
  640                 printf("bge%d: PHY read timed out\n", sc->bge_unit);
  641                 val = 0;
  642                 goto done;
  643         }
  644 
  645         val = CSR_READ_4(sc, BGE_MI_COMM);
  646 
  647 done:
  648         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  649                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  650                 DELAY(40);
  651         }
  652 
  653         if (val & BGE_MICOMM_READFAIL)
  654                 return(0);
  655 
  656         return(val & 0xFFFF);
  657 }
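
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: the autopoll dance
 * shared by bge_miibus_readreg() above and bge_miibus_writereg() below,
 * factored into a hypothetical helper pair.  Autopolling must be paused
 * around manual BGE_MI_COMM accesses or the chip may generate PCI
 * errors; the saved mode word tells the caller whether to re-enable it.
 */
static u_int32_t
bge_autopoll_pause(struct bge_softc *sc)
{
        u_int32_t autopoll;

        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        return(autopoll);
}

static void
bge_autopoll_resume(struct bge_softc *sc, u_int32_t autopoll)
{
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }
}
#endif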
  658 
  659 static int
  660 bge_miibus_writereg(dev, phy, reg, val)
  661         device_t dev;
  662         int phy, reg, val;
  663 {
  664         struct bge_softc *sc;
  665         u_int32_t autopoll;
  666         int i;
  667 
  668         sc = device_get_softc(dev);
  669 
   670         /* Writing with autopolling on may trigger PCI errors */
  671         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  672         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  673                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  674                 DELAY(40);
  675         }
  676 
  677         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
  678             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
  679 
  680         for (i = 0; i < BGE_TIMEOUT; i++) {
  681                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
  682                         break;
  683         }
  684 
  685         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  686                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  687                 DELAY(40);
  688         }
  689 
  690         if (i == BGE_TIMEOUT) {
   691                 printf("bge%d: PHY write timed out\n", sc->bge_unit);
  692                 return(0);
  693         }
  694 
  695         return(0);
  696 }
  697 
  698 static void
  699 bge_miibus_statchg(dev)
  700         device_t dev;
  701 {
  702         struct bge_softc *sc;
  703         struct mii_data *mii;
  704 
  705         sc = device_get_softc(dev);
  706         mii = device_get_softc(sc->bge_miibus);
  707 
  708         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
  709         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
  710                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
  711         } else {
  712                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
  713         }
  714 
  715         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  716                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  717         } else {
  718                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  719         }
  720 
  721         return;
  722 }
  723 
  724 /*
  725  * Handle events that have triggered interrupts.
  726  */
  727 static void
  728 bge_handle_events(sc)
  729         struct bge_softc                *sc;
  730 {
  731 
  732         return;
  733 }
  734 
  735 /*
  736  * Memory management for jumbo frames.
  737  */
  738 
  739 static int
  740 bge_alloc_jumbo_mem(sc)
  741         struct bge_softc                *sc;
  742 {
  743         caddr_t                 ptr;
  744         register int            i, error;
  745         struct bge_jpool_entry   *entry;
  746 
  747         /* Create tag for jumbo buffer block */
  748 
  749         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
  750             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
  751             NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
  752             &sc->bge_cdata.bge_jumbo_tag);
  753 
  754         if (error) {
  755                 printf("bge%d: could not allocate jumbo dma tag\n",
  756                     sc->bge_unit);
  757                 return (ENOMEM);
  758         }
  759 
  760         /* Allocate DMA'able memory for jumbo buffer block */
  761 
  762         error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
  763             (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
  764             &sc->bge_cdata.bge_jumbo_map);
  765 
  766         if (error)
  767                 return (ENOMEM);
  768 
  769         SLIST_INIT(&sc->bge_jfree_listhead);
  770         SLIST_INIT(&sc->bge_jinuse_listhead);
  771 
  772         /*
  773          * Now divide it up into 9K pieces and save the addresses
  774          * in an array.
  775          */
  776         ptr = sc->bge_ldata.bge_jumbo_buf;
  777         for (i = 0; i < BGE_JSLOTS; i++) {
  778                 sc->bge_cdata.bge_jslots[i] = ptr;
  779                 ptr += BGE_JLEN;
  780                 entry = malloc(sizeof(struct bge_jpool_entry),
  781                     M_DEVBUF, M_NOWAIT);
  782                 if (entry == NULL) {
  783                         bge_free_jumbo_mem(sc);
  784                         sc->bge_ldata.bge_jumbo_buf = NULL;
  785                         printf("bge%d: no memory for jumbo "
  786                             "buffer queue!\n", sc->bge_unit);
  787                         return(ENOBUFS);
  788                 }
  789                 entry->slot = i;
  790                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
  791                     entry, jpool_entries);
  792         }
  793 
  794         return(0);
  795 }
  796 
  797 static void
  798 bge_free_jumbo_mem(sc)
  799         struct bge_softc *sc;
  800 {
  801         int i;
  802         struct bge_jpool_entry *entry;
  803 
  804         for (i = 0; i < BGE_JSLOTS; i++) {
  805                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  806                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  807                 free(entry, M_DEVBUF);
  808         }
  809 
  810         /* Destroy jumbo buffer block */
  811 
   812         if (sc->bge_ldata.bge_jumbo_buf)
  813                 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
  814                     sc->bge_ldata.bge_jumbo_buf,
  815                     sc->bge_cdata.bge_jumbo_map);
  816 
   817         if (sc->bge_cdata.bge_jumbo_map)
  818                 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
  819                     sc->bge_cdata.bge_jumbo_map);
  820 
  821         if (sc->bge_cdata.bge_jumbo_tag)
  822                 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
  823 
  824         return;
  825 }
  826 
  827 /*
  828  * Allocate a jumbo buffer.
  829  */
  830 static void *
  831 bge_jalloc(sc)
  832         struct bge_softc                *sc;
  833 {
  834         struct bge_jpool_entry   *entry;
  835 
  836         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  837 
  838         if (entry == NULL) {
  839                 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
  840                 return(NULL);
  841         }
  842 
  843         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  844         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
  845         return(sc->bge_cdata.bge_jslots[entry->slot]);
  846 }
  847 
  848 /*
  849  * Release a jumbo buffer.
  850  */
  851 static void
  852 bge_jfree(buf, args)
  853         void *buf;
  854         void *args;
  855 {
  856         struct bge_jpool_entry *entry;
  857         struct bge_softc *sc;
  858         int i;
  859 
  860         /* Extract the softc struct pointer. */
  861         sc = (struct bge_softc *)args;
  862 
  863         if (sc == NULL)
  864                 panic("bge_jfree: can't find softc pointer!");
  865 
  866         /* calculate the slot this buffer belongs to */
  867 
  868         i = ((vm_offset_t)buf
  869              - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
  870 
  871         if ((i < 0) || (i >= BGE_JSLOTS))
  872                 panic("bge_jfree: asked to free buffer that we don't manage!");
  873 
  874         entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
  875         if (entry == NULL)
  876                 panic("bge_jfree: buffer not in use!");
  877         entry->slot = i;
  878         SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
  879         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
  880 
  881         return;
  882 }
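
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: the life cycle of a
 * jumbo buffer from the pool above.  bge_jalloc() hands out a 9K slot;
 * MEXTADD() attaches the slot to an mbuf as external storage with
 * bge_jfree() registered as the free routine, so the slot returns to
 * the free list automatically when the mbuf is released.
 * bge_newbuf_jumbo() below does this for real.
 */
static struct mbuf *
bge_jumbo_mbuf_sketch(struct bge_softc *sc)
{
        struct mbuf *m;
        caddr_t buf;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return(NULL);
        buf = bge_jalloc(sc);
        if (buf == NULL) {
                m_freem(m);
                return(NULL);
        }
        m->m_data = (void *)buf;
        m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
        MEXTADD(m, buf, BGE_JUMBO_FRAMELEN, bge_jfree, sc, 0, EXT_NET_DRV);

        return(m);
}
#endif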
  883 
  884 
  885 /*
   886  * Initialize a standard receive ring descriptor.
  887  */
  888 static int
  889 bge_newbuf_std(sc, i, m)
  890         struct bge_softc        *sc;
  891         int                     i;
  892         struct mbuf             *m;
  893 {
  894         struct mbuf             *m_new = NULL;
  895         struct bge_rx_bd        *r;
  896         struct bge_dmamap_arg   ctx;
  897         int                     error;
  898 
  899         if (m == NULL) {
  900                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  901                 if (m_new == NULL) {
  902                         return(ENOBUFS);
  903                 }
  904 
  905                 MCLGET(m_new, M_DONTWAIT);
  906                 if (!(m_new->m_flags & M_EXT)) {
  907                         m_freem(m_new);
  908                         return(ENOBUFS);
  909                 }
  910                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  911         } else {
  912                 m_new = m;
  913                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  914                 m_new->m_data = m_new->m_ext.ext_buf;
  915         }
  916 
  917         if (!sc->bge_rx_alignment_bug)
  918                 m_adj(m_new, ETHER_ALIGN);
  919         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
  920         r = &sc->bge_ldata.bge_rx_std_ring[i];
  921         ctx.bge_maxsegs = 1;
  922         ctx.sc = sc;
  923         error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
  924             sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
  925             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  926         if (error || ctx.bge_maxsegs == 0) {
  927                 if (m == NULL)
  928                         m_freem(m_new);
  929                 return(ENOMEM);
  930         }
  931         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
  932         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
  933         r->bge_flags = htole16(BGE_RXBDFLAG_END);
  934         r->bge_len = htole16(m_new->m_len);
  935         r->bge_idx = htole16(i);
  936 
  937         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  938             sc->bge_cdata.bge_rx_std_dmamap[i],
  939             BUS_DMASYNC_PREREAD);
  940 
  941         return(0);
  942 }
  943 
  944 /*
  945  * Initialize a jumbo receive ring descriptor. This allocates
  946  * a jumbo buffer from the pool managed internally by the driver.
  947  */
  948 static int
  949 bge_newbuf_jumbo(sc, i, m)
  950         struct bge_softc *sc;
  951         int i;
  952         struct mbuf *m;
  953 {
  954         struct mbuf *m_new = NULL;
  955         struct bge_rx_bd *r;
  956         struct bge_dmamap_arg ctx;
  957         int error;
  958 
  959         if (m == NULL) {
   960                 caddr_t                 buf = NULL;
  961 
  962                 /* Allocate the mbuf. */
  963                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  964                 if (m_new == NULL) {
  965                         return(ENOBUFS);
  966                 }
  967 
  968                 /* Allocate the jumbo buffer */
  969                 buf = bge_jalloc(sc);
  970                 if (buf == NULL) {
  971                         m_freem(m_new);
  972                         printf("bge%d: jumbo allocation failed "
  973                             "-- packet dropped!\n", sc->bge_unit);
  974                         return(ENOBUFS);
  975                 }
  976 
  977                 /* Attach the buffer to the mbuf. */
  978                 m_new->m_data = (void *) buf;
  979                 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
  980                 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
  981                     (struct bge_softc *)sc, 0, EXT_NET_DRV);
  982         } else {
  983                 m_new = m;
  984                 m_new->m_data = m_new->m_ext.ext_buf;
  985                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
  986         }
  987 
  988         if (!sc->bge_rx_alignment_bug)
  989                 m_adj(m_new, ETHER_ALIGN);
  990         /* Set up the descriptor. */
  991         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
  992         r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
  993         ctx.bge_maxsegs = 1;
  994         ctx.sc = sc;
  995         error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
  996             sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
  997             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  998         if (error || ctx.bge_maxsegs == 0) {
  999                 if (m == NULL)
 1000                         m_freem(m_new);
 1001                 return(ENOMEM);
 1002         }
 1003         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
 1004         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
 1005         r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
 1006         r->bge_len = htole16(m_new->m_len);
 1007         r->bge_idx = htole16(i);
 1008 
  1009         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 1010             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
 1011             BUS_DMASYNC_PREREAD);
 1012 
 1013         return(0);
 1014 }
 1015 
 1016 /*
 1017  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
  1018  * that's 1MB of memory, which is a lot. For now, we fill only the first
 1019  * 256 ring entries and hope that our CPU is fast enough to keep up with
 1020  * the NIC.
 1021  */
 1022 static int
 1023 bge_init_rx_ring_std(sc)
 1024         struct bge_softc *sc;
 1025 {
 1026         int i;
 1027 
 1028         for (i = 0; i < BGE_SSLOTS; i++) {
 1029                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
 1030                         return(ENOBUFS);
  1031         }
 1032 
 1033         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1034             sc->bge_cdata.bge_rx_std_ring_map,
 1035             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1036 
 1037         sc->bge_std = i - 1;
 1038         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 1039 
 1040         return(0);
 1041 }
 1042 
 1043 static void
 1044 bge_free_rx_ring_std(sc)
 1045         struct bge_softc *sc;
 1046 {
 1047         int i;
 1048 
 1049         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1050                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
 1051                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
 1052                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
 1053                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1054                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1055                 }
 1056                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
 1057                     sizeof(struct bge_rx_bd));
 1058         }
 1059 
 1060         return;
 1061 }
 1062 
 1063 static int
 1064 bge_init_rx_ring_jumbo(sc)
 1065         struct bge_softc *sc;
 1066 {
 1067         int i;
 1068         struct bge_rcb *rcb;
 1069 
 1070         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1071                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
 1072                         return(ENOBUFS);
  1073         }
 1074 
 1075         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1076             sc->bge_cdata.bge_rx_jumbo_ring_map,
 1077             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1078 
 1079         sc->bge_jumbo = i - 1;
 1080 
 1081         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1082         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
 1083         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1084 
 1085         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 1086 
 1087         return(0);
 1088 }
 1089 
 1090 static void
 1091 bge_free_rx_ring_jumbo(sc)
 1092         struct bge_softc *sc;
 1093 {
 1094         int i;
 1095 
 1096         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1097                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
 1098                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
 1099                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
 1100                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 1101                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1102                 }
 1103                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
 1104                     sizeof(struct bge_rx_bd));
 1105         }
 1106 
 1107         return;
 1108 }
 1109 
 1110 static void
 1111 bge_free_tx_ring(sc)
 1112         struct bge_softc *sc;
 1113 {
 1114         int i;
 1115 
 1116         if (sc->bge_ldata.bge_tx_ring == NULL)
 1117                 return;
 1118 
 1119         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1120                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
 1121                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
 1122                         sc->bge_cdata.bge_tx_chain[i] = NULL;
 1123                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1124                             sc->bge_cdata.bge_tx_dmamap[i]);
 1125                 }
 1126                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
 1127                     sizeof(struct bge_tx_bd));
 1128         }
 1129 
 1130         return;
 1131 }
 1132 
 1133 static int
 1134 bge_init_tx_ring(sc)
 1135         struct bge_softc *sc;
 1136 {
 1137         sc->bge_txcnt = 0;
 1138         sc->bge_tx_saved_considx = 0;
 1139 
 1140         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1141         /* 5700 b2 errata */
 1142         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1143                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1144 
 1145         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1146         /* 5700 b2 errata */
 1147         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1148                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1149 
 1150         return(0);
 1151 }
 1152 
 1153 static void
 1154 bge_setmulti(sc)
 1155         struct bge_softc *sc;
 1156 {
 1157         struct ifnet *ifp;
 1158         struct ifmultiaddr *ifma;
 1159         u_int32_t hashes[4] = { 0, 0, 0, 0 };
 1160         int h, i;
 1161 
 1162         BGE_LOCK_ASSERT(sc);
 1163 
 1164         ifp = &sc->arpcom.ac_if;
 1165 
 1166         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
 1167                 for (i = 0; i < 4; i++)
 1168                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
 1169                 return;
 1170         }
 1171 
 1172         /* First, zot all the existing filters. */
 1173         for (i = 0; i < 4; i++)
 1174                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
 1175 
 1176         /* Now program new ones. */
 1177         IF_ADDR_LOCK(ifp);
 1178         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1179                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1180                         continue;
 1181                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1182                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
 1183                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 1184         }
 1185         IF_ADDR_UNLOCK(ifp);
 1186 
 1187         for (i = 0; i < 4; i++)
 1188                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
 1189 
 1190         return;
 1191 }
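
#ifdef notdef
/*
 * Editor's sketch, not part of the original file: how bge_setmulti()
 * above maps one multicast address into the four 32-bit hash registers.
 * The low 7 bits of the little-endian CRC select one of 128 filter
 * bits: bits 6-5 pick the register (BGE_MAR0..BGE_MAR3) and bits 4-0
 * pick the bit within it.  For example, a CRC of 0x4A (1001010b) sets
 * bit 10 of the third register.
 */
static void
bge_hash_maddr_sketch(u_int32_t hashes[4], u_char *maddr)
{
        int h;

        h = ether_crc32_le(maddr, ETHER_ADDR_LEN) & 0x7F;
        hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
}
#endif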
 1192 
 1193 /*
 1194  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 1195  * self-test results.
 1196  */
 1197 static int
 1198 bge_chipinit(sc)
 1199         struct bge_softc *sc;
 1200 {
 1201         int                     i;
 1202         u_int32_t               dma_rw_ctl;
 1203 
 1204         /* Set endianness before we access any non-PCI registers. */
 1205 #if BYTE_ORDER == BIG_ENDIAN
 1206         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1207             BGE_BIGENDIAN_INIT, 4);
 1208 #else
 1209         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1210             BGE_LITTLEENDIAN_INIT, 4);
 1211 #endif
 1212 
 1213         /*
 1214          * Check the 'ROM failed' bit on the RX CPU to see if
 1215          * self-tests passed.
 1216          */
 1217         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
 1218                 printf("bge%d: RX CPU self-diagnostics failed!\n",
 1219                     sc->bge_unit);
 1220                 return(ENODEV);
 1221         }
 1222 
 1223         /* Clear the MAC control register */
 1224         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 1225 
 1226         /*
 1227          * Clear the MAC statistics block in the NIC's
 1228          * internal memory.
 1229          */
 1230         for (i = BGE_STATS_BLOCK;
 1231             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1232                 BGE_MEMWIN_WRITE(sc, i, 0);
 1233 
 1234         for (i = BGE_STATUS_BLOCK;
 1235             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1236                 BGE_MEMWIN_WRITE(sc, i, 0);
 1237 
 1238         /* Set up the PCI DMA control register. */
 1239         if (sc->bge_pcie) {
 1240                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1241                     (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1242                     (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1243         } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
 1244             BGE_PCISTATE_PCI_BUSMODE) {
 1245                 /* Conventional PCI bus */
 1246                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1247                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1248                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1249                     (0x0F);
 1250         } else {
 1251                 /* PCI-X bus */
 1252                 /*
 1253                  * The 5704 uses a different encoding of read/write
 1254                  * watermarks.
 1255                  */
 1256                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1257                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1258                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1259                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1260                 else
 1261                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1262                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1263                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1264                             (0x0F);
 1265 
 1266                 /*
 1267                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
 1268                  * for hardware bugs.
 1269                  */
 1270                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1271                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 1272                         u_int32_t tmp;
 1273 
 1274                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
 1275                         if (tmp == 0x6 || tmp == 0x7)
 1276                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1277                 }
 1278         }
 1279 
 1280         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1281             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
 1282             sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1283             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1284                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
 1285         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
 1286 
 1287         /*
 1288          * Set up general mode register.
 1289          */
 1290         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
 1291             BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1292             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
 1293             BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
 1294 
 1295         /*
 1296          * Disable memory write invalidate.  Apparently it is not supported
 1297          * properly by these devices.
 1298          */
 1299         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
 1300 
 1301 #ifdef __brokenalpha__
 1302         /*
  1303          * Must ensure that we do not cross an 8K (bytes) boundary
 1304          * for DMA reads.  Our highest limit is 1K bytes.  This is a
 1305          * restriction on some ALPHA platforms with early revision
 1306          * 21174 PCI chipsets, such as the AlphaPC 164lx
 1307          */
 1308         PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
 1309             BGE_PCI_READ_BNDRY_1024BYTES, 4);
 1310 #endif
 1311 
  1312         /* Set the timer prescaler (always 66MHz) */
 1313         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 1314 
 1315         return(0);
 1316 }
 1317 
 1318 static int
 1319 bge_blockinit(sc)
 1320         struct bge_softc *sc;
 1321 {
 1322         struct bge_rcb *rcb;
 1323         volatile struct bge_rcb *vrcb;
 1324         int i;
 1325 
 1326         /*
 1327          * Initialize the memory window pointer register so that
 1328          * we can access the first 32K of internal NIC RAM. This will
 1329          * allow us to set up the TX send ring RCBs and the RX return
 1330          * ring RCBs, plus other things which live in NIC memory.
 1331          */
 1332         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
 1333 
 1334         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
 1335 
 1336         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1337             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1338                 /* Configure mbuf memory pool */
 1339                 if (sc->bge_extram) {
 1340                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1341                             BGE_EXT_SSRAM);
 1342                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1343                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1344                         else
 1345                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1346                 } else {
 1347                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1348                             BGE_BUFFPOOL_1);
 1349                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1350                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1351                         else
 1352                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1353                 }
 1354 
 1355                 /* Configure DMA resource pool */
 1356                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1357                     BGE_DMA_DESCRIPTORS);
 1358                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1359         }
 1360 
 1361         /* Configure mbuf pool watermarks */
 1362         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1363             sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 1364                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1365                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1366         } else {
 1367                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1368                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1369         }
 1370         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1371 
 1372         /* Configure DMA resource watermarks */
 1373         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1374         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1375 
 1376         /* Enable buffer manager */
 1377         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1378             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1379                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1380                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1381 
 1382                 /* Poll for buffer manager start indication */
 1383                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1384                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1385                                 break;
 1386                         DELAY(10);
 1387                 }
 1388 
 1389                 if (i == BGE_TIMEOUT) {
 1390                         printf("bge%d: buffer manager failed to start\n",
 1391                             sc->bge_unit);
 1392                         return(ENXIO);
 1393                 }
 1394         }
 1395 
 1396         /* Enable flow-through queues */
 1397         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1398         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1399 
 1400         /* Wait until queue initialization is complete */
 1401         for (i = 0; i < BGE_TIMEOUT; i++) {
 1402                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1403                         break;
 1404                 DELAY(10);
 1405         }
 1406 
 1407         if (i == BGE_TIMEOUT) {
 1408                 printf("bge%d: flow-through queue init failed\n",
 1409                     sc->bge_unit);
 1410                 return(ENXIO);
 1411         }
 1412 
 1413         /* Initialize the standard RX ring control block */
 1414         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
 1415         rcb->bge_hostaddr.bge_addr_lo =
 1416             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
 1417         rcb->bge_hostaddr.bge_addr_hi =
 1418             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
 1419         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1420             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
 1421         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1422             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1423                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1424         else
 1425                 rcb->bge_maxlen_flags =
 1426                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1427         if (sc->bge_extram)
 1428                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1429         else
 1430                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1431         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1432         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1433 
 1434         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1435         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1436 
 1437         /*
 1438          * Initialize the jumbo RX ring control block
 1439          * We set the 'ring disabled' bit in the flags
 1440          * field until we're actually ready to start
 1441          * using this ring (i.e. once we set the MTU
 1442          * high enough to require it).
 1443          */
 1444         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1445             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1446                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1447 
 1448                 rcb->bge_hostaddr.bge_addr_lo =
 1449                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1450                 rcb->bge_hostaddr.bge_addr_hi =
 1451                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1452                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1453                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1454                     BUS_DMASYNC_PREREAD);
 1455                 rcb->bge_maxlen_flags =
 1456                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
 1457                     BGE_RCB_FLAG_RING_DISABLED);
 1458                 if (sc->bge_extram)
 1459                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1460                 else
 1461                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1462                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1463                     rcb->bge_hostaddr.bge_addr_hi);
 1464                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1465                     rcb->bge_hostaddr.bge_addr_lo);
 1466 
 1467                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1468                     rcb->bge_maxlen_flags);
 1469                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1470 
 1471                 /* Set up dummy disabled mini ring RCB */
 1472                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
 1473                 rcb->bge_maxlen_flags =
 1474                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1475                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1476                     rcb->bge_maxlen_flags);
 1477         }
 1478 
 1479         /*
 1480          * Set the BD ring replenish thresholds. The recommended
 1481          * values are 1/8th the number of descriptors allocated to
 1482          * each ring.
 1483          */
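      	/*
       * With the ring sizes usually defined in if_bgereg.h
       * (BGE_STD_RX_RING_CNT of 512, BGE_JUMBO_RX_RING_CNT of 256),
       * the writes below come out to thresholds of 64 and 32
       * descriptors respectively.
       */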
 1484         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1485         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
 1486 
 1487         /*
 1488          * Disable all unused send rings by setting the 'ring disabled'
 1489          * bit in the flags field of all the TX send ring control blocks.
 1490          * These are located in NIC memory.
 1491          */
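      	/*
       * These RCBs live in the chip's internal memory, reached through
       * the PCI memory window at BGE_MEMWIN_START in BAR0; hence the
       * volatile pointer stores below rather than CSR_WRITE_4()
       * register accesses.
       */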
 1492         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1493             BGE_SEND_RING_RCB);
 1494         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1495                 vrcb->bge_maxlen_flags =
 1496                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1497                 vrcb->bge_nicaddr = 0;
 1498                 vrcb++;
 1499         }
 1500 
 1501         /* Configure TX RCB 0 (we use only the first ring) */
 1502         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1503             BGE_SEND_RING_RCB);
 1504         vrcb->bge_hostaddr.bge_addr_lo =
 1505             htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
 1506         vrcb->bge_hostaddr.bge_addr_hi =
 1507             htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
 1508         vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
 1509         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1510             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1511                 vrcb->bge_maxlen_flags =
 1512                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
 1513 
 1514         /* Disable all unused RX return rings */
 1515         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1516             BGE_RX_RETURN_RING_RCB);
 1517         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1518                 vrcb->bge_hostaddr.bge_addr_hi = 0;
 1519                 vrcb->bge_hostaddr.bge_addr_lo = 0;
 1520                 vrcb->bge_maxlen_flags =
 1521                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1522                     BGE_RCB_FLAG_RING_DISABLED);
 1523                 vrcb->bge_nicaddr = 0;
 1524                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1525                     (i * (sizeof(u_int64_t))), 0);
 1526                 vrcb++;
 1527         }
 1528 
 1529         /* Initialize RX ring indexes */
 1530         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1531         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1532         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1533 
 1534         /*
 1535          * Set up RX return ring 0
 1536          * Note that the NIC address for RX return rings is 0x00000000.
 1537          * The return rings live entirely within the host, so the
 1538          * nicaddr field in the RCB isn't used.
 1539          */
 1540         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1541             BGE_RX_RETURN_RING_RCB);
 1542         vrcb->bge_hostaddr.bge_addr_lo =
 1543             BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
 1544         vrcb->bge_hostaddr.bge_addr_hi =
 1545             BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
 1546         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 1547             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 1548         vrcb->bge_nicaddr = 0x00000000;
 1549         vrcb->bge_maxlen_flags =
 1550             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
 1551 
 1552         /* Set random backoff seed for TX */
 1553         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1554             (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
 1555             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
 1556             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
 1557             BGE_TX_BACKOFF_SEED_MASK);
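      	/*
       * (Seeding the backoff generator from the station address keeps
       * NICs on the same segment from choosing identical 802.3 backoff
       * slots after a collision; the byte sum is masked down to the
       * width of the chip's seed field.)
       */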
 1558 
 1559         /* Set inter-packet gap */
 1560         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
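      	/*
       * (0x2620 is the value Broadcom's drivers conventionally program
       * here; going by the equivalent tg3 register layout it encodes a
       * slot time of 0x20 byte times, an IPG of 6 and an IPG CRS of 2.)
       */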
 1561 
 1562         /*
 1563          * Specify which ring to use for packets that don't match
 1564          * any RX rules.
 1565          */
 1566         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1567 
 1568         /*
 1569          * Configure number of RX lists. One interrupt distribution
 1570          * list, sixteen active lists, one bad frames class.
 1571          */
 1572         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1573 
 1574         /* Initialize RX list placement stats mask. */
 1575         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1576         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1577 
 1578         /* Disable host coalescing until we get it set up */
 1579         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1580 
 1581         /* Poll to make sure it's shut down. */
 1582         for (i = 0; i < BGE_TIMEOUT; i++) {
 1583                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1584                         break;
 1585                 DELAY(10);
 1586         }
 1587 
 1588         if (i == BGE_TIMEOUT) {
 1589                 printf("bge%d: host coalescing engine failed to idle\n",
 1590                     sc->bge_unit);
 1591                 return(ENXIO);
 1592         }
 1593 
 1594         /* Set up host coalescing defaults */
 1595         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1596         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1597         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1598         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1599         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1600             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1601                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1602                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1603         }
 1604         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1605         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
 1606 
 1607         /* Set up address of statistics block */
 1608         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1609             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1610                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
 1611                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
 1612                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
 1613                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
 1614                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1615                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1616                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1617         }
 1618 
 1619         /* Set up address of status block */
 1620         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
 1621             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
 1622         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
 1623             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
 1624         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 1625             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 1626         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
 1627         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
 1628 
 1629         /* Turn on host coalescing state machine */
 1630         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1631 
 1632         /* Turn on RX BD completion state machine and enable attentions */
 1633         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1634             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1635 
 1636         /* Turn on RX list placement state machine */
 1637         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1638 
 1639         /* Turn on RX list selector state machine. */
 1640         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1641             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1642                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1643 
 1644         /* Turn on DMA, clear stats */
 1645         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1646             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1647             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1648             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1649             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1650 
 1651         /* Set misc. local control, enable interrupts on attentions */
 1652         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
 1653 
 1654 #ifdef notdef
 1655         /* Assert GPIO pins for PHY reset */
 1656         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1657             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1658         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1659             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1660 #endif
 1661 
 1662         /* Turn on DMA completion state machine */
 1663         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1664             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1665                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1666 
 1667         /* Turn on write DMA state machine */
 1668         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1669             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1670 
 1671         /* Turn on read DMA state machine */
 1672         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1673             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1674 
 1675         /* Turn on RX data completion state machine */
 1676         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1677 
 1678         /* Turn on RX BD initiator state machine */
 1679         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1680 
 1681         /* Turn on RX data and RX BD initiator state machine */
 1682         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1683 
 1684         /* Turn on Mbuf cluster free state machine */
 1685         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1686             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1687                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1688 
 1689         /* Turn on send BD completion state machine */
 1690         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1691 
 1692         /* Turn on send data completion state machine */
 1693         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1694 
 1695         /* Turn on send data initiator state machine */
 1696         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1697 
 1698         /* Turn on send BD initiator state machine */
 1699         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1700 
 1701         /* Turn on send BD selector state machine */
 1702         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1703 
 1704         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1705         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1706             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1707 
 1708         /* ack/clear link change events */
 1709         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1710             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1711             BGE_MACSTAT_LINK_CHANGED);
 1712         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1713 
 1714         /* Enable PHY auto polling (for MII/GMII only) */
 1715         if (sc->bge_tbi) {
 1716                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1717         } else {
 1718                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
 1719                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
 1720                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1721                             BGE_EVTENB_MI_INTERRUPT);
 1722         }
 1723 
 1724         /* Enable link state change attentions. */
 1725         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1726 
 1727         return(0);
 1728 }
 1729 
 1730 /*
 1731  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 1732  * against our list and return its name if we find a match. Note
 1733  * that since the Broadcom controller contains VPD support, we
 1734  * can get the device name string from the controller itself instead
 1735  * of the compiled-in string. This is a little slow, but it guarantees
 1736  * we'll always announce the right product name.
 1737  */
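      /*
 * For reference, the probe table walked below is an array of entries
 * shaped roughly like this (see if_bgereg.h for the exact definition):
 *
 *      struct bge_type {
 *              u_int16_t       bge_vid;        - PCI vendor ID
 *              u_int16_t       bge_did;        - PCI device ID
 *              char            *bge_name;      - device description
 *      };
 *
 * terminated by an entry whose bge_name is NULL.
 */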
 1738 static int
 1739 bge_probe(dev)
 1740         device_t dev;
 1741 {
 1742         struct bge_type *t;
 1743         struct bge_softc *sc;
 1744         char *descbuf;
 1745 
 1746         t = bge_devs;
 1747 
 1748         sc = device_get_softc(dev);
 1749         bzero(sc, sizeof(struct bge_softc));
 1750         sc->bge_unit = device_get_unit(dev);
 1751         sc->bge_dev = dev;
 1752 
 1753         while(t->bge_name != NULL) {
 1754                 if ((pci_get_vendor(dev) == t->bge_vid) &&
 1755                     (pci_get_device(dev) == t->bge_did)) {
 1756 #ifdef notdef
 1757                         bge_vpd_read(sc);
 1758                         device_set_desc(dev, sc->bge_vpd_prodname);
 1759 #endif
 1760                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
 1761                         if (descbuf == NULL)
 1762                                 return(ENOMEM);
 1763                         snprintf(descbuf, BGE_DEVDESC_MAX,
 1764                             "%s, ASIC rev. %#04x", t->bge_name,
 1765                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
 1766                         device_set_desc_copy(dev, descbuf);
 1767                         if (pci_get_subvendor(dev) == DELL_VENDORID)
 1768                                 sc->bge_no_3_led = 1;
 1769                         free(descbuf, M_TEMP);
 1770                         return(0);
 1771                 }
 1772                 t++;
 1773         }
 1774 
 1775         return(ENXIO);
 1776 }
 1777 
 1778 static void
 1779 bge_dma_free(sc)
 1780         struct bge_softc *sc;
 1781 {
 1782         int i;
 1783 
 1784 
 1785         /* Destroy DMA maps for RX buffers */
 1786 
 1787         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1788                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
 1789                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1790                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1791         }
 1792 
 1793         /* Destroy DMA maps for jumbo RX buffers */
 1794 
 1795         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1796                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
 1797                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
 1798                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1799         }
 1800 
 1801         /* Destroy DMA maps for TX buffers */
 1802 
 1803         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1804                 if (sc->bge_cdata.bge_tx_dmamap[i])
 1805                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1806                             sc->bge_cdata.bge_tx_dmamap[i]);
 1807         }
 1808 
 1809         if (sc->bge_cdata.bge_mtag)
 1810                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1811 
 1812 
 1813         /* Destroy standard RX ring */
 1814 
 1815         if (sc->bge_ldata.bge_rx_std_ring)
 1816                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
 1817                     sc->bge_ldata.bge_rx_std_ring,
 1818                     sc->bge_cdata.bge_rx_std_ring_map);
 1819 
 1820         if (sc->bge_cdata.bge_rx_std_ring_map) {
 1821                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
 1822                     sc->bge_cdata.bge_rx_std_ring_map);
 1823                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
 1824                     sc->bge_cdata.bge_rx_std_ring_map);
 1825         }
 1826 
 1827         if (sc->bge_cdata.bge_rx_std_ring_tag)
 1828                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
 1829 
 1830         /* Destroy jumbo RX ring */
 1831 
 1832         if (sc->bge_ldata.bge_rx_jumbo_ring)
 1833                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1834                     sc->bge_ldata.bge_rx_jumbo_ring,
 1835                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1836 
 1837         if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
 1838                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1839                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1840                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1841                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1842         }
 1843 
 1844         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
 1845                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1846 
 1847         /* Destroy RX return ring */
 1848 
 1849         if (sc->bge_ldata.bge_rx_return_ring)
 1850                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
 1851                     sc->bge_ldata.bge_rx_return_ring,
 1852                     sc->bge_cdata.bge_rx_return_ring_map);
 1853 
 1854         if (sc->bge_cdata.bge_rx_return_ring_map) {
 1855                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
 1856                     sc->bge_cdata.bge_rx_return_ring_map);
 1857                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
 1858                     sc->bge_cdata.bge_rx_return_ring_map);
 1859         }
 1860 
 1861         if (sc->bge_cdata.bge_rx_return_ring_tag)
 1862                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
 1863 
 1864         /* Destroy TX ring */
 1865 
 1866         if (sc->bge_ldata.bge_tx_ring)
 1867                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
 1868                     sc->bge_ldata.bge_tx_ring,
 1869                     sc->bge_cdata.bge_tx_ring_map);
 1870 
 1871         if (sc->bge_cdata.bge_tx_ring_map) {
 1872                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
 1873                     sc->bge_cdata.bge_tx_ring_map);
 1874                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
 1875                     sc->bge_cdata.bge_tx_ring_map);
 1876         }
 1877 
 1878         if (sc->bge_cdata.bge_tx_ring_tag)
 1879                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
 1880 
 1881         /* Destroy status block */
 1882 
 1883         if (sc->bge_ldata.bge_status_block)
 1884                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
 1885                     sc->bge_ldata.bge_status_block,
 1886                     sc->bge_cdata.bge_status_map);
 1887 
 1888         if (sc->bge_cdata.bge_status_map) {
 1889                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
 1890                     sc->bge_cdata.bge_status_map);
 1891                 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
 1892                     sc->bge_cdata.bge_status_map);
 1893         }
 1894 
 1895         if (sc->bge_cdata.bge_status_tag)
 1896                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
 1897 
 1898         /* Destroy statistics block */
 1899 
 1900         if (sc->bge_ldata.bge_stats)
 1901                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
 1902                     sc->bge_ldata.bge_stats,
 1903                     sc->bge_cdata.bge_stats_map);
 1904 
 1905         if (sc->bge_cdata.bge_stats_map) {
 1906                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
 1907                     sc->bge_cdata.bge_stats_map);
 1908                 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
 1909                     sc->bge_cdata.bge_stats_map);
 1910         }
 1911 
 1912         if (sc->bge_cdata.bge_stats_tag)
 1913                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
 1914 
 1915         /* Destroy the parent tag */
 1916 
 1917         if (sc->bge_cdata.bge_parent_tag)
 1918                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
 1919 
 1920         return;
 1921 }
 1922 
 1923 static int
 1924 bge_dma_alloc(dev)
 1925         device_t dev;
 1926 {
 1927         struct bge_softc *sc;
 1928         int nseg, i, error;
 1929         struct bge_dmamap_arg ctx;
 1930 
 1931         sc = device_get_softc(dev);
 1932 
 1933         /*
 1934          * Allocate the parent bus DMA tag appropriate for PCI.
 1935          */
 1936 #define BGE_NSEG_NEW 32
 1937         error = bus_dma_tag_create(NULL,        /* parent */
 1938                         PAGE_SIZE, 0,           /* alignment, boundary */
 1939                         BUS_SPACE_MAXADDR,      /* lowaddr */
 1940                         BUS_SPACE_MAXADDR_32BIT,/* highaddr */
 1941                         NULL, NULL,             /* filter, filterarg */
 1942                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
 1943                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1944                         0,                      /* flags */
 1945                         NULL, NULL,             /* lockfunc, lockarg */
 1946                         &sc->bge_cdata.bge_parent_tag);

              if (error) {
                      device_printf(dev, "could not allocate parent dma tag\n");
                      return (ENOMEM);
              }
 1947 
 1948         /*
 1949          * Create tag for RX mbufs.
 1950          */
 1951         nseg = 32;
 1952         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
 1953             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1954             NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL,
 1955             &sc->bge_cdata.bge_mtag);
 1956 
 1957         if (error) {
 1958                 device_printf(dev, "could not allocate dma tag\n");
 1959                 return (ENOMEM);
 1960         }
 1961 
 1962         /* Create DMA maps for RX buffers */
 1963 
 1964         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1965                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1966                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
 1967                 if (error) {
 1968                         device_printf(dev, "can't create DMA map for RX\n");
 1969                         return(ENOMEM);
 1970                 }
 1971         }
 1972 
 1973         /* Create DMA maps for TX buffers */
 1974 
 1975         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1976                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1977                             &sc->bge_cdata.bge_tx_dmamap[i]);
 1978                 if (error) {
 1979                         device_printf(dev, "can't create DMA map for TX\n");
 1980                         return(ENOMEM);
 1981                 }
 1982         }
 1983 
 1984         /* Create tag for standard RX ring */
 1985 
 1986         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1987             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1988             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
 1989             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
 1990 
 1991         if (error) {
 1992                 device_printf(dev, "could not allocate dma tag\n");
 1993                 return (ENOMEM);
 1994         }
 1995 
 1996         /* Allocate DMA'able memory for standard RX ring */
 1997 
 1998         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
 1999             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
 2000             &sc->bge_cdata.bge_rx_std_ring_map);
 2001         if (error)
 2002                 return (ENOMEM);
 2003 
 2004         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
 2005 
 2006         /* Load the address of the standard RX ring */
 2007 
 2008         ctx.bge_maxsegs = 1;
 2009         ctx.sc = sc;
 2010 
 2011         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
 2012             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
 2013             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2014 
 2015         if (error)
 2016                 return (ENOMEM);
 2017 
 2018         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
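      	/*
       * bge_dma_map_addr() (defined earlier in this file) is the usual
       * bus_dmamap_load() callback; with bge_maxsegs set to 1 it boils
       * down to recording the lone segment's bus address, roughly:
       *
       *      static void
       *      bge_dma_map_addr(void *arg, bus_dma_segment_t *segs,
       *          int nseg, int error)
       *      {
       *              struct bge_dmamap_arg *ctx = arg;
       *
       *              if (error)
       *                      return;
       *              ctx->bge_busaddr = segs[0].ds_addr;
       *      }
       *
       * which is where ctx.bge_busaddr comes from here and in the ring
       * loads that follow.
       */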
 2019 
 2020         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2021             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2022 
 2023                 /*
 2024                  * Create tag for jumbo mbufs.
 2025                  * This is really a bit of a kludge. We allocate a special
 2026                  * jumbo buffer pool which (thanks to the way our DMA
 2027                  * memory allocation works) will consist of contiguous
 2028                  * pages. This means that even though a jumbo buffer might
 2029                  * be larger than a page size, we don't really need to
 2030                  * map it into more than one DMA segment. However, the
 2031                  * default mbuf tag will result in multi-segment mappings,
 2032                  * so we have to create a special jumbo mbuf tag that
 2033                  * lets us get away with mapping the jumbo buffers as
 2034                  * a single segment. I think eventually the driver should
 2035                  * be changed so that it uses ordinary mbufs and cluster
 2036                  * buffers, i.e. jumbo frames can span multiple DMA
 2037                  * descriptors. But that's a project for another day.
 2038                  */
 2039 
 2040                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2041                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2042                     NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
 2043                     &sc->bge_cdata.bge_mtag_jumbo);
 2044 
 2045                 if (error) {
 2046                         device_printf(dev, "could not allocate dma tag\n");
 2047                         return (ENOMEM);
 2048                 }
 2049 
 2050                 /* Create tag for jumbo RX ring */
 2051 
 2052                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2053                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2054                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
 2055                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
 2056 
 2057                 if (error) {
 2058                         device_printf(dev, "could not allocate dma tag\n");
 2059                         return (ENOMEM);
 2060                 }
 2061 
 2062                 /* Allocate DMA'able memory for jumbo RX ring */
 2063 
 2064                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2065                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
 2066                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
 2067                 if (error)
 2068                         return (ENOMEM);
 2069 
 2070                 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
 2071                     BGE_JUMBO_RX_RING_SZ);
 2072 
 2073                 /* Load the address of the jumbo RX ring */
 2074 
 2075                 ctx.bge_maxsegs = 1;
 2076                 ctx.sc = sc;
 2077 
 2078                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2079                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2080                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
 2081                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2082 
 2083                 if (error)
 2084                         return (ENOMEM);
 2085 
 2086                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
 2087 
 2088                 /* Create DMA maps for jumbo RX buffers */
 2089 
 2090                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 2091                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
 2092                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 2093                         if (error) {
 2094                                 device_printf(dev,
 2095                                     "can't create DMA map for RX\n");
 2096                                 return(ENOMEM);
 2097                         }
 2098                 }
 2099 
 2100         }
 2101 
 2102         /* Create tag for RX return ring */
 2103 
 2104         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2105             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2106             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
 2107             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
 2108 
 2109         if (error) {
 2110                 device_printf(dev, "could not allocate dma tag\n");
 2111                 return (ENOMEM);
 2112         }
 2113 
 2114         /* Allocate DMA'able memory for RX return ring */
 2115 
 2116         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
 2117             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
 2118             &sc->bge_cdata.bge_rx_return_ring_map);
 2119         if (error)
 2120                 return (ENOMEM);
 2121 
 2122         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
 2123             BGE_RX_RTN_RING_SZ(sc));
 2124 
 2125         /* Load the address of the RX return ring */
 2126 
 2127         ctx.bge_maxsegs = 1;
 2128         ctx.sc = sc;
 2129 
 2130         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
 2131             sc->bge_cdata.bge_rx_return_ring_map,
 2132             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
 2133             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2134 
 2135         if (error)
 2136                 return (ENOMEM);
 2137 
 2138         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
 2139 
 2140         /* Create tag for TX ring */
 2141 
 2142         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2143             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2144             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
 2145             &sc->bge_cdata.bge_tx_ring_tag);
 2146 
 2147         if (error) {
 2148                 device_printf(dev, "could not allocate dma tag\n");
 2149                 return (ENOMEM);
 2150         }
 2151 
 2152         /* Allocate DMA'able memory for TX ring */
 2153 
 2154         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
 2155             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
 2156             &sc->bge_cdata.bge_tx_ring_map);
 2157         if (error)
 2158                 return (ENOMEM);
 2159 
 2160         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
 2161 
 2162         /* Load the address of the TX ring */
 2163 
 2164         ctx.bge_maxsegs = 1;
 2165         ctx.sc = sc;
 2166 
 2167         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
 2168             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
 2169             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2170 
 2171         if (error)
 2172                 return (ENOMEM);
 2173 
 2174         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
 2175 
 2176         /* Create tag for status block */
 2177 
 2178         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2179             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2180             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
 2181             NULL, NULL, &sc->bge_cdata.bge_status_tag);
 2182 
 2183         if (error) {
 2184                 device_printf(dev, "could not allocate dma tag\n");
 2185                 return (ENOMEM);
 2186         }
 2187 
 2188         /* Allocate DMA'able memory for status block */
 2189 
 2190         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
 2191             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
 2192             &sc->bge_cdata.bge_status_map);
 2193         if (error)
 2194                 return (ENOMEM);
 2195 
 2196         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
 2197 
 2198         /* Load the address of the status block */
 2199 
 2200         ctx.sc = sc;
 2201         ctx.bge_maxsegs = 1;
 2202 
 2203         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
 2204             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
 2205             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2206 
 2207         if (error)
 2208                 return (ENOMEM);
 2209 
 2210         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
 2211 
 2212         /* Create tag for statistics block */
 2213 
 2214         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2215             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2216             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
 2217             &sc->bge_cdata.bge_stats_tag);
 2218 
 2219         if (error) {
 2220                 device_printf(dev, "could not allocate dma tag\n");
 2221                 return (ENOMEM);
 2222         }
 2223 
 2224         /* Allocate DMA'able memory for statistics block */
 2225 
 2226         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
 2227             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
 2228             &sc->bge_cdata.bge_stats_map);
 2229         if (error)
 2230                 return (ENOMEM);
 2231 
 2232         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
 2233 
 2234         /* Load the address of the statistics block */
 2235 
 2236         ctx.sc = sc;
 2237         ctx.bge_maxsegs = 1;
 2238 
 2239         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
 2240             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
 2241             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2242 
 2243         if (error)
 2244                 return (ENOMEM);
 2245 
 2246         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
 2247 
 2248         return(0);
 2249 }
 2250 
 2251 static int
 2252 bge_attach(dev)
 2253         device_t dev;
 2254 {
 2255         struct ifnet *ifp;
 2256         struct bge_softc *sc;
 2257         u_int32_t hwcfg = 0;
 2258         u_int32_t mac_addr = 0;
 2259         int unit, error = 0, rid;
 2260 
 2261         sc = device_get_softc(dev);
 2262         unit = device_get_unit(dev);
 2263         sc->bge_dev = dev;
 2264         sc->bge_unit = unit;
 2265 
 2266         /*
 2267          * Map control/status registers.
 2268          */
 2269         pci_enable_busmaster(dev);
 2270 
 2271         rid = BGE_PCI_BAR0;
 2272         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 2273             RF_ACTIVE|PCI_RF_DENSE);
 2274 
 2275         if (sc->bge_res == NULL) {
 2276                 printf ("bge%d: couldn't map memory\n", unit);
 2277                 error = ENXIO;
 2278                 goto fail;
 2279         }
 2280 
 2281         sc->bge_btag = rman_get_bustag(sc->bge_res);
 2282         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
 2283         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
 2284 
 2285         /* Allocate interrupt */
 2286         rid = 0;
 2287 
 2288         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2289             RF_SHAREABLE | RF_ACTIVE);
 2290 
 2291         if (sc->bge_irq == NULL) {
 2292                 printf("bge%d: couldn't map interrupt\n", unit);
 2293                 error = ENXIO;
 2294                 goto fail;
 2295         }
 2296 
 2297         sc->bge_unit = unit;
 2298 
 2299         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
 2300 
 2301         /* Save ASIC rev. */
 2302 
 2303         sc->bge_chipid =
 2304             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
 2305             BGE_PCIMISCCTL_ASICREV;
 2306         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
 2307         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
 2308 
 2309         /*
 2310          * Treat the 5714 and the 5752 like the 5750 until we have more info
 2311          * on these chips.
 2312          */
 2313         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 
 2314             sc->bge_asicrev == BGE_ASICREV_BCM5752)
 2315                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
 2316 
 2317         /*
 2318          * XXX: Broadcom Linux driver.  Not in specs or errata.
 2319          * PCI-Express?
 2320          */
 2321         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 2322                 u_int32_t v;
 2323 
 2324                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
 2325                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
 2326                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
 2327                         if ((v & 0xff) == BGE_PCIE_CAPID)
 2328                                 sc->bge_pcie = 1;
 2329                 }
 2330         }
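      	/*
       * The check above walks one hop of the PCI capability list by
       * hand: byte 1 of the MSI capability header is the next-
       * capability pointer, and if it points at BGE_PCIE_CAPID_REG we
       * verify that the capability ID found there really is the
       * PCI-Express one.
       */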
 2331 
 2332         /* Try to reset the chip. */
 2333         bge_reset(sc);
 2334 
 2335         if (bge_chipinit(sc)) {
 2336                 printf("bge%d: chip initialization failed\n", sc->bge_unit);
 2337                 bge_release_resources(sc);
 2338                 error = ENXIO;
 2339                 goto fail;
 2340         }
 2341 
 2342         /*
 2343          * Get the station address: first try NIC memory, then the EEPROM.
 2344          */
 2345         mac_addr = bge_readmem_ind(sc, 0x0c14);
 2346         if ((mac_addr >> 16) == 0x484b) {
 2347                 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
 2348                 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
 2349                 mac_addr = bge_readmem_ind(sc, 0x0c18);
 2350                 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
 2351                 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
 2352                 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
 2353                 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
 2354         } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 2355             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2356                 printf("bge%d: failed to read station address\n", unit);
 2357                 bge_release_resources(sc);
 2358                 error = ENXIO;
 2359                 goto fail;
 2360         }
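      	/*
       * The 0x484b value checked above is ASCII "HK", a signature the
       * bootcode apparently leaves at NIC SRAM offset 0x0c14 when it
       * has stashed a valid station address there; only when the
       * signature is absent do we fall back to the slower EEPROM read.
       */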
 2361 
 2362         /* The 5705 and 5750 limit the RX return ring to 512 entries. */
 2363         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 2364             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 2365                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 2366         else
 2367                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 2368 
 2369         if (bge_dma_alloc(dev)) {
 2370                 printf ("bge%d: failed to allocate DMA resources\n",
 2371                     sc->bge_unit);
 2372                 bge_release_resources(sc);
 2373                 error = ENXIO;
 2374                 goto fail;
 2375         }
 2376 
 2377         /*
 2378          * Try to allocate memory for jumbo buffers.
 2379          * The 5705 does not appear to support jumbo frames.
 2380          */
 2381         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2382             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2383                 if (bge_alloc_jumbo_mem(sc)) {
 2384                         printf("bge%d: jumbo buffer allocation "
 2385                             "failed\n", sc->bge_unit);
 2386                         bge_release_resources(sc);
 2387                         error = ENXIO;
 2388                         goto fail;
 2389                 }
 2390         }
 2391 
 2392         /* Set default tuneable values. */
 2393         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2394         sc->bge_rx_coal_ticks = 150;
 2395         sc->bge_tx_coal_ticks = 150;
 2396         sc->bge_rx_max_coal_bds = 64;
 2397         sc->bge_tx_max_coal_bds = 128;
 2398 
 2399         /* Set up ifnet structure */
 2400         ifp = &sc->arpcom.ac_if;
 2401         ifp->if_softc = sc;
 2402         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2403         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2404         ifp->if_ioctl = bge_ioctl;
 2405         ifp->if_start = bge_start;
 2406         ifp->if_watchdog = bge_watchdog;
 2407         ifp->if_init = bge_init;
 2408         ifp->if_mtu = ETHERMTU;
 2409         ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
 2410         ifp->if_hwassist = BGE_CSUM_FEATURES;
 2411         /* NB: the code for RX csum offload is disabled for now */
 2412         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
 2413             IFCAP_VLAN_MTU;
 2414         ifp->if_capenable = ifp->if_capabilities;
 2415 
 2416         /*
 2417          * Figure out what sort of media we have by checking the
 2418          * hardware config word in the first 32k of NIC internal memory,
 2419          * or fall back to examining the EEPROM if necessary.
 2420          * Note: on some BCM5700 cards, this value appears to be unset.
 2421          * If that's the case, we have to rely on identifying the NIC
 2422          * by its PCI subsystem ID, as we do below for the SysKonnect
 2423          * SK-9D41.
 2424          */
 2425         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
 2426                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2427         else {
 2428                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
 2429                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
 2430                 hwcfg = ntohl(hwcfg);
 2431         }
 2432 
 2433         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2434                 sc->bge_tbi = 1;
 2435 
 2436         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2437         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
 2438                 sc->bge_tbi = 1;
 2439 
 2440         if (sc->bge_tbi) {
 2441                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
 2442                     bge_ifmedia_upd, bge_ifmedia_sts);
 2443                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2444                 ifmedia_add(&sc->bge_ifmedia,
 2445                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2446                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2447                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2448                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
 2449         } else {
 2450                 /*
 2451                  * Do transceiver setup.
 2452                  */
 2453                 if (mii_phy_probe(dev, &sc->bge_miibus,
 2454                     bge_ifmedia_upd, bge_ifmedia_sts)) {
 2455                         printf("bge%d: MII without any PHY!\n", sc->bge_unit);
 2456                         bge_release_resources(sc);
 2457                         bge_free_jumbo_mem(sc);
 2458                         error = ENXIO;
 2459                         goto fail;
 2460                 }
 2461         }
 2462 
 2463         /*
 2464          * When using the BCM5701 in PCI-X mode, data corruption has
 2465          * been observed in the first few bytes of some received packets.
 2466          * Aligning the packet buffer in memory eliminates the corruption.
 2467          * Unfortunately, this misaligns the packet payloads.  On platforms
 2468          * which do not support unaligned accesses, we will realign the
 2469          * payloads by copying the received packets.
 2470          */
 2471         switch (sc->bge_chipid) {
 2472         case BGE_CHIPID_BCM5701_A0:
 2473         case BGE_CHIPID_BCM5701_B0:
 2474         case BGE_CHIPID_BCM5701_B2:
 2475         case BGE_CHIPID_BCM5701_B5:
 2476                 /* If in PCI-X mode, work around the alignment bug. */
 2477                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
 2478                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2479                     BGE_PCISTATE_PCI_BUSSPEED)
 2480                         sc->bge_rx_alignment_bug = 1;
 2481                 break;
 2482         }
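      	/*
       * The realignment itself happens in bge_rxeof(): when
       * bge_rx_alignment_bug is set, each received frame is bcopy()ed
       * forward by ETHER_ALIGN bytes before being handed up the stack.
       */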
 2483 
 2484         /*
 2485          * Call MI attach routine.
 2486          */
 2487         ether_ifattach(ifp, sc->arpcom.ac_enaddr);
 2488         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
 2489 
 2490         /*
 2491          * Hookup IRQ last.
 2492          */
 2493         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
 2494            bge_intr, sc, &sc->bge_intrhand);
 2495 
 2496         if (error) {
 2497                 bge_release_resources(sc);
 2498                 printf("bge%d: couldn't set up irq\n", unit);
 2499         }
 2500 
 2501 fail:
 2502         return(error);
 2503 }
 2504 
 2505 static int
 2506 bge_detach(dev)
 2507         device_t dev;
 2508 {
 2509         struct bge_softc *sc;
 2510         struct ifnet *ifp;
 2511 
 2512         sc = device_get_softc(dev);
 2513         ifp = &sc->arpcom.ac_if;
 2514 
 2515         BGE_LOCK(sc);
 2516         bge_stop(sc);
 2517         bge_reset(sc);
 2518         BGE_UNLOCK(sc);
 2519 
 2520         ether_ifdetach(ifp);
 2521 
 2522         if (sc->bge_tbi) {
 2523                 ifmedia_removeall(&sc->bge_ifmedia);
 2524         } else {
 2525                 bus_generic_detach(dev);
 2526                 device_delete_child(dev, sc->bge_miibus);
 2527         }
 2528 
 2529         bge_release_resources(sc);
 2530         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2531             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2532                 bge_free_jumbo_mem(sc);
 2533 
 2534         return(0);
 2535 }
 2536 
 2537 static void
 2538 bge_release_resources(sc)
 2539         struct bge_softc *sc;
 2540 {
 2541         device_t dev;
 2542 
 2543         dev = sc->bge_dev;
 2544 
 2545         if (sc->bge_vpd_prodname != NULL)
 2546                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2547 
 2548         if (sc->bge_vpd_readonly != NULL)
 2549                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2550 
 2551         if (sc->bge_intrhand != NULL)
 2552                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
 2553 
 2554         if (sc->bge_irq != NULL)
 2555                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
 2556 
 2557         if (sc->bge_res != NULL)
 2558                 bus_release_resource(dev, SYS_RES_MEMORY,
 2559                     BGE_PCI_BAR0, sc->bge_res);
 2560 
 2561         bge_dma_free(sc);
 2562 
 2563         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
 2564                 BGE_LOCK_DESTROY(sc);
 2565 
 2566         return;
 2567 }
 2568 
 2569 static void
 2570 bge_reset(sc)
 2571         struct bge_softc *sc;
 2572 {
 2573         device_t dev;
 2574         u_int32_t cachesize, command, pcistate, reset;
 2575         int i, val = 0;
 2576 
 2577         dev = sc->bge_dev;
 2578 
 2579         /* Save some important PCI state. */
 2580         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
 2581         command = pci_read_config(dev, BGE_PCI_CMD, 4);
 2582         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
 2583 
 2584         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2585             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2586             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2587 
 2588         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
 2589 
 2590         /* XXX: Broadcom Linux driver. */
 2591         if (sc->bge_pcie) {
 2592                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
 2593                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
 2594                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2595                         /* Prevent PCIE link training during global reset */
 2596                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
 2597                         reset |= (1<<29);
 2598                 }
 2599         }
 2600 
 2601         /* Issue global reset */
 2602         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
 2603 
 2604         DELAY(1000);
 2605 
 2606         /* XXX: Broadcom Linux driver. */
 2607         if (sc->bge_pcie) {
 2608                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
 2609                         uint32_t v;
 2610 
 2611                         DELAY(500000); /* wait for link training to complete */
 2612                         v = pci_read_config(dev, 0xc4, 4);
 2613                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
 2614                 }
 2615                 /* Set PCIE max payload size and clear error status. */
 2616                 pci_write_config(dev, 0xd8, 0xf5000, 4);
 2617         }
 2618 
 2619         /* Reset some of the PCI state that got zapped by reset */
 2620         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2621             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2622             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2623         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
 2624         pci_write_config(dev, BGE_PCI_CMD, command, 4);
 2625         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2626 
 2627         /* Enable memory arbiter. */
 2628         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2629             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2630                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2631 
 2632         /*
 2633          * Prevent PXE restart: write a magic number to the
 2634          * general communications memory at 0xB50.
 2635          */
 2636         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2637         /*
 2638          * Poll the location we just wrote until
 2639          * we see the 1's complement of the magic number.
 2640          * This indicates that the firmware initialization
 2641          * is complete.
 2642          */
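      	/*
       * (BGE_MAGIC_NUMBER is 0x4B657654 -- ASCII "KevT" -- as defined
       * in if_bgereg.h; the bootcode replaces it with its one's
       * complement once initialization finishes, which is the
       * transition the loop below waits for.)
       */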
 2643         for (i = 0; i < BGE_TIMEOUT; i++) {
 2644                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2645                 if (val == ~BGE_MAGIC_NUMBER)
 2646                         break;
 2647                 DELAY(10);
 2648         }
 2649 
 2650         if (i == BGE_TIMEOUT) {
 2651                 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
 2652                 return;
 2653         }
 2654 
 2655         /*
 2656          * XXX Wait for the value of the PCISTATE register to
 2657          * return to its original pre-reset state. This is a
 2658          * fairly good indicator of reset completion. If we don't
 2659          * wait for the reset to fully complete, trying to read
 2660          * from the device's non-PCI registers may yield garbage
 2661          * results.
 2662          */
 2663         for (i = 0; i < BGE_TIMEOUT; i++) {
 2664                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
 2665                         break;
 2666                 DELAY(10);
 2667         }
 2668 
 2669         /* Fix up byte swapping */
 2670         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
 2671             BGE_MODECTL_BYTESWAP_DATA);
 2672 
 2673         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2674 
 2675         /*
 2676          * The 5704 in TBI mode apparently needs some special
 2677          * adjustment to ensure the SERDES drive level is set
 2678          * to 1.2V.
 2679          */
 2680         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
 2681                 uint32_t serdescfg;
 2682                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
 2683                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
 2684                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
 2685         }
 2686 
 2687         /* XXX: Broadcom Linux driver. */
 2688         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2689                 uint32_t v;
 2690 
 2691                 v = CSR_READ_4(sc, 0x7c00);
 2692                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
 2693         }
 2694         DELAY(10000);
 2695 
 2696         return;
 2697 }
 2698 
 2699 /*
 2700  * Frame reception handling. This is called if there's a frame
 2701  * on the receive return list.
 2702  *
 2703  * Note: we have to be able to handle two possibilities here:
 2704  * 1) the frame is from the jumbo receive ring
 2705  * 2) the frame is from the standard receive ring
 2706  */
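      /*
 * The loop below chases the producer index that the chip DMAs into the
 * status block; bge_rx_saved_considx is the driver's consumer index,
 * and each return descriptor's BGE_RXBDFLAG_JUMBO_RING flag says which
 * of the two buffer rings the frame came from.
 */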
 2707 
 2708 static void
 2709 bge_rxeof(sc)
 2710         struct bge_softc *sc;
 2711 {
 2712         struct ifnet *ifp;
 2713         int stdcnt = 0, jumbocnt = 0;
 2714 
 2715         BGE_LOCK_ASSERT(sc);
 2716 
 2717         ifp = &sc->arpcom.ac_if;
 2718 
 2719         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2720             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
 2721         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2722             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
 2723         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2724             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2725                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2726                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2727                     BUS_DMASYNC_POSTREAD);
 2728         }
 2729 
 2730         while(sc->bge_rx_saved_considx !=
 2731             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
 2732                 struct bge_rx_bd        *cur_rx;
 2733                 u_int32_t               rxidx;
 2734                 struct ether_header     *eh;
 2735                 struct mbuf             *m = NULL;
 2736                 u_int16_t               vlan_tag = 0;
 2737                 int                     have_tag = 0;
 2738 
 2739                 cur_rx =
 2740                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
 2741 
 2742                 rxidx = cur_rx->bge_idx;
 2743                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2744 
 2745                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2746                         have_tag = 1;
 2747                         vlan_tag = cur_rx->bge_vlan_tag;
 2748                 }
 2749 
 2750                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2751                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2752                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 2753                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
 2754                             BUS_DMASYNC_POSTREAD);
 2755                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 2756                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
 2757                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2758                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2759                         jumbocnt++;
 2760                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2761                                 ifp->if_ierrors++;
 2762                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2763                                 continue;
 2764                         }
 2765                         if (bge_newbuf_jumbo(sc,
 2766                             sc->bge_jumbo, NULL) == ENOBUFS) {
 2767                                 ifp->if_ierrors++;
 2768                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2769                                 continue;
 2770                         }
 2771                 } else {
 2772                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2773                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2774                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
 2775                             BUS_DMASYNC_POSTREAD);
 2776                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2777                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
 2778                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2779                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2780                         stdcnt++;
 2781                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2782                                 ifp->if_ierrors++;
 2783                                 bge_newbuf_std(sc, sc->bge_std, m);
 2784                                 continue;
 2785                         }
 2786                         if (bge_newbuf_std(sc, sc->bge_std,
 2787                             NULL) == ENOBUFS) {
 2788                                 ifp->if_ierrors++;
 2789                                 bge_newbuf_std(sc, sc->bge_std, m);
 2790                                 continue;
 2791                         }
 2792                 }
 2793 
 2794                 ifp->if_ipackets++;
 2795 #ifndef __i386__
 2796                 /*
 2797                  * The i386 allows unaligned accesses, but for other
 2798                  * platforms we must make sure the payload is aligned.
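                       * Copying the frame up by ETHER_ALIGN (2) bytes
                       * leaves the IP header on a 32-bit boundary, at the
                       * cost of an extra copy.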
 2799                  */
 2800                 if (sc->bge_rx_alignment_bug) {
 2801                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 2802                             cur_rx->bge_len);
 2803                         m->m_data += ETHER_ALIGN;
 2804                 }
 2805 #endif
 2806                 eh = mtod(m, struct ether_header *);
 2807                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2808                 m->m_pkthdr.rcvif = ifp;
 2809 
 2810 #if 0 /* currently broken for some packets, possibly related to TCP options */
 2811                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2812                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2813                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
 2814                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2815                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
 2816                                 m->m_pkthdr.csum_data =
 2817                                     cur_rx->bge_tcp_udp_csum;
 2818                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 2819                         }
 2820                 }
 2821 #endif
 2822 
 2823                 /*
 2824                  * If we received a packet with a vlan tag,
 2825                  * attach that information to the packet.
 2826                  */
 2827                 if (have_tag)
 2828                         VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
 2829 
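                      /*
                       * Drop the driver mutex across if_input() so the
                       * stack can re-enter the driver (e.g. to transmit)
                       * without recursing on the lock.
                       */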
 2830                 BGE_UNLOCK(sc);
 2831                 (*ifp->if_input)(ifp, m);
 2832                 BGE_LOCK(sc);
 2833         }
 2834 
 2835         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2836             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 2837         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2838             sc->bge_cdata.bge_rx_std_ring_map,
 2839             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
 2840         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2841             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2842                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2843                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2844                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2845         }
 2846 
 2847         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2848         if (stdcnt)
 2849                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2850         if (jumbocnt)
 2851                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2852 
 2853         return;
 2854 }
 2855 
 2856 static void
 2857 bge_txeof(sc)
 2858         struct bge_softc *sc;
 2859 {
 2860         struct bge_tx_bd *cur_tx = NULL;
 2861         struct ifnet *ifp;
 2862 
 2863         BGE_LOCK_ASSERT(sc);
 2864 
 2865         ifp = &sc->arpcom.ac_if;
 2866 
 2867         /*
 2868          * Go through our tx ring and free mbufs for those
 2869          * frames that have been sent.
 2870          */
 2871         while (sc->bge_tx_saved_considx !=
 2872             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
 2873                 u_int32_t               idx = 0;
 2874 
 2875                 idx = sc->bge_tx_saved_considx;
 2876                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 2877                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2878                         ifp->if_opackets++;
 2879                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 2880                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
 2881                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2882                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2883                             sc->bge_cdata.bge_tx_dmamap[idx]);
 2884                 }
 2885                 sc->bge_txcnt--;
 2886                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2887                 ifp->if_timer = 0;
 2888         }
 2889 
 2890         if (cur_tx != NULL)
 2891                 ifp->if_flags &= ~IFF_OACTIVE;
 2892 
 2893         return;
 2894 }
 2895 
 2896 static void
 2897 bge_intr(xsc)
 2898         void *xsc;
 2899 {
 2900         struct bge_softc *sc;
 2901         struct ifnet *ifp;
 2902         u_int32_t statusword;
 2903         u_int32_t status, mimode;
 2904 
 2905         sc = xsc;
 2906         ifp = &sc->arpcom.ac_if;
 2907 
 2908         BGE_LOCK(sc);
 2909 
 2910         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2911             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
 2912 
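              /*
               * Read and clear the status word in one atomic step; an
               * update the chip DMAs in afterwards is left intact for
               * the next interrupt.
               */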
 2913         statusword =
 2914             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2915 
 2916 #ifdef notdef
 2917         /* Avoid this for now -- checking this register is expensive. */
 2918         /* Make sure this is really our interrupt. */
 2919         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2920                 return;
 2921 #endif
 2922         /* Ack interrupt and stop others from occurring. */
 2923         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2924 
 2925         /*
 2926          * Process link state changes.
 2927          * Grrr. The link status word in the status block does
 2928          * not work correctly on the BCM5700 rev AX and BX chips,
 2929          * according to all available information. Hence, we have
 2930          * to enable MII interrupts in order to properly obtain
 2931          * async link changes. Unfortunately, this also means that
 2932          * we have to read the MAC status register to detect link
 2933          * changes, thereby adding an additional register access to
 2934          * the interrupt handler.
 2935          */
 2936 
 2937         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
 2938 
 2939                 status = CSR_READ_4(sc, BGE_MAC_STS);
 2940                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 2941                         sc->bge_link = 0;
 2942                         callout_stop(&sc->bge_stat_ch);
 2943                         bge_tick_locked(sc);
 2944                         /* Clear the interrupt */
 2945                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 2946                             BGE_EVTENB_MI_INTERRUPT);
 2947                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
 2948                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
 2949                             BRGPHY_INTRS);
 2950                 }
 2951         } else {
 2952                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
 2953                         /*
 2954                          * Sometimes PCS encoding errors are detected in
 2955                          * TBI mode (on fiber NICs), and for some reason
 2956                          * the chip will signal them as link changes.
 2957                          * If we get a link change event, but the 'PCS
 2958                          * encoding error' bit in the MAC status register
 2959                          * is set, don't bother doing a link check.
 2960                          * This avoids spurious "gigabit link up" messages
 2961                          * that sometimes appear on fiber NICs during
 2962                          * periods of heavy traffic. (There should be no
 2963                          * effect on copper NICs.)
 2964                          *
 2965                          * If we do have a copper NIC (bge_tbi == 0) then
 2966                          * check that the AUTOPOLL bit is set before
 2967                          * processing the event as a real link change.
 2968                          * Turning AUTOPOLL on and off in the MII read/write
 2969                          * functions will often trigger a link status
 2970                          * interrupt for no reason.
 2971                          */
 2972                         status = CSR_READ_4(sc, BGE_MAC_STS);
 2973                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
 2974                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
 2975                             BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
 2976                             (mimode & BGE_MIMODE_AUTOPOLL))) {
 2977                                 sc->bge_link = 0;
 2978                                 callout_stop(&sc->bge_stat_ch);
 2979                                 bge_tick_locked(sc);
 2980                         }
 2981                         /* Clear the interrupt */
 2982                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 2983                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 2984                             BGE_MACSTAT_LINK_CHANGED);
 2985 
 2986                         /* Force flush the status block cached by PCI bridge */
 2987                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
 2988                 }
 2989         }
 2990 
 2991         if (ifp->if_flags & IFF_RUNNING) {
 2992                 /* Check RX return ring producer/consumer */
 2993                 bge_rxeof(sc);
 2994 
 2995                 /* Check TX ring producer/consumer */
 2996                 bge_txeof(sc);
 2997         }
 2998 
 2999         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 3000             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 3001 
 3002         bge_handle_events(sc);
 3003 
 3004         /* Re-enable interrupts. */
 3005         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3006 
 3007         if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
 3008                 bge_start_locked(ifp);
 3009 
 3010         BGE_UNLOCK(sc);
 3011 
 3012         return;
 3013 }
 3014 
 3015 static void
 3016 bge_tick_locked(sc)
 3017         struct bge_softc *sc;
 3018 {
 3019         struct mii_data *mii = NULL;
 3020         struct ifmedia *ifm = NULL;
 3021         struct ifnet *ifp;
 3022 
 3023         ifp = &sc->arpcom.ac_if;
 3024 
 3025         BGE_LOCK_ASSERT(sc);
 3026 
 3027         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3028             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 3029                 bge_stats_update_regs(sc);
 3030         else
 3031                 bge_stats_update(sc);
 3032         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3033         if (sc->bge_link)
 3034                 return;
 3035 
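              /*
               * TBI (fiber) parts have no MII PHY, so link state is
               * detected by polling the PCS-synched bit in the MAC
               * status register instead.
               */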
 3036         if (sc->bge_tbi) {
 3037                 ifm = &sc->bge_ifmedia;
 3038                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3039                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
 3040                         sc->bge_link++;
 3041                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 3042                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3043                                     BGE_MACMODE_TBI_SEND_CFGS);
 3044                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 3045                         if (bootverbose)
 3046                                 printf("bge%d: gigabit link up\n",
 3047                                     sc->bge_unit);
 3048                         if (ifp->if_snd.ifq_head != NULL)
 3049                                 bge_start_locked(ifp);
 3050                 }
 3051                 return;
 3052         }
 3053 
 3054         mii = device_get_softc(sc->bge_miibus);
 3055         mii_tick(mii);
 3056 
 3057         if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
 3058             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 3059                 sc->bge_link++;
 3060                 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
 3061                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
 3062                     bootverbose)
 3063                         printf("bge%d: gigabit link up\n", sc->bge_unit);
 3064                 if (ifp->if_snd.ifq_head != NULL)
 3065                         bge_start_locked(ifp);
 3066         }
 3067 
 3068         return;
 3069 }
 3070 
 3071 static void
 3072 bge_tick(xsc)
 3073         void *xsc;
 3074 {
 3075         struct bge_softc *sc;
 3076 
 3077         sc = xsc;
 3078 
 3079         BGE_LOCK(sc);
 3080         bge_tick_locked(sc);
 3081         BGE_UNLOCK(sc);
 3082 }
 3083 
 3084 static void
 3085 bge_stats_update_regs(sc)
 3086         struct bge_softc *sc;
 3087 {
 3088         struct ifnet *ifp;
 3089         struct bge_mac_stats_regs stats;
 3090         u_int32_t *s;
 3091         int i;
 3092 
 3093         ifp = &sc->arpcom.ac_if;
 3094 
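              /*
               * The 5705/5750 class parts expose MAC statistics as a bank
               * of registers at BGE_RX_STATS rather than DMAing a
               * statistics block to host memory, so copy them out one
               * word at a time.
               */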
 3095         s = (u_int32_t *)&stats;
 3096         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
 3097                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
 3098                 s++;
 3099         }
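              /*
               * The hardware counters are cumulative; the
               * "+= total - if_collisions" idiom replaces the interface
               * count with the current hardware sum.
               */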
 3100 
 3101         ifp->if_collisions +=
 3102            (stats.dot3StatsSingleCollisionFrames +
 3103            stats.dot3StatsMultipleCollisionFrames +
 3104            stats.dot3StatsExcessiveCollisions +
 3105            stats.dot3StatsLateCollisions) -
 3106            ifp->if_collisions;
 3107 
 3108         return;
 3109 }
 3110 
 3111 static void
 3112 bge_stats_update(sc)
 3113         struct bge_softc *sc;
 3114 {
 3115         struct ifnet *ifp;
 3116         struct bge_stats *stats;
 3117 
 3118         ifp = &sc->arpcom.ac_if;
 3119 
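              /*
               * Other chips keep the statistics block in NIC-local
               * memory; read it through the PCI memory window mapping.
               */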
 3120         stats = (struct bge_stats *)(sc->bge_vhandle +
 3121             BGE_MEMWIN_START + BGE_STATS_BLOCK);
 3122 
 3123         ifp->if_collisions +=
 3124            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
 3125            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
 3126            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
 3127            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
 3128            ifp->if_collisions;
 3129 
 3130 #ifdef notdef
 3131         ifp->if_collisions +=
 3132            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
 3133            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
 3134            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
 3135            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
 3136            ifp->if_collisions;
 3137 #endif
 3138 
 3139         return;
 3140 }
 3141 
 3142 /*
 3143  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3144  * pointers to descriptors.
 3145  */
 3146 static int
 3147 bge_encap(sc, m_head, txidx)
 3148         struct bge_softc *sc;
 3149         struct mbuf *m_head;
 3150         u_int32_t *txidx;
 3151 {
 3152         struct bge_tx_bd        *f = NULL;
 3153         u_int16_t               csum_flags = 0;
 3154         struct m_tag            *mtag;
 3155         struct bge_dmamap_arg   ctx;
 3156         bus_dmamap_t            map;
 3157         int                     error;
 3158 
 3160         if (m_head->m_pkthdr.csum_flags) {
 3161                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 3162                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3163                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
 3164                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3165                 if (m_head->m_flags & M_LASTFRAG)
 3166                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
 3167                 else if (m_head->m_flags & M_FRAG)
 3168                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 3169         }
 3170 
 3171         mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
 3172 
 3173         ctx.sc = sc;
 3174         ctx.bge_idx = *txidx;
 3175         ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 3176         ctx.bge_flags = csum_flags;
 3177         /*
 3178          * Sanity check: avoid coming within 16 descriptors
 3179          * of the end of the ring.
 3180          */
 3181         ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
 3182 
 3183         map = sc->bge_cdata.bge_tx_dmamap[*txidx];
 3184         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 3185             m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 3186 
 3187         if (error || ctx.bge_maxsegs == 0 /*||
 3188             ctx.bge_idx == sc->bge_tx_saved_considx*/)
 3189                 return (ENOBUFS);
 3190 
 3191         /*
 3192          * Ensure that the map for this transmission
 3193          * is placed at the array index of the last descriptor
 3194          * in this chain.
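               * (bge_txeof() unloads the map stored at the index of the
               * chain's last descriptor, so the map and chain arrays must
               * stay paired.)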
 3195          */
 3196         sc->bge_cdata.bge_tx_dmamap[*txidx] =
 3197             sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
 3198         sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
 3199         sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
 3200         sc->bge_txcnt += ctx.bge_maxsegs;
 3201         f = &sc->bge_ldata.bge_tx_ring[*txidx];
 3202         if (mtag != NULL) {
 3203                 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
 3204                 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
 3205         } else {
 3206                 f->bge_vlan_tag = 0;
 3207         }
 3208 
 3209         BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
 3210         *txidx = ctx.bge_idx;
 3211 
 3212         return(0);
 3213 }
 3214 
 3215 /*
 3216  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3217  * to the mbuf data regions directly in the transmit descriptors.
 3218  */
 3219 static void
 3220 bge_start_locked(ifp)
 3221         struct ifnet *ifp;
 3222 {
 3223         struct bge_softc *sc;
 3224         struct mbuf *m_head = NULL;
 3225         u_int32_t prodidx = 0;
 3226         int count = 0;
 3227 
 3228         sc = ifp->if_softc;
 3229 
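              /*
               * With no link, hold frames in the send queue; the backlog
               * test apparently lets a sizable queue attempt transmission
               * anyway.
               */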
 3230         if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
 3231                 return;
 3232 
 3233         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
 3234 
 3235         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3236                 IF_DEQUEUE(&ifp->if_snd, m_head);
 3237                 if (m_head == NULL)
 3238                         break;
 3239 
 3240                 /*
 3241                  * XXX
 3242                  * The code inside the if() block is never reached since we
 3243                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
 3244                  * requests to checksum TCP/UDP in a fragmented packet.
 3245                  *
 3246                  * XXX
 3247                  * safety overkill.  If this is a fragmented packet chain
 3248                  * with delayed TCP/UDP checksums, then only encapsulate
 3249                  * it if we have enough descriptors to handle the entire
 3250                  * chain at once.
 3251                  * (paranoia -- may not actually be needed)
 3252                  */
 3253                 if (m_head->m_flags & M_FIRSTFRAG &&
 3254                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3255                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3256                             m_head->m_pkthdr.csum_data + 16) {
 3257                                 IF_PREPEND(&ifp->if_snd, m_head);
 3258                                 ifp->if_flags |= IFF_OACTIVE;
 3259                                 break;
 3260                         }
 3261                 }
 3262 
 3263                 /*
 3264                  * Pack the data into the transmit ring. If we
 3265                  * don't have room, set the OACTIVE flag and wait
 3266                  * for the NIC to drain the ring.
 3267                  */
 3268                 if (bge_encap(sc, m_head, &prodidx)) {
 3269                         IF_PREPEND(&ifp->if_snd, m_head);
 3270                         ifp->if_flags |= IFF_OACTIVE;
 3271                         break;
 3272                 }
 3273                 ++count;
 3274 
 3275                 /*
 3276                  * If there's a BPF listener, bounce a copy of this frame
 3277                  * to him.
 3278                  */
 3279                 BPF_MTAP(ifp, m_head);
 3280         }
 3281 
 3282         if (count == 0) {
 3283                 /* no packets were dequeued */
 3284                 return;
 3285         }
 3286 
 3287         /* Transmit */
 3288         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3289         /* 5700 BX errata: the producer mailbox write must be issued twice. */
 3290         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 3291                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3292 
 3293         /*
 3294          * Set a timeout in case the chip goes out to lunch.
 3295          */
 3296         ifp->if_timer = 5;
 3297 
 3298         return;
 3299 }
 3300 
 3301 /*
 3302  * Locked wrapper: acquire the driver mutex and hand the send queue
 3303  * to bge_start_locked().
 3304  */
 3305 static void
 3306 bge_start(ifp)
 3307         struct ifnet *ifp;
 3308 {
 3309         struct bge_softc *sc;
 3310 
 3311         sc = ifp->if_softc;
 3312         BGE_LOCK(sc);
 3313         bge_start_locked(ifp);
 3314         BGE_UNLOCK(sc);
 3315 }
 3316 
 3317 static void
 3318 bge_init_locked(sc)
 3319         struct bge_softc *sc;
 3320 {
 3321         struct ifnet *ifp;
 3322         u_int16_t *m;
 3323 
 3324         BGE_LOCK_ASSERT(sc);
 3325 
 3326         ifp = &sc->arpcom.ac_if;
 3327 
 3328         if (ifp->if_flags & IFF_RUNNING)
 3329                 return;
 3330 
 3331         /* Cancel pending I/O and flush buffers. */
 3332         bge_stop(sc);
 3333         bge_reset(sc);
 3334         bge_chipinit(sc);
 3335 
 3336         /*
 3337          * Init the various state machines, ring
 3338          * control blocks and firmware.
 3339          */
 3340         if (bge_blockinit(sc)) {
 3341                 printf("bge%d: initialization failure\n", sc->bge_unit);
 3342                 return;
 3343         }
 3344 
 3345         ifp = &sc->arpcom.ac_if;
 3346 
 3347         /* Specify MTU. */
 3348         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3349             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3350 
 3351         /* Load our MAC address. */
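              /*
               * The address is written as three network-order 16-bit words
               * split across the two MAC address registers.
               */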
 3352         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
 3353         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3354         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3355 
 3356         /* Enable or disable promiscuous mode as needed. */
 3357         if (ifp->if_flags & IFF_PROMISC) {
 3358                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3359         } else {
 3360                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3361         }
 3362 
 3363         /* Program multicast filter. */
 3364         bge_setmulti(sc);
 3365 
 3366         /* Init RX ring. */
 3367         bge_init_rx_ring_std(sc);
 3368 
 3369         /*
 3370          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 3371          * memory to ensure that the chip has in fact read the first
 3372          * entry of the ring.
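               * Entry 0's buffer-length word should read back as the value
               * bge_newbuf_std() programmed (MCLBYTES - ETHER_ALIGN).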
 3373          */
 3374         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
 3375                 u_int32_t               v, i;
 3376                 for (i = 0; i < 10; i++) {
 3377                         DELAY(20);
 3378                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
 3379                         if (v == (MCLBYTES - ETHER_ALIGN))
 3380                                 break;
 3381                 }
 3382                 if (i == 10)
 3383                         printf("bge%d: 5705 A0 chip failed to load RX ring\n",
 3384                             sc->bge_unit);
 3385         }
 3386 
 3387         /* Init jumbo RX ring. */
 3388         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3389                 bge_init_rx_ring_jumbo(sc);
 3390 
 3391         /* Init our RX return ring index */
 3392         sc->bge_rx_saved_considx = 0;
 3393 
 3394         /* Init TX ring. */
 3395         bge_init_tx_ring(sc);
 3396 
 3397         /* Turn on transmitter */
 3398         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3399 
 3400         /* Turn on receiver */
 3401         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3402 
 3403         /* Tell firmware we're alive. */
 3404         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3405 
 3406         /* Enable host interrupts. */
 3407         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
 3408         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3409         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3410 
 3411         bge_ifmedia_upd(ifp);
 3412 
 3413         ifp->if_flags |= IFF_RUNNING;
 3414         ifp->if_flags &= ~IFF_OACTIVE;
 3415 
 3416         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3417 
 3418         return;
 3419 }
 3420 
 3421 static void
 3422 bge_init(xsc)
 3423         void *xsc;
 3424 {
 3425         struct bge_softc *sc = xsc;
 3426 
 3427         BGE_LOCK(sc);
 3428         bge_init_locked(sc);
 3429         BGE_UNLOCK(sc);
 3430 
 3431         return;
 3432 }
 3433 
 3434 /*
 3435  * Set media options.
 3436  */
 3437 static int
 3438 bge_ifmedia_upd(ifp)
 3439         struct ifnet *ifp;
 3440 {
 3441         struct bge_softc *sc;
 3442         struct mii_data *mii;
 3443         struct ifmedia *ifm;
 3444 
 3445         sc = ifp->if_softc;
 3446         ifm = &sc->bge_ifmedia;
 3447 
 3448         /* If this is a 1000baseX NIC, enable the TBI port. */
 3449         if (sc->bge_tbi) {
 3450                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3451                         return(EINVAL);
 3452                 switch (IFM_SUBTYPE(ifm->ifm_media)) {
 3453                 case IFM_AUTO:
 3454 #ifndef BGE_FAKE_AUTONEG
 3455                         /*
 3456                          * The BCM5704 ASIC appears to have a special
 3457                          * mechanism for programming the autoneg
 3458                          * advertisement registers in TBI mode.
 3459                          */
 3460                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 3461                                 uint32_t sgdig;
 3462                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
 3463                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
 3464                                 sgdig |= BGE_SGDIGCFG_AUTO|
 3465                                     BGE_SGDIGCFG_PAUSE_CAP|
 3466                                     BGE_SGDIGCFG_ASYM_PAUSE;
 3467                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
 3468                                     sgdig|BGE_SGDIGCFG_SEND);
 3469                                 DELAY(5);
 3470                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
 3471                         }
 3472 #endif
 3473                         break;
 3474                 case IFM_1000_SX:
 3475                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3476                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3477                                     BGE_MACMODE_HALF_DUPLEX);
 3478                         } else {
 3479                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3480                                     BGE_MACMODE_HALF_DUPLEX);
 3481                         }
 3482                         break;
 3483                 default:
 3484                         return(EINVAL);
 3485                 }
 3486                 return(0);
 3487         }
 3488 
 3489         mii = device_get_softc(sc->bge_miibus);
 3490         sc->bge_link = 0;
 3491         if (mii->mii_instance) {
 3492                 struct mii_softc *miisc;
 3493                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 3494                     miisc = LIST_NEXT(miisc, mii_list))
 3495                         mii_phy_reset(miisc);
 3496         }
 3497         mii_mediachg(mii);
 3498 
 3499         return(0);
 3500 }
 3501 
 3502 /*
 3503  * Report current media status.
 3504  */
 3505 static void
 3506 bge_ifmedia_sts(ifp, ifmr)
 3507         struct ifnet *ifp;
 3508         struct ifmediareq *ifmr;
 3509 {
 3510         struct bge_softc *sc;
 3511         struct mii_data *mii;
 3512 
 3513         sc = ifp->if_softc;
 3514 
 3515         if (sc->bge_tbi) {
 3516                 ifmr->ifm_status = IFM_AVALID;
 3517                 ifmr->ifm_active = IFM_ETHER;
 3518                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3519                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3520                         ifmr->ifm_status |= IFM_ACTIVE;
 3521                 ifmr->ifm_active |= IFM_1000_SX;
 3522                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3523                         ifmr->ifm_active |= IFM_HDX;
 3524                 else
 3525                         ifmr->ifm_active |= IFM_FDX;
 3526                 return;
 3527         }
 3528 
 3529         mii = device_get_softc(sc->bge_miibus);
 3530         mii_pollstat(mii);
 3531         ifmr->ifm_active = mii->mii_media_active;
 3532         ifmr->ifm_status = mii->mii_media_status;
 3533 
 3534         return;
 3535 }
 3536 
 3537 static int
 3538 bge_ioctl(ifp, command, data)
 3539         struct ifnet *ifp;
 3540         u_long command;
 3541         caddr_t data;
 3542 {
 3543         struct bge_softc *sc = ifp->if_softc;
 3544         struct ifreq *ifr = (struct ifreq *) data;
 3545         int mask, error = 0;
 3546         struct mii_data *mii;
 3547 
 3548         switch (command) {
 3549         case SIOCSIFMTU:
 3550                 /* Disallow jumbo frames on the 5705 and 5750. */
 3551                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3552                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
 3553                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
 3554                         error = EINVAL;
 3555                 else {
 3556                         ifp->if_mtu = ifr->ifr_mtu;
 3557                         ifp->if_flags &= ~IFF_RUNNING;
 3558                         bge_init(sc);
 3559                 }
 3560                 break;
 3561         case SIOCSIFFLAGS:
 3562                 BGE_LOCK(sc);
 3563                 if (ifp->if_flags & IFF_UP) {
 3564                         /*
 3565                          * If only the state of the PROMISC flag changed,
 3566                          * then just use the 'set promisc mode' command
 3567                          * instead of reinitializing the entire NIC. Doing
 3568                          * a full re-init means reloading the firmware and
 3569                          * waiting for it to start up, which may take a
 3570                          * second or two.
 3571                          */
 3572                         if (ifp->if_flags & IFF_RUNNING &&
 3573                             ifp->if_flags & IFF_PROMISC &&
 3574                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3575                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3576                                     BGE_RXMODE_RX_PROMISC);
 3577                         } else if (ifp->if_flags & IFF_RUNNING &&
 3578                             !(ifp->if_flags & IFF_PROMISC) &&
 3579                             sc->bge_if_flags & IFF_PROMISC) {
 3580                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3581                                     BGE_RXMODE_RX_PROMISC);
 3582                         } else
 3583                                 bge_init_locked(sc);
 3584                 } else {
 3585                         if (ifp->if_flags & IFF_RUNNING) {
 3586                                 bge_stop(sc);
 3587                         }
 3588                 }
 3589                 sc->bge_if_flags = ifp->if_flags;
 3590                 BGE_UNLOCK(sc);
 3591                 error = 0;
 3592                 break;
 3593         case SIOCADDMULTI:
 3594         case SIOCDELMULTI:
 3595                 if (ifp->if_flags & IFF_RUNNING) {
 3596                         BGE_LOCK(sc);
 3597                         bge_setmulti(sc);
 3598                         BGE_UNLOCK(sc);
 3599                         error = 0;
 3600                 }
 3601                 break;
 3602         case SIOCSIFMEDIA:
 3603         case SIOCGIFMEDIA:
 3604                 if (sc->bge_tbi) {
 3605                         error = ifmedia_ioctl(ifp, ifr,
 3606                             &sc->bge_ifmedia, command);
 3607                 } else {
 3608                         mii = device_get_softc(sc->bge_miibus);
 3609                         error = ifmedia_ioctl(ifp, ifr,
 3610                             &mii->mii_media, command);
 3611                 }
 3612                 break;
 3613         case SIOCSIFCAP:
 3614                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3615                 /* NB: the code for RX csum offload is disabled for now */
 3616                 if (mask & IFCAP_TXCSUM) {
 3617                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3618                         if (IFCAP_TXCSUM & ifp->if_capenable)
 3619                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
 3620                         else
 3621                                 ifp->if_hwassist = 0;
 3622                 }
 3623                 error = 0;
 3624                 break;
 3625         default:
 3626                 error = ether_ioctl(ifp, command, data);
 3627                 break;
 3628         }
 3629 
 3630         return(error);
 3631 }
 3632 
 3633 static void
 3634 bge_watchdog(ifp)
 3635         struct ifnet *ifp;
 3636 {
 3637         struct bge_softc *sc;
 3638 
 3639         sc = ifp->if_softc;
 3640 
 3641         printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
 3642 
 3643         ifp->if_flags &= ~IFF_RUNNING;
 3644         bge_init(sc);
 3645 
 3646         ifp->if_oerrors++;
 3647 
 3648         return;
 3649 }
 3650 
 3651 /*
 3652  * Stop the adapter and free any mbufs allocated to the
 3653  * RX and TX lists.
 3654  */
 3655 static void
 3656 bge_stop(sc)
 3657         struct bge_softc *sc;
 3658 {
 3659         struct ifnet *ifp;
 3660         struct ifmedia_entry *ifm;
 3661         struct mii_data *mii = NULL;
 3662         int mtmp, itmp;
 3663 
 3664         BGE_LOCK_ASSERT(sc);
 3665 
 3666         ifp = &sc->arpcom.ac_if;
 3667 
 3668         if (!sc->bge_tbi)
 3669                 mii = device_get_softc(sc->bge_miibus);
 3670 
 3671         callout_stop(&sc->bge_stat_ch);
 3672 
 3673         /*
 3674          * Disable all of the receiver blocks
 3675          */
 3676         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3677         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3678         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3679         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3680             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3681                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3682         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3683         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3684         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3685 
 3686         /*
 3687          * Disable all of the transmit blocks
 3688          */
 3689         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3690         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3691         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3692         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3693         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3694         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3695             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3696                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3697         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3698 
 3699         /*
 3700          * Shut down all of the memory managers and related
 3701          * state machines.
 3702          */
 3703         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3704         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3705         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3706             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3707                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3708         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3709         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3710         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3711             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 3712                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3713                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3714         }
 3715 
 3716         /* Disable host interrupts. */
 3717         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3718         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3719 
 3720         /*
 3721          * Tell firmware we're shutting down.
 3722          */
 3723         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3724 
 3725         /* Free the RX lists. */
 3726         bge_free_rx_ring_std(sc);
 3727 
 3728         /* Free jumbo RX list. */
 3729         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3730             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3731                 bge_free_rx_ring_jumbo(sc);
 3732 
 3733         /* Free TX buffers. */
 3734         bge_free_tx_ring(sc);
 3735 
 3736         /*
 3737          * Isolate/power down the PHY, but leave the media selection
 3738          * unchanged so that things will be put back to normal when
 3739          * we bring the interface back up.
 3740          */
 3741         if (!sc->bge_tbi) {
 3742                 itmp = ifp->if_flags;
 3743                 ifp->if_flags |= IFF_UP;
 3744                 ifm = mii->mii_media.ifm_cur;
 3745                 mtmp = ifm->ifm_media;
 3746                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
 3747                 mii_mediachg(mii);
 3748                 ifm->ifm_media = mtmp;
 3749                 ifp->if_flags = itmp;
 3750         }
 3751 
 3752         sc->bge_link = 0;
 3753 
 3754         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3755 
 3756         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 3757 
 3758         return;
 3759 }
 3760 
 3761 /*
 3762  * Stop all chip I/O so that the kernel's probe routines don't
 3763  * get confused by errant DMAs when rebooting.
 3764  */
 3765 static void
 3766 bge_shutdown(dev)
 3767         device_t dev;
 3768 {
 3769         struct bge_softc *sc;
 3770 
 3771         sc = device_get_softc(dev);
 3772 
 3773         BGE_LOCK(sc);
 3774         bge_stop(sc);
 3775         bge_reset(sc);
 3776         BGE_UNLOCK(sc);
 3777 
 3778         return;
 3779 }
