FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c


    1 /*-
    2  * Copyright (c) 2001 Wind River Systems
    3  * Copyright (c) 1997, 1998, 1999, 2001
    4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. All advertising materials mentioning features or use of this software
   15  *    must display the following acknowledgement:
   16  *      This product includes software developed by Bill Paul.
   17  * 4. Neither the name of the author nor the names of any co-contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   31  * THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: releng/6.0/sys/dev/bge/if_bge.c 151141 2005-10-09 04:15:12Z delphij $");
   36 
   37 /*
   38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
   39  *
   40  * The Broadcom BCM5700 is based on technology originally developed by
   41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
   42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
   43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
   44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
   45  * frames, highly configurable RX filtering, and 16 RX and TX queues
   46  * (which, along with RX filter rules, can be used for QOS applications).
   47  * Other features, such as TCP segmentation, may be available as part
   48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
   49  * firmware images can be stored in hardware and need not be compiled
   50  * into the driver.
   51  *
   52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
   53  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
   54  *
   55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
   56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
   57  * does not support external SSRAM.
   58  *
   59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
   60  * brand name, which is functionally similar but lacks PCI-X support.
   61  *
   62  * Without external SSRAM, you can have at most 4 TX rings,
   63  * and the use of the mini RX ring is disabled. This seems to imply
   64  * that these features are simply not available on the BCM5701. As a
   65  * result, this driver does not implement any support for the mini RX
   66  * ring.
   67  */
   68 
   69 #include <sys/param.h>
   70 #include <sys/endian.h>
   71 #include <sys/systm.h>
   72 #include <sys/sockio.h>
   73 #include <sys/mbuf.h>
   74 #include <sys/malloc.h>
   75 #include <sys/kernel.h>
   76 #include <sys/module.h>
   77 #include <sys/socket.h>
   78 #include <sys/queue.h>
   79 
   80 #include <net/if.h>
   81 #include <net/if_arp.h>
   82 #include <net/ethernet.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 
   86 #include <net/bpf.h>
   87 
   88 #include <net/if_types.h>
   89 #include <net/if_vlan_var.h>
   90 
   91 #include <netinet/in_systm.h>
   92 #include <netinet/in.h>
   93 #include <netinet/ip.h>
   94 
   95 #include <machine/clock.h>      /* for DELAY */
   96 #include <machine/bus.h>
   97 #include <machine/resource.h>
   98 #include <sys/bus.h>
   99 #include <sys/rman.h>
  100 
  101 #include <dev/mii/mii.h>
  102 #include <dev/mii/miivar.h>
  103 #include "miidevs.h"
  104 #include <dev/mii/brgphyreg.h>
  105 
  106 #include <dev/pci/pcireg.h>
  107 #include <dev/pci/pcivar.h>
  108 
  109 #include <dev/bge/if_bgereg.h>
  110 
  111 #include "opt_bge.h"
  112 
  113 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
  114 
  115 MODULE_DEPEND(bge, pci, 1, 1, 1);
  116 MODULE_DEPEND(bge, ether, 1, 1, 1);
  117 MODULE_DEPEND(bge, miibus, 1, 1, 1);
  118 
  119 /* "controller miibus0" required.  See GENERIC if you get errors here. */
  120 #include "miibus_if.h"
  121 
  122 /*
  123  * Various supported device vendors/types and their names. Note: the
  124  * spec seems to indicate that the hardware still has Alteon's vendor
  125  * ID burned into it, though it will always be overridden by the vendor
  126  * ID in the EEPROM. Just to be safe, we cover all possibilities.
  127  */
  128 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
  129 
  130 static struct bge_type bge_devs[] = {
  131         { ALT_VENDORID, ALT_DEVICEID_BCM5700,
  132                 "Broadcom BCM5700 Gigabit Ethernet" },
  133         { ALT_VENDORID, ALT_DEVICEID_BCM5701,
  134                 "Broadcom BCM5701 Gigabit Ethernet" },
  135         { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
  136                 "Broadcom BCM5700 Gigabit Ethernet" },
  137         { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
  138                 "Broadcom BCM5701 Gigabit Ethernet" },
  139         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
  140                 "Broadcom BCM5702 Gigabit Ethernet" },
  141         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
  142                 "Broadcom BCM5702X Gigabit Ethernet" },
  143         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
  144                 "Broadcom BCM5703 Gigabit Ethernet" },
  145         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
  146                 "Broadcom BCM5703X Gigabit Ethernet" },
  147         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
  148                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
  149         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
  150                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
  151         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
  152                 "Broadcom BCM5705 Gigabit Ethernet" },
  153         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
  154                 "Broadcom BCM5705K Gigabit Ethernet" },
  155         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
  156                 "Broadcom BCM5705M Gigabit Ethernet" },
  157         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
  158                 "Broadcom BCM5705M Gigabit Ethernet" },
  159         { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
  160                 "Broadcom BCM5714C Gigabit Ethernet" },
  161         { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
  162                 "Broadcom BCM5721 Gigabit Ethernet" },
  163         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
  164                 "Broadcom BCM5750 Gigabit Ethernet" },
  165         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
  166                 "Broadcom BCM5750M Gigabit Ethernet" },
  167         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
  168                 "Broadcom BCM5751 Gigabit Ethernet" },
  169         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
  170                 "Broadcom BCM5751M Gigabit Ethernet" },
  171         { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
  172                 "Broadcom BCM5782 Gigabit Ethernet" },
  173         { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
  174                 "Broadcom BCM5788 Gigabit Ethernet" },
  175         { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
  176                 "Broadcom BCM5789 Gigabit Ethernet" },
  177         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
  178                 "Broadcom BCM5901 Fast Ethernet" },
  179         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
  180                 "Broadcom BCM5901A2 Fast Ethernet" },
  181         { SK_VENDORID, SK_DEVICEID_ALTIMA,
  182                 "SysKonnect Gigabit Ethernet" },
  183         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
  184                 "Altima AC1000 Gigabit Ethernet" },
  185         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
  186                 "Altima AC1002 Gigabit Ethernet" },
  187         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
  188                 "Altima AC9100 Gigabit Ethernet" },
  189         { 0, 0, NULL }
  190 };
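
#ifdef notdef
/*
 * A minimal sketch of how a probe routine walks the table above,
 * assuming the bge_type fields are named bge_vid/bge_did/bge_name as
 * in if_bgereg.h; the driver's real bge_probe() (later in this file)
 * performs essentially this walk before setting the device
 * description.
 */
static int
bge_probe_sketch(dev)
	device_t dev;
{
	struct bge_type *t;

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->bge_vid &&
		    pci_get_device(dev) == t->bge_did) {
			device_set_desc(dev, t->bge_name);
			return(0);
		}
	}

	return(ENXIO);
}
#endif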
  191 
  192 static int bge_probe            (device_t);
  193 static int bge_attach           (device_t);
  194 static int bge_detach           (device_t);
  195 static void bge_release_resources
  196                                 (struct bge_softc *);
  197 static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
  198 static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
  199                                     bus_size_t, int);
  200 static int bge_dma_alloc        (device_t);
  201 static void bge_dma_free        (struct bge_softc *);
  202 
  203 static void bge_txeof           (struct bge_softc *);
  204 static void bge_rxeof           (struct bge_softc *);
  205 
  206 static void bge_tick_locked     (struct bge_softc *);
  207 static void bge_tick            (void *);
  208 static void bge_stats_update    (struct bge_softc *);
  209 static void bge_stats_update_regs
  210                                 (struct bge_softc *);
  211 static int bge_encap            (struct bge_softc *, struct mbuf *,
  212                                         u_int32_t *);
  213 
  214 static void bge_intr            (void *);
  215 static void bge_start_locked    (struct ifnet *);
  216 static void bge_start           (struct ifnet *);
  217 static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
  218 static void bge_init_locked     (struct bge_softc *);
  219 static void bge_init            (void *);
  220 static void bge_stop            (struct bge_softc *);
  221 static void bge_watchdog                (struct ifnet *);
  222 static void bge_shutdown                (device_t);
  223 static int bge_ifmedia_upd      (struct ifnet *);
  224 static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
  225 
  226 static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
  227 static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);
  228 
  229 static void bge_setmulti        (struct bge_softc *);
  230 
  231 static void bge_handle_events   (struct bge_softc *);
  232 static int bge_alloc_jumbo_mem  (struct bge_softc *);
  233 static void bge_free_jumbo_mem  (struct bge_softc *);
  234 static void *bge_jalloc         (struct bge_softc *);
  235 static void bge_jfree           (void *, void *);
  236 static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
  237 static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
  238 static int bge_init_rx_ring_std (struct bge_softc *);
  239 static void bge_free_rx_ring_std        (struct bge_softc *);
  240 static int bge_init_rx_ring_jumbo       (struct bge_softc *);
  241 static void bge_free_rx_ring_jumbo      (struct bge_softc *);
  242 static void bge_free_tx_ring    (struct bge_softc *);
  243 static int bge_init_tx_ring     (struct bge_softc *);
  244 
  245 static int bge_chipinit         (struct bge_softc *);
  246 static int bge_blockinit        (struct bge_softc *);
  247 
  248 #ifdef notdef
  249 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
  250 static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
  251 static void bge_vpd_read        (struct bge_softc *);
  252 #endif
  253 
  254 static u_int32_t bge_readmem_ind
  255                                 (struct bge_softc *, int);
  256 static void bge_writemem_ind    (struct bge_softc *, int, int);
  257 #ifdef notdef
  258 static u_int32_t bge_readreg_ind
  259                                 (struct bge_softc *, int);
  260 #endif
  261 static void bge_writereg_ind    (struct bge_softc *, int, int);
  262 
  263 static int bge_miibus_readreg   (device_t, int, int);
  264 static int bge_miibus_writereg  (device_t, int, int, int);
  265 static void bge_miibus_statchg  (device_t);
  266 
  267 static void bge_reset           (struct bge_softc *);
  268 
  269 static device_method_t bge_methods[] = {
  270         /* Device interface */
  271         DEVMETHOD(device_probe,         bge_probe),
  272         DEVMETHOD(device_attach,        bge_attach),
  273         DEVMETHOD(device_detach,        bge_detach),
  274         DEVMETHOD(device_shutdown,      bge_shutdown),
  275 
  276         /* bus interface */
  277         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  278         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  279 
  280         /* MII interface */
  281         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
  282         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
  283         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
  284 
  285         { 0, 0 }
  286 };
  287 
  288 static driver_t bge_driver = {
  289         "bge",
  290         bge_methods,
  291         sizeof(struct bge_softc)
  292 };
  293 
  294 static devclass_t bge_devclass;
  295 
  296 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
  297 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
  298 
  299 static u_int32_t
  300 bge_readmem_ind(sc, off)
  301         struct bge_softc *sc;
  302         int off;
  303 {
  304         device_t dev;
  305 
  306         dev = sc->bge_dev;
  307 
  308         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  309         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
  310 }
  311 
  312 static void
  313 bge_writemem_ind(sc, off, val)
  314         struct bge_softc *sc;
  315         int off, val;
  316 {
  317         device_t dev;
  318 
  319         dev = sc->bge_dev;
  320 
  321         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  322         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
  323 
  324         return;
  325 }
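
#ifdef notdef
/*
 * Usage sketch for the two routines above: BGE_PCI_MEMWIN_BASEADDR and
 * BGE_PCI_MEMWIN_DATA form a movable 32-bit window into NIC-internal
 * RAM, so the host can touch a word of, say, the statistics block
 * without mapping that memory.  bge_chipinit() later in this file does
 * the same thing through the BGE_MEMWIN_WRITE() convenience macro.
 */
static void
bge_clear_stats_word(sc)
	struct bge_softc *sc;
{
	bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);
}
#endif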
  326 
  327 #ifdef notdef
  328 static u_int32_t
  329 bge_readreg_ind(sc, off)
  330         struct bge_softc *sc;
  331         int off;
  332 {
  333         device_t dev;
  334 
  335         dev = sc->bge_dev;
  336 
  337         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  338         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
  339 }
  340 #endif
  341 
  342 static void
  343 bge_writereg_ind(sc, off, val)
  344         struct bge_softc *sc;
  345         int off, val;
  346 {
  347         device_t dev;
  348 
  349         dev = sc->bge_dev;
  350 
  351         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  352         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
  353 
  354         return;
  355 }
  356 
  357 /*
  358  * Map a single buffer address.
  359  */
  360 
  361 static void
  362 bge_dma_map_addr(arg, segs, nseg, error)
  363         void *arg;
  364         bus_dma_segment_t *segs;
  365         int nseg;
  366         int error;
  367 {
  368         struct bge_dmamap_arg *ctx;
  369 
  370         if (error)
  371                 return;
  372 
  373         ctx = arg;
  374 
  375         if (nseg > ctx->bge_maxsegs) {
  376                 ctx->bge_maxsegs = 0;
  377                 return;
  378         }
  379 
  380         ctx->bge_busaddr = segs->ds_addr;
  381 
  382         return;
  383 }
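
/*
 * Usage sketch for the callback above (tag, map, buf, len and busaddr
 * are placeholders): with BUS_DMA_NOWAIT the load is never deferred,
 * so bge_dma_map_addr() runs before bus_dmamap_load() returns and the
 * caller can read the results straight out of the context:
 *
 *	struct bge_dmamap_arg ctx;
 *
 *	ctx.bge_maxsegs = 1;
 *	error = bus_dmamap_load(tag, map, buf, len,
 *	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0 && ctx.bge_maxsegs != 0)
 *		busaddr = ctx.bge_busaddr;
 *
 * A bge_maxsegs of zero on return means the buffer mapped to more
 * segments than the caller allowed; bge_newbuf_std() below checks for
 * exactly that.
 */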
  384 
  385 /*
  386  * Map an mbuf chain into a TX ring.
  387  */
  388 
  389 static void
  390 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
  391         void *arg;
  392         bus_dma_segment_t *segs;
  393         int nseg;
  394         bus_size_t mapsize;
  395         int error;
  396 {
  397         struct bge_dmamap_arg *ctx;
  398         struct bge_tx_bd *d = NULL;
  399         int i = 0, idx;
  400 
  401         if (error)
  402                 return;
  403 
  404         ctx = arg;
  405 
  406         /* Signal error to caller if there are too many segments */
  407         if (nseg > ctx->bge_maxsegs) {
  408                 ctx->bge_maxsegs = 0;
  409                 return;
  410         }
  411 
  412         idx = ctx->bge_idx;
  413         while(1) {
  414                 d = &ctx->bge_ring[idx];
  415                 d->bge_addr.bge_addr_lo =
  416                     htole32(BGE_ADDR_LO(segs[i].ds_addr));
  417                 d->bge_addr.bge_addr_hi =
  418                     htole32(BGE_ADDR_HI(segs[i].ds_addr));
  419                 d->bge_len = htole16(segs[i].ds_len);
  420                 d->bge_flags = htole16(ctx->bge_flags);
  421                 i++;
  422                 if (i == nseg)
  423                         break;
  424                 BGE_INC(idx, BGE_TX_RING_CNT);
  425         }
  426 
  427         d->bge_flags |= htole16(BGE_TXBDFLAG_END);
  428         ctx->bge_maxsegs = nseg;
  429         ctx->bge_idx = idx;
  430 
  431         return;
  432 }
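
/*
 * Usage sketch for the TX mapping callback, a simplified view of what
 * bge_encap() does (idx, map, m_head, csum_flags and free_cnt are
 * placeholders): the caller primes the context with the TX ring, the
 * current producer index and the per-packet flags, then hands the
 * whole mbuf chain to busdma:
 *
 *	struct bge_dmamap_arg ctx;
 *
 *	ctx.sc = sc;
 *	ctx.bge_idx = idx;
 *	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 *	ctx.bge_maxsegs = free_cnt;
 *	ctx.bge_flags = csum_flags;
 *	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 *	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 *
 * On success ctx.bge_idx has advanced to the last descriptor filled
 * and ctx.bge_maxsegs holds the number of segments actually used.
 */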
  433 
  434 
  435 #ifdef notdef
  436 static u_int8_t
  437 bge_vpd_readbyte(sc, addr)
  438         struct bge_softc *sc;
  439         int addr;
  440 {
  441         int i;
  442         device_t dev;
  443         u_int32_t val;
  444 
  445         dev = sc->bge_dev;
  446         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
  447         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  448                 DELAY(10);
  449                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
  450                         break;
  451         }
  452 
  453         if (i == BGE_TIMEOUT * 10) {
  454                 printf("bge%d: VPD read timed out\n", sc->bge_unit);
  455                 return(0);
  456         }
  457 
  458         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
  459 
  460         return((val >> ((addr % 4) * 8)) & 0xFF);
  461 }
  462 
  463 static void
  464 bge_vpd_read_res(sc, res, addr)
  465         struct bge_softc *sc;
  466         struct vpd_res *res;
  467         int addr;
  468 {
  469         int i;
  470         u_int8_t *ptr;
  471 
  472         ptr = (u_int8_t *)res;
  473         for (i = 0; i < sizeof(struct vpd_res); i++)
  474                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
  475 
  476         return;
  477 }
  478 
  479 static void
  480 bge_vpd_read(sc)
  481         struct bge_softc *sc;
  482 {
  483         int pos = 0, i;
  484         struct vpd_res res;
  485 
  486         if (sc->bge_vpd_prodname != NULL)
  487                 free(sc->bge_vpd_prodname, M_DEVBUF);
  488         if (sc->bge_vpd_readonly != NULL)
  489                 free(sc->bge_vpd_readonly, M_DEVBUF);
  490         sc->bge_vpd_prodname = NULL;
  491         sc->bge_vpd_readonly = NULL;
  492 
  493         bge_vpd_read_res(sc, &res, pos);
  494 
  495         if (res.vr_id != VPD_RES_ID) {
  496                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  497                         sc->bge_unit, VPD_RES_ID, res.vr_id);
  498                 return;
  499         }
  500 
  501         pos += sizeof(res);
  502         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
  503         for (i = 0; i < res.vr_len; i++)
  504                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
  505         sc->bge_vpd_prodname[i] = '\0';
  506         pos += i;
  507 
  508         bge_vpd_read_res(sc, &res, pos);
  509 
  510         if (res.vr_id != VPD_RES_READ) {
  511                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  512                     sc->bge_unit, VPD_RES_READ, res.vr_id);
  513                 return;
  514         }
  515 
  516         pos += sizeof(res);
  517         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
  518         for (i = 0; i < res.vr_len; i++)
  519                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
  520 
  521         return;
  522 }
  523 #endif
  524 
  525 /*
  526  * Read a byte of data stored in the EEPROM at address 'addr.' The
  527  * BCM570x supports both the traditional bitbang interface and an
  528  * auto access interface for reading the EEPROM. We use the auto
  529  * access method.
  530  */
  531 static u_int8_t
  532 bge_eeprom_getbyte(sc, addr, dest)
  533         struct bge_softc *sc;
  534         int addr;
  535         u_int8_t *dest;
  536 {
  537         int i;
  538         u_int32_t byte = 0;
  539 
  540         /*
  541          * Enable use of auto EEPROM access so we can avoid
  542          * having to use the bitbang method.
  543          */
  544         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
  545 
  546         /* Reset the EEPROM, load the clock period. */
  547         CSR_WRITE_4(sc, BGE_EE_ADDR,
  548             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
  549         DELAY(20);
  550 
  551         /* Issue the read EEPROM command. */
  552         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
  553 
  554         /* Wait for completion */
  555         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  556                 DELAY(10);
  557                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
  558                         break;
  559         }
  560 
  561         if (i == BGE_TIMEOUT * 10) {
  562                 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
  563                 return(1);
  564         }
  565 
  566         /* Get result. */
  567         byte = CSR_READ_4(sc, BGE_EE_DATA);
  568 
  569         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
  570 
  571         return(0);
  572 }
  573 
  574 /*
  575  * Read a sequence of bytes from the EEPROM.
  576  */
  577 static int
  578 bge_read_eeprom(sc, dest, off, cnt)
  579         struct bge_softc *sc;
  580         caddr_t dest;
  581         int off;
  582         int cnt;
  583 {
  584         int err = 0, i;
  585         u_int8_t byte = 0;
  586 
  587         for (i = 0; i < cnt; i++) {
  588                 err = bge_eeprom_getbyte(sc, off + i, &byte);
  589                 if (err)
  590                         break;
  591                 *(dest + i) = byte;
  592         }
  593 
  594         return(err ? 1 : 0);
  595 }
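
/*
 * Usage sketch: the main consumer of bge_read_eeprom() is the station
 * address.  When the MAC address cannot be recovered from NIC memory,
 * bge_attach() (later in this file) falls back to the EEPROM along
 * these lines, with BGE_EE_MAC_OFFSET defined in if_bgereg.h:
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 *		printf("bge%d: failed to read station address\n",
 *		    sc->bge_unit);
 *		return(ENXIO);
 *	}
 */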
  596 
  597 static int
  598 bge_miibus_readreg(dev, phy, reg)
  599         device_t dev;
  600         int phy, reg;
  601 {
  602         struct bge_softc *sc;
  603         u_int32_t val, autopoll;
  604         int i;
  605 
  606         sc = device_get_softc(dev);
  607 
  608         /*
  609          * Broadcom's own driver always assumes the internal
  610          * PHY is at GMII address 1. On some chips, the PHY responds
  611          * to accesses at all addresses, which could cause us to
  612  * bogusly attach the PHY 32 times at probe time. Always
  613  * restricting the lookup to address 1 is simpler than
  614  * trying to figure out which chip revisions should be
  615          * special-cased.
  616          */
  617         if (phy != 1)
  618                 return(0);
  619 
  620         /* Reading with autopolling on may trigger PCI errors */
  621         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  622         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  623                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  624                 DELAY(40);
  625         }
  626 
  627         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
  628             BGE_MIPHY(phy)|BGE_MIREG(reg));
  629 
  630         for (i = 0; i < BGE_TIMEOUT; i++) {
  631                 val = CSR_READ_4(sc, BGE_MI_COMM);
  632                 if (!(val & BGE_MICOMM_BUSY))
  633                         break;
  634         }
  635 
  636         if (i == BGE_TIMEOUT) {
  637                 printf("bge%d: PHY read timed out\n", sc->bge_unit);
  638                 val = 0;
  639                 goto done;
  640         }
  641 
  642         val = CSR_READ_4(sc, BGE_MI_COMM);
  643 
  644 done:
  645         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  646                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  647                 DELAY(40);
  648         }
  649 
  650         if (val & BGE_MICOMM_READFAIL)
  651                 return(0);
  652 
  653         return(val & 0xFFFF);
  654 }
  655 
  656 static int
  657 bge_miibus_writereg(dev, phy, reg, val)
  658         device_t dev;
  659         int phy, reg, val;
  660 {
  661         struct bge_softc *sc;
  662         u_int32_t autopoll;
  663         int i;
  664 
  665         sc = device_get_softc(dev);
  666 
  667         /* Reading with autopolling on may trigger PCI errors */
  668         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  669         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  670                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  671                 DELAY(40);
  672         }
  673 
  674         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
  675             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
  676 
  677         for (i = 0; i < BGE_TIMEOUT; i++) {
  678                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
  679                         break;
  680         }
  681 
  682         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  683                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  684                 DELAY(40);
  685         }
  686 
  687         if (i == BGE_TIMEOUT) {
  688                 printf("bge%d: PHY read timed out\n", sc->bge_unit);
  689                 return(0);
  690         }
  691 
  692         return(0);
  693 }
  694 
  695 static void
  696 bge_miibus_statchg(dev)
  697         device_t dev;
  698 {
  699         struct bge_softc *sc;
  700         struct mii_data *mii;
  701 
  702         sc = device_get_softc(dev);
  703         mii = device_get_softc(sc->bge_miibus);
  704 
  705         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
  706         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
  707                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
  708         } else {
  709                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
  710         }
  711 
  712         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  713                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  714         } else {
  715                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  716         }
  717 
  718         return;
  719 }
  720 
  721 /*
  722  * Handle events that have triggered interrupts.
  723  */
  724 static void
  725 bge_handle_events(sc)
  726         struct bge_softc                *sc;
  727 {
  728 
  729         return;
  730 }
  731 
  732 /*
  733  * Memory management for jumbo frames.
  734  */
  735 
  736 static int
  737 bge_alloc_jumbo_mem(sc)
  738         struct bge_softc                *sc;
  739 {
  740         caddr_t                 ptr;
  741         register int            i, error;
  742         struct bge_jpool_entry   *entry;
  743 
  744         /* Create tag for jumbo buffer block */
  745 
  746         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
  747             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
  748             NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
  749             &sc->bge_cdata.bge_jumbo_tag);
  750 
  751         if (error) {
  752                 printf("bge%d: could not allocate jumbo dma tag\n",
  753                     sc->bge_unit);
  754                 return (ENOMEM);
  755         }
  756 
  757         /* Allocate DMA'able memory for jumbo buffer block */
  758 
  759         error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
  760             (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
  761             &sc->bge_cdata.bge_jumbo_map);
  762 
  763         if (error)
  764                 return (ENOMEM);
  765 
  766         SLIST_INIT(&sc->bge_jfree_listhead);
  767         SLIST_INIT(&sc->bge_jinuse_listhead);
  768 
  769         /*
  770          * Now divide it up into 9K pieces and save the addresses
  771          * in an array.
  772          */
  773         ptr = sc->bge_ldata.bge_jumbo_buf;
  774         for (i = 0; i < BGE_JSLOTS; i++) {
  775                 sc->bge_cdata.bge_jslots[i] = ptr;
  776                 ptr += BGE_JLEN;
  777                 entry = malloc(sizeof(struct bge_jpool_entry),
  778                     M_DEVBUF, M_NOWAIT);
  779                 if (entry == NULL) {
  780                         bge_free_jumbo_mem(sc);
  781                         sc->bge_ldata.bge_jumbo_buf = NULL;
  782                         printf("bge%d: no memory for jumbo "
  783                             "buffer queue!\n", sc->bge_unit);
  784                         return(ENOBUFS);
  785                 }
  786                 entry->slot = i;
  787                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
  788                     entry, jpool_entries);
  789         }
  790 
  791         return(0);
  792 }
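
/*
 * Sizing note: the single bus_dmamem_alloc() above has to cover every
 * slot handed out in the loop, so BGE_JMEM is expected to equal
 * BGE_JSLOTS * BGE_JLEN (that is how if_bgereg.h normally defines it),
 * with each BGE_JLEN slot holding one jumbo frame plus alignment slop.
 */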
  793 
  794 static void
  795 bge_free_jumbo_mem(sc)
  796         struct bge_softc *sc;
  797 {
  798         int i;
  799         struct bge_jpool_entry *entry;
  800 
  801         for (i = 0; i < BGE_JSLOTS; i++) {
  802                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  803                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  804                 free(entry, M_DEVBUF);
  805         }
  806 
  807         /* Destroy jumbo buffer block */
  808 
  809         if (sc->bge_ldata.bge_rx_jumbo_ring)
  810                 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
  811                     sc->bge_ldata.bge_jumbo_buf,
  812                     sc->bge_cdata.bge_jumbo_map);
  813 
  814         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
  815                 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
  816                     sc->bge_cdata.bge_jumbo_map);
  817 
  818         if (sc->bge_cdata.bge_jumbo_tag)
  819                 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
  820 
  821         return;
  822 }
  823 
  824 /*
  825  * Allocate a jumbo buffer.
  826  */
  827 static void *
  828 bge_jalloc(sc)
  829         struct bge_softc                *sc;
  830 {
  831         struct bge_jpool_entry   *entry;
  832 
  833         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  834 
  835         if (entry == NULL) {
  836                 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
  837                 return(NULL);
  838         }
  839 
  840         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  841         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
  842         return(sc->bge_cdata.bge_jslots[entry->slot]);
  843 }
  844 
  845 /*
  846  * Release a jumbo buffer.
  847  */
  848 static void
  849 bge_jfree(buf, args)
  850         void *buf;
  851         void *args;
  852 {
  853         struct bge_jpool_entry *entry;
  854         struct bge_softc *sc;
  855         int i;
  856 
  857         /* Extract the softc struct pointer. */
  858         sc = (struct bge_softc *)args;
  859 
  860         if (sc == NULL)
  861                 panic("bge_jfree: can't find softc pointer!");
  862 
  863         /* calculate the slot this buffer belongs to */
  864 
  865         i = ((vm_offset_t)buf
  866              - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
  867 
  868         if ((i < 0) || (i >= BGE_JSLOTS))
  869                 panic("bge_jfree: asked to free buffer that we don't manage!");
  870 
  871         entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
  872         if (entry == NULL)
  873                 panic("bge_jfree: buffer not in use!");
  874         entry->slot = i;
  875         SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
  876         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
  877 
  878         return;
  879 }
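
/*
 * Lifecycle sketch for the jumbo pool: a cluster is borrowed with
 * bge_jalloc(), attached to an mbuf as external storage, and returned
 * to the free list automatically when the last mbuf reference goes
 * away, because MEXTADD() registers bge_jfree() as the free routine:
 *
 *	buf = bge_jalloc(sc);
 *	if (buf != NULL)
 *		MEXTADD(m, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
 *		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
 *
 * bge_newbuf_jumbo() below follows exactly this pattern.
 */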
  880 
  881 
  882 /*
  883  * Initialize a standard receive ring descriptor.
  884  */
  885 static int
  886 bge_newbuf_std(sc, i, m)
  887         struct bge_softc        *sc;
  888         int                     i;
  889         struct mbuf             *m;
  890 {
  891         struct mbuf             *m_new = NULL;
  892         struct bge_rx_bd        *r;
  893         struct bge_dmamap_arg   ctx;
  894         int                     error;
  895 
  896         if (m == NULL) {
  897                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  898                 if (m_new == NULL) {
  899                         return(ENOBUFS);
  900                 }
  901 
  902                 MCLGET(m_new, M_DONTWAIT);
  903                 if (!(m_new->m_flags & M_EXT)) {
  904                         m_freem(m_new);
  905                         return(ENOBUFS);
  906                 }
  907                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  908         } else {
  909                 m_new = m;
  910                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  911                 m_new->m_data = m_new->m_ext.ext_buf;
  912         }
  913 
  914         if (!sc->bge_rx_alignment_bug)
  915                 m_adj(m_new, ETHER_ALIGN);
  916         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
  917         r = &sc->bge_ldata.bge_rx_std_ring[i];
  918         ctx.bge_maxsegs = 1;
  919         ctx.sc = sc;
  920         error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
  921             sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
  922             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  923         if (error || ctx.bge_maxsegs == 0) {
  924                 if (m == NULL)
  925                         m_freem(m_new);
  926                 return(ENOMEM);
  927         }
  928         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
  929         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
  930         r->bge_flags = htole16(BGE_RXBDFLAG_END);
  931         r->bge_len = htole16(m_new->m_len);
  932         r->bge_idx = htole16(i);
  933 
  934         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  935             sc->bge_cdata.bge_rx_std_dmamap[i],
  936             BUS_DMASYNC_PREREAD);
  937 
  938         return(0);
  939 }
  940 
  941 /*
  942  * Initialize a jumbo receive ring descriptor. This allocates
  943  * a jumbo buffer from the pool managed internally by the driver.
  944  */
  945 static int
  946 bge_newbuf_jumbo(sc, i, m)
  947         struct bge_softc *sc;
  948         int i;
  949         struct mbuf *m;
  950 {
  951         struct mbuf *m_new = NULL;
  952         struct bge_rx_bd *r;
  953         struct bge_dmamap_arg ctx;
  954         int error;
  955 
  956         if (m == NULL) {
  957                 caddr_t                 *buf = NULL;
  958 
  959                 /* Allocate the mbuf. */
  960                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  961                 if (m_new == NULL) {
  962                         return(ENOBUFS);
  963                 }
  964 
  965                 /* Allocate the jumbo buffer */
  966                 buf = bge_jalloc(sc);
  967                 if (buf == NULL) {
  968                         m_freem(m_new);
  969                         printf("bge%d: jumbo allocation failed "
  970                             "-- packet dropped!\n", sc->bge_unit);
  971                         return(ENOBUFS);
  972                 }
  973 
  974                 /* Attach the buffer to the mbuf. */
  975                 m_new->m_data = (void *) buf;
  976                 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
  977                 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
  978                     (struct bge_softc *)sc, 0, EXT_NET_DRV);
  979         } else {
  980                 m_new = m;
  981                 m_new->m_data = m_new->m_ext.ext_buf;
  982                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
  983         }
  984 
  985         if (!sc->bge_rx_alignment_bug)
  986                 m_adj(m_new, ETHER_ALIGN);
  987         /* Set up the descriptor. */
  988         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
  989         r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
  990         ctx.bge_maxsegs = 1;
  991         ctx.sc = sc;
  992         error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
  993             sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
  994             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  995         if (error || ctx.bge_maxsegs == 0) {
  996                 if (m == NULL)
  997                         m_freem(m_new);
  998                 return(ENOMEM);
  999         }
 1000         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
 1001         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
 1002         r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
 1003         r->bge_len = htole16(m_new->m_len);
 1004         r->bge_idx = htole16(i);
 1005 
 1006         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 1007             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
 1008             BUS_DMASYNC_PREREAD);
 1009 
 1010         return(0);
 1011 }
 1012 
 1013 /*
 1014  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 1015  * that's 1MB of memory, which is a lot. For now, we fill only the first
 1016  * 256 ring entries and hope that our CPU is fast enough to keep up with
 1017  * the NIC.
 1018  */
 1019 static int
 1020 bge_init_rx_ring_std(sc)
 1021         struct bge_softc *sc;
 1022 {
 1023         int i;
 1024 
 1025         for (i = 0; i < BGE_SSLOTS; i++) {
 1026                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
 1027                         return(ENOBUFS);
 1028         }
 1029 
 1030         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1031             sc->bge_cdata.bge_rx_std_ring_map,
 1032             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1033 
 1034         sc->bge_std = i - 1;
 1035         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 1036 
 1037         return(0);
 1038 }
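
/*
 * Note on the mailbox write above: sc->bge_std ends up holding the
 * index of the last initialized descriptor, and writing it to the
 * standard ring's producer mailbox publishes the freshly filled
 * buffers; the NIC consumes descriptors up to, but not beyond, the
 * producer index.
 */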
 1039 
 1040 static void
 1041 bge_free_rx_ring_std(sc)
 1042         struct bge_softc *sc;
 1043 {
 1044         int i;
 1045 
 1046         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1047                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
 1048                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
 1049                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
 1050                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1051                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1052                 }
 1053                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
 1054                     sizeof(struct bge_rx_bd));
 1055         }
 1056 
 1057         return;
 1058 }
 1059 
 1060 static int
 1061 bge_init_rx_ring_jumbo(sc)
 1062         struct bge_softc *sc;
 1063 {
 1064         int i;
 1065         struct bge_rcb *rcb;
 1066 
 1067         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1068                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
 1069                         return(ENOBUFS);
 1070         }
 1071 
 1072         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1073             sc->bge_cdata.bge_rx_jumbo_ring_map,
 1074             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1075 
 1076         sc->bge_jumbo = i - 1;
 1077 
 1078         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1079         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
 1080         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1081 
 1082         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 1083 
 1084         return(0);
 1085 }
 1086 
 1087 static void
 1088 bge_free_rx_ring_jumbo(sc)
 1089         struct bge_softc *sc;
 1090 {
 1091         int i;
 1092 
 1093         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1094                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
 1095                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
 1096                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
 1097                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 1098                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1099                 }
 1100                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
 1101                     sizeof(struct bge_rx_bd));
 1102         }
 1103 
 1104         return;
 1105 }
 1106 
 1107 static void
 1108 bge_free_tx_ring(sc)
 1109         struct bge_softc *sc;
 1110 {
 1111         int i;
 1112 
 1113         if (sc->bge_ldata.bge_tx_ring == NULL)
 1114                 return;
 1115 
 1116         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1117                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
 1118                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
 1119                         sc->bge_cdata.bge_tx_chain[i] = NULL;
 1120                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1121                             sc->bge_cdata.bge_tx_dmamap[i]);
 1122                 }
 1123                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
 1124                     sizeof(struct bge_tx_bd));
 1125         }
 1126 
 1127         return;
 1128 }
 1129 
 1130 static int
 1131 bge_init_tx_ring(sc)
 1132         struct bge_softc *sc;
 1133 {
 1134         sc->bge_txcnt = 0;
 1135         sc->bge_tx_saved_considx = 0;
 1136 
 1137         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1138         /* 5700 b2 errata */
 1139         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1140                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1141 
 1142         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1143         /* 5700 b2 errata */
 1144         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1145                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1146 
 1147         return(0);
 1148 }
 1149 
 1150 static void
 1151 bge_setmulti(sc)
 1152         struct bge_softc *sc;
 1153 {
 1154         struct ifnet *ifp;
 1155         struct ifmultiaddr *ifma;
 1156         u_int32_t hashes[4] = { 0, 0, 0, 0 };
 1157         int h, i;
 1158 
 1159         BGE_LOCK_ASSERT(sc);
 1160 
 1161         ifp = sc->bge_ifp;
 1162 
 1163         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
 1164                 for (i = 0; i < 4; i++)
 1165                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
 1166                 return;
 1167         }
 1168 
 1169         /* First, zot all the existing filters. */
 1170         for (i = 0; i < 4; i++)
 1171                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
 1172 
 1173         /* Now program new ones. */
 1174         IF_ADDR_LOCK(ifp);
 1175         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1176                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1177                         continue;
 1178                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1179                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
 1180                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 1181         }
 1182         IF_ADDR_UNLOCK(ifp);
 1183 
 1184         for (i = 0; i < 4; i++)
 1185                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
 1186 
 1187         return;
 1188 }
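
/*
 * A worked example of the hash above: the low 7 bits of the
 * little-endian CRC of each multicast address select one of 128
 * filter bits spread across the four 32-bit hash registers.  For
 * h = 0x6B, (h & 0x60) >> 5 is 3 and h & 0x1F is 11, so that address
 * sets bit 11 of hashes[3], which lands in register BGE_MAR0 + 12.
 */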
 1189 
 1190 /*
 1191  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 1192  * self-test results.
 1193  */
 1194 static int
 1195 bge_chipinit(sc)
 1196         struct bge_softc *sc;
 1197 {
 1198         int                     i;
 1199         u_int32_t               dma_rw_ctl;
 1200 
 1201         /* Set endianness before we access any non-PCI registers. */
 1202 #if BYTE_ORDER == BIG_ENDIAN
 1203         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1204             BGE_BIGENDIAN_INIT, 4);
 1205 #else
 1206         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1207             BGE_LITTLEENDIAN_INIT, 4);
 1208 #endif
 1209 
 1210         /*
 1211          * Check the 'ROM failed' bit on the RX CPU to see if
 1212          * self-tests passed.
 1213          */
 1214         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
 1215                 printf("bge%d: RX CPU self-diagnostics failed!\n",
 1216                     sc->bge_unit);
 1217                 return(ENODEV);
 1218         }
 1219 
 1220         /* Clear the MAC control register */
 1221         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 1222 
 1223         /*
 1224          * Clear the MAC statistics block in the NIC's
 1225          * internal memory.
 1226          */
 1227         for (i = BGE_STATS_BLOCK;
 1228             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1229                 BGE_MEMWIN_WRITE(sc, i, 0);
 1230 
 1231         for (i = BGE_STATUS_BLOCK;
 1232             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1233                 BGE_MEMWIN_WRITE(sc, i, 0);
 1234 
 1235         /* Set up the PCI DMA control register. */
 1236         if (sc->bge_pcie) {
 1237                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1238                     (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1239                     (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1240         } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
 1241             BGE_PCISTATE_PCI_BUSMODE) {
 1242                 /* Conventional PCI bus */
 1243                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1244                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1245                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1246                     (0x0F);
 1247         } else {
 1248                 /* PCI-X bus */
 1249                 /*
 1250                  * The 5704 uses a different encoding of read/write
 1251                  * watermarks.
 1252                  */
 1253                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1254                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1255                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1256                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1257                 else
 1258                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1259                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1260                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1261                             (0x0F);
 1262 
 1263                 /*
 1264                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
 1265                  * for hardware bugs.
 1266                  */
 1267                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1268                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 1269                         u_int32_t tmp;
 1270 
 1271                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
 1272                         if (tmp == 0x6 || tmp == 0x7)
 1273                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1274                 }
 1275         }
 1276 
 1277         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1278             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
 1279             sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1280             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1281                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
 1282         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
 1283 
 1284         /*
 1285          * Set up general mode register.
 1286          */
 1287         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
 1288             BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1289             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
 1290             BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
 1291 
 1292         /*
 1293          * Disable memory write invalidate.  Apparently it is not supported
 1294          * properly by these devices.
 1295          */
 1296         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
 1297 
 1298 #ifdef __brokenalpha__
 1299         /*
 1300  * Must ensure that we do not cross an 8K byte boundary
 1301          * for DMA reads.  Our highest limit is 1K bytes.  This is a
 1302          * restriction on some ALPHA platforms with early revision
 1303          * 21174 PCI chipsets, such as the AlphaPC 164lx
 1304          */
 1305         PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
 1306             BGE_PCI_READ_BNDRY_1024BYTES, 4);
 1307 #endif
 1308 
 1309         /* Set the timer prescaler (always 66MHz) */
 1310         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 1311 
 1312         return(0);
 1313 }
 1314 
 1315 static int
 1316 bge_blockinit(sc)
 1317         struct bge_softc *sc;
 1318 {
 1319         struct bge_rcb *rcb;
 1320         volatile struct bge_rcb *vrcb;
 1321         int i;
 1322 
 1323         /*
 1324          * Initialize the memory window pointer register so that
 1325          * we can access the first 32K of internal NIC RAM. This will
 1326          * allow us to set up the TX send ring RCBs and the RX return
 1327          * ring RCBs, plus other things which live in NIC memory.
 1328          */
 1329         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
 1330 
 1331         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
 1332 
 1333         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1334             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1335                 /* Configure mbuf memory pool */
 1336                 if (sc->bge_extram) {
 1337                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1338                             BGE_EXT_SSRAM);
 1339                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1340                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1341                         else
 1342                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1343                 } else {
 1344                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1345                             BGE_BUFFPOOL_1);
 1346                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1347                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1348                         else
 1349                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1350                 }
 1351 
 1352                 /* Configure DMA resource pool */
 1353                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1354                     BGE_DMA_DESCRIPTORS);
 1355                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1356         }
 1357 
 1358         /* Configure mbuf pool watermarks */
 1359         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1360             sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 1361                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1362                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1363         } else {
 1364                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1365                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1366         }
 1367         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1368 
 1369         /* Configure DMA resource watermarks */
 1370         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1371         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1372 
 1373         /* Enable buffer manager */
 1374         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1375             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1376                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1377                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1378 
 1379                 /* Poll for buffer manager start indication */
 1380                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1381                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1382                                 break;
 1383                         DELAY(10);
 1384                 }
 1385 
 1386                 if (i == BGE_TIMEOUT) {
 1387                         printf("bge%d: buffer manager failed to start\n",
 1388                             sc->bge_unit);
 1389                         return(ENXIO);
 1390                 }
 1391         }
 1392 
 1393         /* Enable flow-through queues */
 1394         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1395         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1396 
 1397         /* Wait until queue initialization is complete */
 1398         for (i = 0; i < BGE_TIMEOUT; i++) {
 1399                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1400                         break;
 1401                 DELAY(10);
 1402         }
 1403 
 1404         if (i == BGE_TIMEOUT) {
 1405                 printf("bge%d: flow-through queue init failed\n",
 1406                     sc->bge_unit);
 1407                 return(ENXIO);
 1408         }
 1409 
 1410         /* Initialize the standard RX ring control block */
 1411         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
 1412         rcb->bge_hostaddr.bge_addr_lo =
 1413             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
 1414         rcb->bge_hostaddr.bge_addr_hi =
 1415             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
 1416         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1417             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
 1418         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1419             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1420                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1421         else
 1422                 rcb->bge_maxlen_flags =
 1423                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1424         if (sc->bge_extram)
 1425                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1426         else
 1427                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1428         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1429         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1430 
 1431         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1432         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1433 
 1434         /*
 1435          * Initialize the jumbo RX ring control block
 1436          * We set the 'ring disabled' bit in the flags
 1437          * field until we're actually ready to start
 1438          * using this ring (i.e. once we set the MTU
 1439          * high enough to require it).
 1440          */
 1441         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1442             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1443                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1444 
 1445                 rcb->bge_hostaddr.bge_addr_lo =
 1446                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1447                 rcb->bge_hostaddr.bge_addr_hi =
 1448                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1449                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1450                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1451                     BUS_DMASYNC_PREREAD);
 1452                 rcb->bge_maxlen_flags =
 1453                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
 1454                     BGE_RCB_FLAG_RING_DISABLED);
 1455                 if (sc->bge_extram)
 1456                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1457                 else
 1458                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1459                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1460                     rcb->bge_hostaddr.bge_addr_hi);
 1461                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1462                     rcb->bge_hostaddr.bge_addr_lo);
 1463 
 1464                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1465                     rcb->bge_maxlen_flags);
 1466                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1467 
 1468                 /* Set up dummy disabled mini ring RCB */
 1469                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
 1470                 rcb->bge_maxlen_flags =
 1471                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1472                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1473                     rcb->bge_maxlen_flags);
 1474         }
 1475 
 1476         /*
 1477          * Set the BD ring replenish thresholds. The recommended
 1478          * values are 1/8th the number of descriptors allocated to
 1479          * each ring.
 1480          */
 1481         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1482         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
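              /*
               * (With the 512-entry standard ring and 256-entry jumbo ring
               * this driver allocates, those thresholds work out to 64 and
               * 32 descriptors respectively.)
               */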
 1483 
 1484         /*
 1485          * Disable all unused send rings by setting the 'ring disabled'
 1486          * bit in the flags field of all the TX send ring control blocks.
 1487          * These are located in NIC memory.
 1488          */
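              /*
               * (NIC-local memory is reached through the memory window in
               * the register BAR, which is why these RCBs are written with
               * ordinary stores through bge_vhandle rather than with
               * CSR_WRITE_4().)
               */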
 1489         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1490             BGE_SEND_RING_RCB);
 1491         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1492                 vrcb->bge_maxlen_flags =
 1493                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1494                 vrcb->bge_nicaddr = 0;
 1495                 vrcb++;
 1496         }
 1497 
 1498         /* Configure TX RCB 0 (we use only the first ring) */
 1499         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1500             BGE_SEND_RING_RCB);
 1501         vrcb->bge_hostaddr.bge_addr_lo =
 1502             htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
 1503         vrcb->bge_hostaddr.bge_addr_hi =
 1504             htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
 1505         vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
 1506         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1507             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1508                 vrcb->bge_maxlen_flags =
 1509                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
 1510 
 1511         /* Disable all unused RX return rings */
 1512         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1513             BGE_RX_RETURN_RING_RCB);
 1514         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1515                 vrcb->bge_hostaddr.bge_addr_hi = 0;
 1516                 vrcb->bge_hostaddr.bge_addr_lo = 0;
 1517                 vrcb->bge_maxlen_flags =
 1518                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1519                     BGE_RCB_FLAG_RING_DISABLED);
 1520                 vrcb->bge_nicaddr = 0;
 1521                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1522                     (i * (sizeof(u_int64_t))), 0);
 1523                 vrcb++;
 1524         }
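              /*
               * (The consumer-index mailboxes are 64 bits wide, hence the
               * sizeof(u_int64_t) stride used to clear them above.)
               */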
 1525 
 1526         /* Initialize RX ring indexes */
 1527         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1528         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1529         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1530 
 1531         /*
 1532          * Set up RX return ring 0
 1533          * Note that the NIC address for RX return rings is 0x00000000.
 1534          * The return rings live entirely within the host, so the
 1535          * nicaddr field in the RCB isn't used.
 1536          */
 1537         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1538             BGE_RX_RETURN_RING_RCB);
 1539         vrcb->bge_hostaddr.bge_addr_lo =
 1540             BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
 1541         vrcb->bge_hostaddr.bge_addr_hi =
 1542             BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
 1543         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 1544             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 1545         vrcb->bge_nicaddr = 0x00000000;
 1546         vrcb->bge_maxlen_flags =
 1547             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
 1548 
 1549         /* Set random backoff seed for TX */
 1550         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1551             (IFP2ENADDR(sc->bge_ifp)[0] + IFP2ENADDR(sc->bge_ifp)[1] +
 1552             IFP2ENADDR(sc->bge_ifp)[2] + IFP2ENADDR(sc->bge_ifp)[3] +
 1553             IFP2ENADDR(sc->bge_ifp)[4] + IFP2ENADDR(sc->bge_ifp)[5]) &
 1554             BGE_TX_BACKOFF_SEED_MASK);
 1555 
 1556         /* Set inter-packet gap */
 1557         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
 1558 
 1559         /*
 1560          * Specify which ring to use for packets that don't match
 1561          * any RX rules.
 1562          */
 1563         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1564 
 1565         /*
 1566          * Configure number of RX lists. One interrupt distribution
 1567          * list, sixteen active lists, one bad frames class.
 1568          */
 1569         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1570 
 1571         /* Initialize RX list placement stats mask. */
 1572         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1573         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1574 
 1575         /* Disable host coalescing until we get it set up */
 1576         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1577 
 1578         /* Poll to make sure it's shut down. */
 1579         for (i = 0; i < BGE_TIMEOUT; i++) {
 1580                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1581                         break;
 1582                 DELAY(10);
 1583         }
 1584 
 1585         if (i == BGE_TIMEOUT) {
 1586                 printf("bge%d: host coalescing engine failed to idle\n",
 1587                     sc->bge_unit);
 1588                 return(ENXIO);
 1589         }
 1590 
 1591         /* Set up host coalescing defaults */
 1592         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1593         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1594         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1595         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1596         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1597             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1598                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1599                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1600         }
 1601         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1602         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
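              /*
               * (The tick and BD-count values written here come from the
               * tunables initialized in bge_attach(); the coalescing engine
               * posts a status update/interrupt when either the timer
               * expires or the BD count is reached, whichever comes first.)
               */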
 1603 
 1604         /* Set up address of statistics block */
 1605         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1606             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1607                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
 1608                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
 1609                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
 1610                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
 1611                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1612                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1613                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1614         }
 1615 
 1616         /* Set up address of status block */
 1617         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
 1618             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
 1619         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
 1620             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
 1621         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 1622             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 1623         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
 1624         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
 1625 
 1626         /* Turn on host coalescing state machine */
 1627         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1628 
 1629         /* Turn on RX BD completion state machine and enable attentions */
 1630         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1631             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1632 
 1633         /* Turn on RX list placement state machine */
 1634         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1635 
 1636         /* Turn on RX list selector state machine. */
 1637         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1638             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1639                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1640 
 1641         /* Turn on DMA, clear stats */
 1642         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1643             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1644             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1645             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1646             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1647 
 1648         /* Set misc. local control, enable interrupts on attentions */
 1649         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
 1650 
 1651 #ifdef notdef
 1652         /* Assert GPIO pins for PHY reset */
 1653         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1654             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1655         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1656             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1657 #endif
 1658 
 1659         /* Turn on DMA completion state machine */
 1660         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1661             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1662                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1663 
 1664         /* Turn on write DMA state machine */
 1665         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1666             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1667 
 1668         /* Turn on read DMA state machine */
 1669         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1670             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1671 
 1672         /* Turn on RX data completion state machine */
 1673         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1674 
 1675         /* Turn on RX BD initiator state machine */
 1676         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1677 
 1678         /* Turn on RX data and RX BD initiator state machine */
 1679         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1680 
 1681         /* Turn on Mbuf cluster free state machine */
 1682         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1683             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1684                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1685 
 1686         /* Turn on send BD completion state machine */
 1687         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1688 
 1689         /* Turn on send data completion state machine */
 1690         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1691 
 1692         /* Turn on send data initiator state machine */
 1693         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1694 
 1695         /* Turn on send BD initiator state machine */
 1696         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1697 
 1698         /* Turn on send BD selector state machine */
 1699         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1700 
 1701         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1702         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1703             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1704 
 1705         /* ack/clear link change events */
 1706         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1707             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1708             BGE_MACSTAT_LINK_CHANGED);
 1709         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1710 
 1711         /* Enable PHY auto polling (for MII/GMII only) */
 1712         if (sc->bge_tbi) {
 1713                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1714         } else {
 1715                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
 1716                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
 1717                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1718                             BGE_EVTENB_MI_INTERRUPT);
 1719         }
 1720 
 1721         /* Enable link state change attentions. */
 1722         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1723 
 1724         return(0);
 1725 }
 1726 
 1727 /*
 1728  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 1729  * against our list and return its name if we find a match. Note
 1730  * that since the Broadcom controller contains VPD support, we
 1731  * can get the device name string from the controller itself instead
 1732  * of the compiled-in string. This is a little slow, but it guarantees
 1733  * we'll always announce the right product name.
 1734  */
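      /*
       * (In the newbus probe/attach model, returning 0 here claims the
       * device, while ENXIO declines it so another driver may try.)
       */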
 1735 static int
 1736 bge_probe(dev)
 1737         device_t dev;
 1738 {
 1739         struct bge_type *t;
 1740         struct bge_softc *sc;
 1741         char *descbuf;
 1742 
 1743         t = bge_devs;
 1744 
 1745         sc = device_get_softc(dev);
 1746         bzero(sc, sizeof(struct bge_softc));
 1747         sc->bge_unit = device_get_unit(dev);
 1748         sc->bge_dev = dev;
 1749 
 1750         while (t->bge_name != NULL) {
 1751                 if ((pci_get_vendor(dev) == t->bge_vid) &&
 1752                     (pci_get_device(dev) == t->bge_did)) {
 1753 #ifdef notdef
 1754                         bge_vpd_read(sc);
 1755                         device_set_desc(dev, sc->bge_vpd_prodname);
 1756 #endif
 1757                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
 1758                         if (descbuf == NULL)
 1759                                 return(ENOMEM);
 1760                         snprintf(descbuf, BGE_DEVDESC_MAX,
 1761                             "%s, ASIC rev. %#04x", t->bge_name,
 1762                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
 1763                         device_set_desc_copy(dev, descbuf);
 1764                         if (pci_get_subvendor(dev) == DELL_VENDORID)
 1765                                 sc->bge_no_3_led = 1;
 1766                         free(descbuf, M_TEMP);
 1767                         return(0);
 1768                 }
 1769                 t++;
 1770         }
 1771 
 1772         return(ENXIO);
 1773 }
 1774 
 1775 static void
 1776 bge_dma_free(sc)
 1777         struct bge_softc *sc;
 1778 {
 1779         int i;
 1780 
 1781 
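              /*
               * (Teardown mirrors bge_dma_alloc(): maps are destroyed
               * before the tag they were created from, and the parent tag
               * is destroyed last, since busdma requires a tag to have no
               * outstanding maps when it is destroyed.)
               */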
 1782         /* Destroy DMA maps for RX buffers */
 1783 
 1784         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1785                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
 1786                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1787                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1788         }
 1789 
 1790         /* Destroy DMA maps for jumbo RX buffers */
 1791 
 1792         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1793                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
 1794                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
 1795                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1796         }
 1797 
 1798         /* Destroy DMA maps for TX buffers */
 1799 
 1800         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1801                 if (sc->bge_cdata.bge_tx_dmamap[i])
 1802                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1803                             sc->bge_cdata.bge_tx_dmamap[i]);
 1804         }
 1805 
 1806         if (sc->bge_cdata.bge_mtag)
 1807                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1808 
 1809 
 1810         /* Destroy standard RX ring */
 1811 
 1812         if (sc->bge_ldata.bge_rx_std_ring)
 1813                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
 1814                     sc->bge_ldata.bge_rx_std_ring,
 1815                     sc->bge_cdata.bge_rx_std_ring_map);
 1816 
 1817         if (sc->bge_cdata.bge_rx_std_ring_map) {
 1818                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
 1819                     sc->bge_cdata.bge_rx_std_ring_map);
 1820                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
 1821                     sc->bge_cdata.bge_rx_std_ring_map);
 1822         }
 1823 
 1824         if (sc->bge_cdata.bge_rx_std_ring_tag)
 1825                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
 1826 
 1827         /* Destroy jumbo RX ring */
 1828 
 1829         if (sc->bge_ldata.bge_rx_jumbo_ring)
 1830                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1831                     sc->bge_ldata.bge_rx_jumbo_ring,
 1832                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1833 
 1834         if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
 1835                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1836                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1837                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1838                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1839         }
 1840 
 1841         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
 1842                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1843 
 1844         /* Destroy RX return ring */
 1845 
 1846         if (sc->bge_ldata.bge_rx_return_ring)
 1847                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
 1848                     sc->bge_ldata.bge_rx_return_ring,
 1849                     sc->bge_cdata.bge_rx_return_ring_map);
 1850 
 1851         if (sc->bge_cdata.bge_rx_return_ring_map) {
 1852                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
 1853                     sc->bge_cdata.bge_rx_return_ring_map);
 1854                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
 1855                     sc->bge_cdata.bge_rx_return_ring_map);
 1856         }
 1857 
 1858         if (sc->bge_cdata.bge_rx_return_ring_tag)
 1859                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
 1860 
 1861         /* Destroy TX ring */
 1862 
 1863         if (sc->bge_ldata.bge_tx_ring)
 1864                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
 1865                     sc->bge_ldata.bge_tx_ring,
 1866                     sc->bge_cdata.bge_tx_ring_map);
 1867 
 1868         if (sc->bge_cdata.bge_tx_ring_map) {
 1869                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
 1870                     sc->bge_cdata.bge_tx_ring_map);
 1871                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
 1872                     sc->bge_cdata.bge_tx_ring_map);
 1873         }
 1874 
 1875         if (sc->bge_cdata.bge_tx_ring_tag)
 1876                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
 1877 
 1878         /* Destroy status block */
 1879 
 1880         if (sc->bge_ldata.bge_status_block)
 1881                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
 1882                     sc->bge_ldata.bge_status_block,
 1883                     sc->bge_cdata.bge_status_map);
 1884 
 1885         if (sc->bge_cdata.bge_status_map) {
 1886                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
 1887                     sc->bge_cdata.bge_status_map);
 1888                 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
 1889                     sc->bge_cdata.bge_status_map);
 1890         }
 1891 
 1892         if (sc->bge_cdata.bge_status_tag)
 1893                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
 1894 
 1895         /* Destroy statistics block */
 1896 
 1897         if (sc->bge_ldata.bge_stats)
 1898                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
 1899                     sc->bge_ldata.bge_stats,
 1900                     sc->bge_cdata.bge_stats_map);
 1901 
 1902         if (sc->bge_cdata.bge_stats_map) {
 1903                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
 1904                     sc->bge_cdata.bge_stats_map);
 1905                 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
 1906                     sc->bge_cdata.bge_stats_map);
 1907         }
 1908 
 1909         if (sc->bge_cdata.bge_stats_tag)
 1910                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
 1911 
 1912         /* Destroy the parent tag */
 1913 
 1914         if (sc->bge_cdata.bge_parent_tag)
 1915                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
 1916 
 1917         return;
 1918 }
 1919 
 1920 static int
 1921 bge_dma_alloc(dev)
 1922         device_t dev;
 1923 {
 1924         struct bge_softc *sc;
 1925         int nseg, i, error;
 1926         struct bge_dmamap_arg ctx;
 1927 
 1928         sc = device_get_softc(dev);
 1929 
 1930         /*
 1931          * Allocate the parent bus DMA tag appropriate for PCI.
 1932          */
 1933 #define BGE_NSEG_NEW 32
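              /*
               * (Every ring and mbuf tag below is created as a child of
               * this parent and inherits its address restrictions, so the
               * child tags only need to describe their own sizes and
               * alignment.)
               */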
 1934         error = bus_dma_tag_create(NULL,        /* parent */
 1935                         PAGE_SIZE, 0,           /* alignment, boundary */
 1936                         BUS_SPACE_MAXADDR,      /* lowaddr */
 1937                         BUS_SPACE_MAXADDR,      /* highaddr */
 1938                         NULL, NULL,             /* filter, filterarg */
 1939                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
 1940                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1941                         0,                      /* flags */
 1942                         NULL, NULL,             /* lockfunc, lockarg */
 1943                         &sc->bge_cdata.bge_parent_tag);
 1944 
              if (error) {
                      device_printf(dev, "could not allocate parent dma tag\n");
                      return (ENOMEM);
              }

 1945         /*
 1946          * Create tag for RX mbufs.
 1947          */
 1948         nseg = 32;
 1949         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
 1950             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1951             NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL,
 1952             &sc->bge_cdata.bge_mtag);
 1953 
 1954         if (error) {
 1955                 device_printf(dev, "could not allocate dma tag\n");
 1956                 return (ENOMEM);
 1957         }
 1958 
 1959         /* Create DMA maps for RX buffers */
 1960 
 1961         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1962                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1963                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
 1964                 if (error) {
 1965                         device_printf(dev, "can't create DMA map for RX\n");
 1966                         return(ENOMEM);
 1967                 }
 1968         }
 1969 
 1970         /* Create DMA maps for TX buffers */
 1971 
 1972         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1973                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1974                             &sc->bge_cdata.bge_tx_dmamap[i]);
 1975                 if (error) {
 1976                         device_printf(dev, "can't create DMA map for TX\n");
 1977                         return(ENOMEM);
 1978                 }
 1979         }
 1980 
 1981         /* Create tag for standard RX ring */
 1982 
 1983         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1984             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1985             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
 1986             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
 1987 
 1988         if (error) {
 1989                 device_printf(dev, "could not allocate dma tag\n");
 1990                 return (ENOMEM);
 1991         }
 1992 
 1993         /* Allocate DMA'able memory for standard RX ring */
 1994 
 1995         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
 1996             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
 1997             &sc->bge_cdata.bge_rx_std_ring_map);
 1998         if (error)
 1999                 return (ENOMEM);
 2000 
 2001         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
 2002 
 2003         /* Load the address of the standard RX ring */
 2004 
 2005         ctx.bge_maxsegs = 1;
 2006         ctx.sc = sc;
 2007 
 2008         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
 2009             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
 2010             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2011 
 2012         if (error)
 2013                 return (ENOMEM);
 2014 
 2015         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
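              /*
               * (With BUS_DMA_NOWAIT, bus_dmamap_load() either fails
               * outright or invokes bge_dma_map_addr() before returning,
               * so ctx.bge_busaddr is valid here; the same load-and-save
               * pattern repeats for each ring below.)
               */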
 2016 
 2017         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2018             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2019 
 2020                 /*
 2021                  * Create tag for jumbo mbufs.
 2022                  * This is really a bit of a kludge. We allocate a special
 2023                  * jumbo buffer pool which (thanks to the way our DMA
 2024                  * memory allocation works) will consist of contiguous
 2025                  * pages. This means that even though a jumbo buffer might
 2026                  * be larger than a page size, we don't really need to
 2027                  * map it into more than one DMA segment. However, the
 2028                  * default mbuf tag will result in multi-segment mappings,
 2029                  * so we have to create a special jumbo mbuf tag that
 2030                  * lets us get away with mapping the jumbo buffers as
 2031                  * a single segment. I think eventually the driver should
 2032                  * be changed so that it uses ordinary mbufs and cluster
 2033                  * buffers, i.e. jumbo frames can span multiple DMA
 2034                  * descriptors. But that's a project for another day.
 2035                  */
 2036 
 2037                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2038                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2039                     NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
 2040                     &sc->bge_cdata.bge_mtag_jumbo);
 2041 
 2042                 if (error) {
 2043                         device_printf(dev, "could not allocate dma tag\n");
 2044                         return (ENOMEM);
 2045                 }
 2046 
 2047                 /* Create tag for jumbo RX ring */
 2048 
 2049                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2050                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2051                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
 2052                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
 2053 
 2054                 if (error) {
 2055                         device_printf(dev, "could not allocate dma tag\n");
 2056                         return (ENOMEM);
 2057                 }
 2058 
 2059                 /* Allocate DMA'able memory for jumbo RX ring */
 2060 
 2061                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2062                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
 2063                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
 2064                 if (error)
 2065                         return (ENOMEM);
 2066 
 2067                 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
 2068                     BGE_JUMBO_RX_RING_SZ);
 2069 
 2070                 /* Load the address of the jumbo RX ring */
 2071 
 2072                 ctx.bge_maxsegs = 1;
 2073                 ctx.sc = sc;
 2074 
 2075                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2076                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2077                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
 2078                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2079 
 2080                 if (error)
 2081                         return (ENOMEM);
 2082 
 2083                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
 2084 
 2085                 /* Create DMA maps for jumbo RX buffers */
 2086 
 2087                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 2088                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
 2089                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 2090                         if (error) {
 2091                                 device_printf(dev,
 2092                                     "can't create DMA map for jumbo RX\n");
 2093                                 return(ENOMEM);
 2094                         }
 2095                 }
 2096 
 2097         }
 2098 
 2099         /* Create tag for RX return ring */
 2100 
 2101         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2102             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2103             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
 2104             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
 2105 
 2106         if (error) {
 2107                 device_printf(dev, "could not allocate dma tag\n");
 2108                 return (ENOMEM);
 2109         }
 2110 
 2111         /* Allocate DMA'able memory for RX return ring */
 2112 
 2113         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
 2114             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
 2115             &sc->bge_cdata.bge_rx_return_ring_map);
 2116         if (error)
 2117                 return (ENOMEM);
 2118 
 2119         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
 2120             BGE_RX_RTN_RING_SZ(sc));
 2121 
 2122         /* Load the address of the RX return ring */
 2123 
 2124         ctx.bge_maxsegs = 1;
 2125         ctx.sc = sc;
 2126 
 2127         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
 2128             sc->bge_cdata.bge_rx_return_ring_map,
 2129             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
 2130             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2131 
 2132         if (error)
 2133                 return (ENOMEM);
 2134 
 2135         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
 2136 
 2137         /* Create tag for TX ring */
 2138 
 2139         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2140             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2141             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
 2142             &sc->bge_cdata.bge_tx_ring_tag);
 2143 
 2144         if (error) {
 2145                 device_printf(dev, "could not allocate dma tag\n");
 2146                 return (ENOMEM);
 2147         }
 2148 
 2149         /* Allocate DMA'able memory for TX ring */
 2150 
 2151         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
 2152             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
 2153             &sc->bge_cdata.bge_tx_ring_map);
 2154         if (error)
 2155                 return (ENOMEM);
 2156 
 2157         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
 2158 
 2159         /* Load the address of the TX ring */
 2160 
 2161         ctx.bge_maxsegs = 1;
 2162         ctx.sc = sc;
 2163 
 2164         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
 2165             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
 2166             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2167 
 2168         if (error)
 2169                 return (ENOMEM);
 2170 
 2171         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
 2172 
 2173         /* Create tag for status block */
 2174 
 2175         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2176             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2177             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
 2178             NULL, NULL, &sc->bge_cdata.bge_status_tag);
 2179 
 2180         if (error) {
 2181                 device_printf(dev, "could not allocate dma tag\n");
 2182                 return (ENOMEM);
 2183         }
 2184 
 2185         /* Allocate DMA'able memory for status block */
 2186 
 2187         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
 2188             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
 2189             &sc->bge_cdata.bge_status_map);
 2190         if (error)
 2191                 return (ENOMEM);
 2192 
 2193         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
 2194 
 2195         /* Load the address of the status block */
 2196 
 2197         ctx.sc = sc;
 2198         ctx.bge_maxsegs = 1;
 2199 
 2200         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
 2201             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
 2202             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2203 
 2204         if (error)
 2205                 return (ENOMEM);
 2206 
 2207         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
 2208 
 2209         /* Create tag for statistics block */
 2210 
 2211         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2212             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2213             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
 2214             &sc->bge_cdata.bge_stats_tag);
 2215 
 2216         if (error) {
 2217                 device_printf(dev, "could not allocate dma tag\n");
 2218                 return (ENOMEM);
 2219         }
 2220 
 2221         /* Allocate DMA'able memory for statistics block */
 2222 
 2223         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
 2224             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
 2225             &sc->bge_cdata.bge_stats_map);
 2226         if (error)
 2227                 return (ENOMEM);
 2228 
 2229         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
 2230 
 2231         /* Load the address of the statistics block */
 2232 
 2233         ctx.sc = sc;
 2234         ctx.bge_maxsegs = 1;
 2235 
 2236         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
 2237             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
 2238             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2239 
 2240         if (error)
 2241                 return (ENOMEM);
 2242 
 2243         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
 2244 
 2245         return(0);
 2246 }
 2247 
 2248 static int
 2249 bge_attach(dev)
 2250         device_t dev;
 2251 {
 2252         struct ifnet *ifp;
 2253         struct bge_softc *sc;
 2254         u_int32_t hwcfg = 0;
 2255         u_int32_t mac_tmp = 0;
 2256         u_char eaddr[6];
 2257         int unit, error = 0, rid;
 2258 
 2259         sc = device_get_softc(dev);
 2260         unit = device_get_unit(dev);
 2261         sc->bge_dev = dev;
 2262         sc->bge_unit = unit;
 2263 
 2264         /*
 2265          * Map control/status registers.
 2266          */
 2267         pci_enable_busmaster(dev);
 2268 
 2269         rid = BGE_PCI_BAR0;
 2270         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 2271             RF_ACTIVE|PCI_RF_DENSE);
 2272 
 2273         if (sc->bge_res == NULL) {
 2274                 printf("bge%d: couldn't map memory\n", unit);
 2275                 error = ENXIO;
 2276                 goto fail;
 2277         }
 2278 
 2279         sc->bge_btag = rman_get_bustag(sc->bge_res);
 2280         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
 2281         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
 2282 
 2283         /* Allocate interrupt */
 2284         rid = 0;
 2285 
 2286         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2287             RF_SHAREABLE | RF_ACTIVE);
 2288 
 2289         if (sc->bge_irq == NULL) {
 2290                 printf("bge%d: couldn't map interrupt\n", unit);
 2291                 error = ENXIO;
 2292                 goto fail;
 2293         }
 2294 
 2295         sc->bge_unit = unit;
 2296 
 2297         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
 2298 
 2299         /* Save ASIC rev. */
 2300 
 2301         sc->bge_chipid =
 2302             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
 2303             BGE_PCIMISCCTL_ASICREV;
 2304         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
 2305         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
 2306 
 2307         /*
 2308          * Treat the 5714 like the 5750 until we have more info
 2309          * on this chip.
 2310          */
 2311         if (sc->bge_asicrev == BGE_ASICREV_BCM5714)
 2312                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
 2313 
 2314         /*
 2315          * XXX: Broadcom Linux driver.  Not in specs or errata.
 2316          * PCI-Express?
 2317          */
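              /*
               * (PCI capability headers carry the capability ID in byte 0
               * and a next-capability pointer in byte 1, so the test below
               * checks whether the MSI capability's next pointer leads to
               * the PCI Express capability and then verifies its ID.)
               */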
 2318         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 2319                 u_int32_t v;
 2320 
 2321                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
 2322                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
 2323                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
 2324                         if ((v & 0xff) == BGE_PCIE_CAPID)
 2325                                 sc->bge_pcie = 1;
 2326                 }
 2327         }
 2328 
 2329         /* Try to reset the chip. */
 2330         bge_reset(sc);
 2331 
 2332         if (bge_chipinit(sc)) {
 2333                 printf("bge%d: chip initialization failed\n", sc->bge_unit);
 2334                 bge_release_resources(sc);
 2335                 error = ENXIO;
 2336                 goto fail;
 2337         }
 2338 
 2339         /*
 2340          * Get station address from the EEPROM.
 2341          */
 2342         mac_tmp = bge_readmem_ind(sc, 0x0c14);
 2343         if ((mac_tmp >> 16) == 0x484b) {
 2344                 eaddr[0] = (u_char)(mac_tmp >> 8);
 2345                 eaddr[1] = (u_char)mac_tmp;
 2346                 mac_tmp = bge_readmem_ind(sc, 0x0c18);
 2347                 eaddr[2] = (u_char)(mac_tmp >> 24);
 2348                 eaddr[3] = (u_char)(mac_tmp >> 16);
 2349                 eaddr[4] = (u_char)(mac_tmp >> 8);
 2350                 eaddr[5] = (u_char)mac_tmp;
 2351         } else if (bge_read_eeprom(sc, eaddr,
 2352             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2353                 printf("bge%d: failed to read station address\n", unit);
 2354                 bge_release_resources(sc);
 2355                 error = ENXIO;
 2356                 goto fail;
 2357         }
 2358 
 2359         /* The 5705 and 5750 limit the RX return ring to 512 entries. */
 2360         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 2361             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 2362                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 2363         else
 2364                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 2365 
 2366         if (bge_dma_alloc(dev)) {
 2367                 printf("bge%d: failed to allocate DMA resources\n",
 2368                     sc->bge_unit);
 2369                 bge_release_resources(sc);
 2370                 error = ENXIO;
 2371                 goto fail;
 2372         }
 2373 
 2374         /*
 2375          * Try to allocate memory for jumbo buffers.
 2376          * The 5705 does not appear to support jumbo frames.
 2377          */
 2378         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2379             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2380                 if (bge_alloc_jumbo_mem(sc)) {
 2381                         printf("bge%d: jumbo buffer allocation "
 2382                             "failed\n", sc->bge_unit);
 2383                         bge_release_resources(sc);
 2384                         error = ENXIO;
 2385                         goto fail;
 2386                 }
 2387         }
 2388 
 2389         /* Set default tuneable values. */
 2390         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2391         sc->bge_rx_coal_ticks = 150;
 2392         sc->bge_tx_coal_ticks = 150;
 2393         sc->bge_rx_max_coal_bds = 64;
 2394         sc->bge_tx_max_coal_bds = 128;
 2395 
 2396         /* Set up ifnet structure */
 2397         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
 2398         if (ifp == NULL) {
 2399                 printf("bge%d: failed to if_alloc()\n", sc->bge_unit);
 2400                 bge_release_resources(sc);
 2401                 error = ENXIO;
 2402                 goto fail;
 2403         }
 2404         ifp->if_softc = sc;
 2405         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2406         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2407         ifp->if_ioctl = bge_ioctl;
 2408         ifp->if_start = bge_start;
 2409         ifp->if_watchdog = bge_watchdog;
 2410         ifp->if_init = bge_init;
 2411         ifp->if_mtu = ETHERMTU;
 2412         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
 2413         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 2414         IFQ_SET_READY(&ifp->if_snd);
 2415         ifp->if_hwassist = BGE_CSUM_FEATURES;
 2416         /* NB: the code for RX csum offload is disabled for now */
 2417         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
 2418             IFCAP_VLAN_MTU;
 2419         ifp->if_capenable = ifp->if_capabilities;
 2420 
 2421         /*
 2422          * Figure out what sort of media we have by checking the
 2423          * hardware config word in the first 32k of NIC internal memory,
 2424          * or fall back to examining the EEPROM if necessary.
 2425          * Note: on some BCM5700 cards, this value appears to be unset.
 2426          * If that's the case, we have to rely on identifying the NIC
 2427          * by its PCI subsystem ID, as we do below for the SysKonnect
 2428          * SK-9D41.
 2429          */
 2430         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
 2431                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2432         else {
 2433                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
 2434                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
 2435                 hwcfg = ntohl(hwcfg);
 2436         }
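              /*
               * (The EEPROM copy of the config word is stored big-endian,
               * hence the ntohl() above; the copy read from NIC memory is
               * used as-is.)
               */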
 2437 
 2438         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2439                 sc->bge_tbi = 1;
 2440 
 2441         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2442         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
 2443                 sc->bge_tbi = 1;
 2444 
 2445         if (sc->bge_tbi) {
 2446                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
 2447                     bge_ifmedia_upd, bge_ifmedia_sts);
 2448                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2449                 ifmedia_add(&sc->bge_ifmedia,
 2450                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2451                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2452                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2453                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
 2454         } else {
 2455                 /*
 2456                  * Do transceiver setup.
 2457                  */
 2458                 if (mii_phy_probe(dev, &sc->bge_miibus,
 2459                     bge_ifmedia_upd, bge_ifmedia_sts)) {
 2460                         printf("bge%d: MII without any PHY!\n", sc->bge_unit);
 2461                         bge_release_resources(sc);
 2462                         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
                                  sc->bge_asicrev != BGE_ASICREV_BCM5750)
                                      bge_free_jumbo_mem(sc);
 2463                         error = ENXIO;
 2464                         goto fail;
 2465                 }
 2466         }
 2467 
 2468         /*
 2469          * When using the BCM5701 in PCI-X mode, data corruption has
 2470          * been observed in the first few bytes of some received packets.
 2471          * Aligning the packet buffer in memory eliminates the corruption.
 2472          * Unfortunately, this misaligns the packet payloads.  On platforms
 2473          * which do not support unaligned accesses, we will realign the
 2474          * payloads by copying the received packets.
 2475          */
 2476         switch (sc->bge_chipid) {
 2477         case BGE_CHIPID_BCM5701_A0:
 2478         case BGE_CHIPID_BCM5701_B0:
 2479         case BGE_CHIPID_BCM5701_B2:
 2480         case BGE_CHIPID_BCM5701_B5:
 2481                 /* If in PCI-X mode, work around the alignment bug. */
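                      /*
                       * (Reading of the flags: BGE_PCISTATE_PCI_BUSMODE is
                       * set for conventional PCI, so a clear BUSMODE bit
                       * together with a set BUSSPEED bit is taken to mean
                       * the device is running on a PCI-X bus.)
                       */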
 2482                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
 2483                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2484                     BGE_PCISTATE_PCI_BUSSPEED)
 2485                         sc->bge_rx_alignment_bug = 1;
 2486                 break;
 2487         }
 2488 
 2489         /*
 2490          * Call MI attach routine.
 2491          */
 2492         ether_ifattach(ifp, eaddr);
 2493         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
 2494 
 2495         /*
 2496          * Hookup IRQ last.
 2497          */
 2498         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
 2499            bge_intr, sc, &sc->bge_intrhand);
 2500 
 2501         if (error) {
 2502                 bge_detach(dev);
 2503                 printf("bge%d: couldn't set up irq\n", unit);
 2504         }
 2505 
 2506 fail:
 2507         return(error);
 2508 }
 2509 
 2510 static int
 2511 bge_detach(dev)
 2512         device_t dev;
 2513 {
 2514         struct bge_softc *sc;
 2515         struct ifnet *ifp;
 2516 
 2517         sc = device_get_softc(dev);
 2518         ifp = sc->bge_ifp;
 2519 
 2520         BGE_LOCK(sc);
 2521         bge_stop(sc);
 2522         bge_reset(sc);
 2523         BGE_UNLOCK(sc);
 2524 
 2525         ether_ifdetach(ifp);
 2526 
 2527         if (sc->bge_tbi) {
 2528                 ifmedia_removeall(&sc->bge_ifmedia);
 2529         } else {
 2530                 bus_generic_detach(dev);
 2531                 device_delete_child(dev, sc->bge_miibus);
 2532         }
 2533 
 2534         bge_release_resources(sc);
 2535         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2536             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2537                 bge_free_jumbo_mem(sc);
 2538 
 2539         return(0);
 2540 }
 2541 
 2542 static void
 2543 bge_release_resources(sc)
 2544         struct bge_softc *sc;
 2545 {
 2546         device_t dev;
 2547 
 2548         dev = sc->bge_dev;
 2549 
 2550         if (sc->bge_vpd_prodname != NULL)
 2551                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2552 
 2553         if (sc->bge_vpd_readonly != NULL)
 2554                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2555 
 2556         if (sc->bge_intrhand != NULL)
 2557                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
 2558 
 2559         if (sc->bge_irq != NULL)
 2560                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
 2561 
 2562         if (sc->bge_res != NULL)
 2563                 bus_release_resource(dev, SYS_RES_MEMORY,
 2564                     BGE_PCI_BAR0, sc->bge_res);
 2565 
 2566         if (sc->bge_ifp != NULL)
 2567                 if_free(sc->bge_ifp);
 2568 
 2569         bge_dma_free(sc);
 2570 
 2571         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
 2572                 BGE_LOCK_DESTROY(sc);
 2573 
 2574         return;
 2575 }
 2576 
 2577 static void
 2578 bge_reset(sc)
 2579         struct bge_softc *sc;
 2580 {
 2581         device_t dev;
 2582         u_int32_t cachesize, command, pcistate, reset;
 2583         int i, val = 0;
 2584 
 2585         dev = sc->bge_dev;
 2586 
 2587         /* Save some important PCI state. */
 2588         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
 2589         command = pci_read_config(dev, BGE_PCI_CMD, 4);
 2590         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
 2591 
 2592         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2593             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2594             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2595 
 2596         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
 2597 
 2598         /* XXX: Broadcom Linux driver. */
 2599         if (sc->bge_pcie) {
 2600                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
 2601                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
 2602                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2603                         /* Prevent PCIE link training during global reset */
 2604                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
 2605                         reset |= (1<<29);
 2606                 }
 2607         }
 2608 
 2609         /* Issue global reset */
 2610         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
 2611 
 2612         DELAY(1000);
 2613 
 2614         /* XXX: Broadcom Linux driver. */
 2615         if (sc->bge_pcie) {
 2616                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
 2617                         uint32_t v;
 2618 
 2619                         DELAY(500000); /* wait for link training to complete */
 2620                         v = pci_read_config(dev, 0xc4, 4);
 2621                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
 2622                 }
 2623                 /* Set PCIE max payload size and clear error status. */
 2624                 pci_write_config(dev, 0xd8, 0xf5000, 4);
 2625         }
 2626 
 2627         /* Reset some of the PCI state that got zapped by reset */
 2628         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2629             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2630             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2631         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
 2632         pci_write_config(dev, BGE_PCI_CMD, command, 4);
 2633         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2634 
 2635         /* Enable memory arbiter. */
 2636         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2637             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2638                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2639 
 2640         /*
 2641          * Prevent PXE restart: write a magic number to the
 2642          * general communications memory at 0xB50.
 2643          */
 2644         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2645         /*
 2646          * Poll the value location we just wrote until
 2647          * we see the 1's complement of the magic number.
 2648          * This indicates that the firmware initialization
 2649          * is complete.
 2650          */
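              /*
               * (BGE_MAGIC_NUMBER is 0x4B657654 in if_bgereg.h at this
               * revision, so the loop below is waiting for the bootcode to
               * post 0xB49A89AB.)
               */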
 2651         for (i = 0; i < BGE_TIMEOUT; i++) {
 2652                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2653                 if (val == ~BGE_MAGIC_NUMBER)
 2654                         break;
 2655                 DELAY(10);
 2656         }
 2657 
 2658         if (i == BGE_TIMEOUT) {
 2659                 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
 2660                 return;
 2661         }
 2662 
 2663         /*
 2664          * XXX Wait for the value of the PCISTATE register to
 2665          * return to its original pre-reset state. This is a
 2666          * fairly good indicator of reset completion. If we don't
 2667          * wait for the reset to fully complete, trying to read
 2668          * from the device's non-PCI registers may yield garbage
 2669          * results.
 2670          */
 2671         for (i = 0; i < BGE_TIMEOUT; i++) {
 2672                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
 2673                         break;
 2674                 DELAY(10);
 2675         }
 2676 
 2677         /* Fix up byte swapping */
 2678         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
 2679             BGE_MODECTL_BYTESWAP_DATA);
 2680 
 2681         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2682 
 2683         /*
 2684          * The 5704 in TBI mode apparently needs some special
 2685          * adjustment to ensure the SERDES drive level is set
 2686          * to 1.2V.
 2687          */
 2688         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
 2689                 uint32_t serdescfg;
 2690                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
 2691                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
 2692                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
 2693         }
 2694 
 2695         /* XXX: Broadcom Linux driver. */
 2696         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2697                 uint32_t v;
 2698 
 2699                 v = CSR_READ_4(sc, 0x7c00);
 2700                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
 2701         }
 2702         DELAY(10000);
 2703 
 2704         return;
 2705 }
 2706 
 2707 /*
 2708  * Frame reception handling. This is called if there's a frame
 2709  * on the receive return list.
 2710  *
 2711  * Note: we have to be able to handle two possibilities here:
 2712  * 1) the frame is from the jumbo receive ring
 2713  * 2) the frame is from the standard receive ring
 2714  */
 2715 
 2716 static void
 2717 bge_rxeof(sc)
 2718         struct bge_softc *sc;
 2719 {
 2720         struct ifnet *ifp;
 2721         int stdcnt = 0, jumbocnt = 0;
 2722 
 2723         BGE_LOCK_ASSERT(sc);
 2724 
 2725         ifp = sc->bge_ifp;
 2726 
 2727         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2728             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
 2729         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2730             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
 2731         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2732             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2733                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2734                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2735                     BUS_DMASYNC_POSTREAD);
 2736         }
 2737 
 2738         while (sc->bge_rx_saved_considx !=
 2739             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
 2740                 struct bge_rx_bd        *cur_rx;
 2741                 u_int32_t               rxidx;
 2742                 struct ether_header     *eh;
 2743                 struct mbuf             *m = NULL;
 2744                 u_int16_t               vlan_tag = 0;
 2745                 int                     have_tag = 0;
 2746 
 2747                 cur_rx =
 2748                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
 2749 
 2750                 rxidx = cur_rx->bge_idx;
 2751                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2752 
 2753                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2754                         have_tag = 1;
 2755                         vlan_tag = cur_rx->bge_vlan_tag;
 2756                 }
 2757 
 2758                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2759                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2760                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 2761                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
 2762                             BUS_DMASYNC_POSTREAD);
 2763                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 2764                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
 2765                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2766                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2767                         jumbocnt++;
 2768                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2769                                 ifp->if_ierrors++;
 2770                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2771                                 continue;
 2772                         }
 2773                         if (bge_newbuf_jumbo(sc,
 2774                             sc->bge_jumbo, NULL) == ENOBUFS) {
 2775                                 ifp->if_ierrors++;
 2776                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2777                                 continue;
 2778                         }
 2779                 } else {
 2780                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2781                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2782                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
 2783                             BUS_DMASYNC_POSTREAD);
 2784                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2785                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
 2786                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2787                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2788                         stdcnt++;
 2789                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2790                                 ifp->if_ierrors++;
 2791                                 bge_newbuf_std(sc, sc->bge_std, m);
 2792                                 continue;
 2793                         }
 2794                         if (bge_newbuf_std(sc, sc->bge_std,
 2795                             NULL) == ENOBUFS) {
 2796                                 ifp->if_ierrors++;
 2797                                 bge_newbuf_std(sc, sc->bge_std, m);
 2798                                 continue;
 2799                         }
 2800                 }
 2801 
 2802                 ifp->if_ipackets++;
 2803 #ifndef __i386__
 2804                 /*
 2805                  * The i386 allows unaligned accesses, but for other
 2806                  * platforms we must make sure the payload is aligned.
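                       * (ETHER_ALIGN is 2: shifting the frame two bytes lands
                       * the IP header, which follows the 14-byte Ethernet
                       * header, on a 32-bit boundary.)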
 2807                  */
 2808                 if (sc->bge_rx_alignment_bug) {
 2809                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 2810                             cur_rx->bge_len);
 2811                         m->m_data += ETHER_ALIGN;
 2812                 }
 2813 #endif
 2814                 eh = mtod(m, struct ether_header *);
 2815                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2816                 m->m_pkthdr.rcvif = ifp;
 2817 
 2818 #if 0 /* currently broken for some packets, possibly related to TCP options */
 2819                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2820                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2821                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
 2822                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2823                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
 2824                                 m->m_pkthdr.csum_data =
 2825                                     cur_rx->bge_tcp_udp_csum;
 2826                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 2827                         }
 2828                 }
 2829 #endif
 2830 
 2831                 /*
 2832                  * If we received a packet with a vlan tag,
 2833                  * attach that information to the packet.
 2834                  */
 2835                 if (have_tag)
 2836                         VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
 2837 
 2838                 BGE_UNLOCK(sc);
 2839                 (*ifp->if_input)(ifp, m);
 2840                 BGE_LOCK(sc);
 2841         }
 2842 
 2843         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2844             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 2845         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2846             sc->bge_cdata.bge_rx_std_ring_map,
 2847             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
 2848         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2849             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2850                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2851                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2852                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2853         }
 2854 
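              /*
               * Tell the chip how far along the return ring we have read,
               * and, for any producer ring we replenished, where its new
               * producer index now stands.
               */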
 2855         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2856         if (stdcnt)
 2857                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2858         if (jumbocnt)
 2859                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2860 
 2861         return;
 2862 }
 2863 
 2864 static void
 2865 bge_txeof(sc)
 2866         struct bge_softc *sc;
 2867 {
 2868         struct bge_tx_bd *cur_tx = NULL;
 2869         struct ifnet *ifp;
 2870 
 2871         BGE_LOCK_ASSERT(sc);
 2872 
 2873         ifp = sc->bge_ifp;
 2874 
 2875         /*
 2876          * Go through our tx ring and free mbufs for those
 2877          * frames that have been sent.
 2878          */
 2879         while (sc->bge_tx_saved_considx !=
 2880             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
 2881                 u_int32_t               idx = 0;
 2882 
 2883                 idx = sc->bge_tx_saved_considx;
 2884                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 2885                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2886                         ifp->if_opackets++;
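                      /*
                       * Only the slot of a frame's final descriptor has the
                       * mbuf (and its DMA map) hung off it; see bge_encap().
                       */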
 2887                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 2888                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
 2889                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2890                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2891                             sc->bge_cdata.bge_tx_dmamap[idx]);
 2892                 }
 2893                 sc->bge_txcnt--;
 2894                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2895                 ifp->if_timer = 0;
 2896         }
 2897 
 2898         if (cur_tx != NULL)
 2899                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2900 
 2901         return;
 2902 }
 2903 
 2904 static void
 2905 bge_intr(xsc)
 2906         void *xsc;
 2907 {
 2908         struct bge_softc *sc;
 2909         struct ifnet *ifp;
 2910         u_int32_t statusword;
 2911         u_int32_t status, mimode;
 2912 
 2913         sc = xsc;
 2914         ifp = sc->bge_ifp;
 2915 
 2916         BGE_LOCK(sc);
 2917 
 2918         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2919             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
 2920 
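              /*
               * The chip DMAs its status block into host memory; read and
               * clear the status word in one atomic step so that an update
               * arriving while we run is not lost.
               */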
 2921         statusword =
 2922             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2923 
 2924 #ifdef notdef
 2925         /* Avoid this for now -- checking this register is expensive. */
 2926         /* Make sure this is really our interrupt. */
 2927         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2928                 return;
 2929 #endif
 2930         /* Ack interrupt and stop others from occurring. */
 2931         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2932 
 2933         /*
 2934          * Process link state changes.
 2935          * Grrr. The link status word in the status block does
 2936          * not work correctly on the BCM5700 rev AX and BX chips,
 2937          * according to all available information. Hence, we have
 2938          * to enable MII interrupts in order to properly obtain
 2939          * async link changes. Unfortunately, this also means that
 2940          * we have to read the MAC status register to detect link
 2941          * changes, thereby adding an additional register access to
 2942          * the interrupt handler.
 2943          */
 2944 
 2945         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
 2946 
 2947                 status = CSR_READ_4(sc, BGE_MAC_STS);
 2948                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 2949                         sc->bge_link = 0;
 2950                         callout_stop(&sc->bge_stat_ch);
 2951                         bge_tick_locked(sc);
 2952                         /* Clear the interrupt */
 2953                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 2954                             BGE_EVTENB_MI_INTERRUPT);
 2955                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
 2956                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
 2957                             BRGPHY_INTRS);
 2958                 }
 2959         } else {
 2960                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
 2961                         /*
 2962                          * Sometimes PCS encoding errors are detected in
 2963                          * TBI mode (on fiber NICs), and for some reason
 2964                          * the chip will signal them as link changes.
 2965                          * If we get a link change event, but the 'PCS
 2966                          * encoding error' bit in the MAC status register
 2967                          * is set, don't bother doing a link check.
 2968                          * This avoids spurious "gigabit link up" messages
 2969                          * that sometimes appear on fiber NICs during
 2970                          * periods of heavy traffic. (There should be no
 2971                          * effect on copper NICs.)
 2972                          *
 2973                          * If we do have a copper NIC (bge_tbi == 0) then
 2974                          * check that the AUTOPOLL bit is set before
 2975                          * processing the event as a real link change.
 2976                          * Turning AUTOPOLL on and off in the MII read/write
 2977                          * functions will often trigger a link status
 2978                          * interrupt for no reason.
 2979                          */
 2980                         status = CSR_READ_4(sc, BGE_MAC_STS);
 2981                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
 2982                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
 2983                             BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
 2984                             (mimode & BGE_MIMODE_AUTOPOLL))) {
 2985                                 sc->bge_link = 0;
 2986                                 callout_stop(&sc->bge_stat_ch);
 2987                                 bge_tick_locked(sc);
 2988                         }
 2989                         /* Clear the interrupt */
 2990                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 2991                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 2992                             BGE_MACSTAT_LINK_CHANGED);
 2993 
 2994                         /* Force flush the status block cached by PCI bridge */
 2995                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
 2996                 }
 2997         }
 2998 
 2999         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3000                 /* Check RX return ring producer/consumer */
 3001                 bge_rxeof(sc);
 3002 
 3003                 /* Check TX ring producer/consumer */
 3004                 bge_txeof(sc);
 3005         }
 3006 
 3007         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 3008             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 3009 
 3010         bge_handle_events(sc);
 3011 
 3012         /* Re-enable interrupts. */
 3013         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3014 
 3015         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3016             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3017                 bge_start_locked(ifp);
 3018 
 3019         BGE_UNLOCK(sc);
 3020 
 3021         return;
 3022 }
 3023 
 3024 static void
 3025 bge_tick_locked(sc)
 3026         struct bge_softc *sc;
 3027 {
 3028         struct mii_data *mii = NULL;
 3029         struct ifmedia *ifm = NULL;
 3030         struct ifnet *ifp;
 3031 
 3032         ifp = sc->bge_ifp;
 3033 
 3034         BGE_LOCK_ASSERT(sc);
 3035 
 3036         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3037             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 3038                 bge_stats_update_regs(sc);
 3039         else
 3040                 bge_stats_update(sc);
 3041         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3042         if (sc->bge_link)
 3043                 return;
 3044 
 3045         if (sc->bge_tbi) {
 3046                 ifm = &sc->bge_ifmedia;
 3047                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3048                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
 3049                         sc->bge_link++;
 3050                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 3051                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3052                                     BGE_MACMODE_TBI_SEND_CFGS);
 3053                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 3054                         if (bootverbose)
 3055                                 printf("bge%d: gigabit link up\n",
 3056                                     sc->bge_unit);
 3057                         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3058                                 bge_start_locked(ifp);
 3059                 }
 3060                 return;
 3061         }
 3062 
 3063         mii = device_get_softc(sc->bge_miibus);
 3064         mii_tick(mii);
 3065 
 3066         if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
 3067             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 3068                 sc->bge_link++;
 3069                 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
 3070                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
 3071                     bootverbose)
 3072                         printf("bge%d: gigabit link up\n", sc->bge_unit);
 3073                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3074                         bge_start_locked(ifp);
 3075         }
 3076 
 3077         return;
 3078 }
 3079 
 3080 static void
 3081 bge_tick(xsc)
 3082         void *xsc;
 3083 {
 3084         struct bge_softc *sc;
 3085 
 3086         sc = xsc;
 3087 
 3088         BGE_LOCK(sc);
 3089         bge_tick_locked(sc);
 3090         BGE_UNLOCK(sc);
 3091 }
 3092 
 3093 static void
 3094 bge_stats_update_regs(sc)
 3095         struct bge_softc *sc;
 3096 {
 3097         struct ifnet *ifp;
 3098         struct bge_mac_stats_regs stats;
 3099         u_int32_t *s;
 3100         int i;
 3101 
 3102         ifp = sc->bge_ifp;
 3103 
 3104         s = (u_int32_t *)&stats;
 3105         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
 3106                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
 3107                 s++;
 3108         }
 3109 
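              /*
               * The hardware counters are cumulative, so the subtraction
               * below effectively replaces if_collisions with the chip's
               * running total.
               */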
 3110         ifp->if_collisions +=
 3111            (stats.dot3StatsSingleCollisionFrames +
 3112            stats.dot3StatsMultipleCollisionFrames +
 3113            stats.dot3StatsExcessiveCollisions +
 3114            stats.dot3StatsLateCollisions) -
 3115            ifp->if_collisions;
 3116 
 3117         return;
 3118 }
 3119 
 3120 static void
 3121 bge_stats_update(sc)
 3122         struct bge_softc *sc;
 3123 {
 3124         struct ifnet *ifp;
 3125         struct bge_stats *stats;
 3126 
 3127         ifp = sc->bge_ifp;
 3128 
 3129         stats = (struct bge_stats *)(sc->bge_vhandle +
 3130             BGE_MEMWIN_START + BGE_STATS_BLOCK);
 3131 
 3132         ifp->if_collisions +=
 3133            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
 3134            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
 3135            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
 3136            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
 3137            ifp->if_collisions;
 3138 
 3139 #ifdef notdef
 3140         ifp->if_collisions +=
 3141            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
 3142            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
 3143            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
 3144            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
 3145            ifp->if_collisions;
 3146 #endif
 3147 
 3148         return;
 3149 }
 3150 
 3151 /*
 3152  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3153  * pointers to descriptors.
 3154  */
 3155 static int
 3156 bge_encap(sc, m_head, txidx)
 3157         struct bge_softc *sc;
 3158         struct mbuf *m_head;
 3159         u_int32_t *txidx;
 3160 {
 3161         struct bge_tx_bd        *f = NULL;
 3162         u_int16_t               csum_flags = 0;
 3163         struct m_tag            *mtag;
 3164         struct bge_dmamap_arg   ctx;
 3165         bus_dmamap_t            map;
 3166         int                     error;
 3167 
 3168 
 3169         if (m_head->m_pkthdr.csum_flags) {
 3170                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 3171                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3172                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
 3173                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3174                 if (m_head->m_flags & M_LASTFRAG)
 3175                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
 3176                 else if (m_head->m_flags & M_FRAG)
 3177                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 3178         }
 3179 
 3180         mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
 3181 
 3182         ctx.sc = sc;
 3183         ctx.bge_idx = *txidx;
 3184         ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 3185         ctx.bge_flags = csum_flags;
 3186         /*
 3187          * Sanity check: avoid coming within 16 descriptors
 3188          * of the end of the ring.
 3189          */
 3190         ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
 3191 
 3192         map = sc->bge_cdata.bge_tx_dmamap[*txidx];
 3193         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 3194             m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 3195 
 3196         if (error || ctx.bge_maxsegs == 0 /*||
 3197             ctx.bge_idx == sc->bge_tx_saved_considx*/)
 3198                 return (ENOBUFS);
 3199 
 3200         /*
 3201          * Ensure that the map for this transmission
 3202          * is placed at the array index of the last descriptor
 3203          * in this chain.
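               * (bge_txeof() looks the map up by that index to unload it.)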
 3204          */
 3205         sc->bge_cdata.bge_tx_dmamap[*txidx] =
 3206             sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
 3207         sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
 3208         sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
 3209         sc->bge_txcnt += ctx.bge_maxsegs;
 3210         f = &sc->bge_ldata.bge_tx_ring[*txidx];
 3211         if (mtag != NULL) {
 3212                 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
 3213                 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
 3214         } else {
 3215                 f->bge_vlan_tag = 0;
 3216         }
 3217 
 3218         BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
 3219         *txidx = ctx.bge_idx;
 3220 
 3221         return(0);
 3222 }
 3223 
 3224 /*
 3225  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3226  * to the mbuf data regions directly in the transmit descriptors.
 3227  */
 3228 static void
 3229 bge_start_locked(ifp)
 3230         struct ifnet *ifp;
 3231 {
 3232         struct bge_softc *sc;
 3233         struct mbuf *m_head = NULL;
 3234         u_int32_t prodidx = 0;
 3235         int count = 0;
 3236 
 3237         sc = ifp->if_softc;
 3238 
 3239         if (!sc->bge_link && IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3240                 return;
 3241 
 3242         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
 3243 
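              /*
               * Keep queueing frames until the queue empties or we wrap
               * into a ring slot still owned by an unfinished transmission.
               */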
 3244         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3245                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 3246                 if (m_head == NULL)
 3247                         break;
 3248 
 3249                 /*
 3250                  * XXX
 3251                  * The code inside the if() block is never reached since we
 3252                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
 3253                  * requests to checksum TCP/UDP in a fragmented packet.
 3254                  *
 3255                  * XXX
 3256                  * safety overkill.  If this is a fragmented packet chain
 3257                  * with delayed TCP/UDP checksums, then only encapsulate
 3258                  * it if we have enough descriptors to handle the entire
 3259                  * chain at once.
 3260                  * (paranoia -- may not actually be needed)
 3261                  */
 3262                 if (m_head->m_flags & M_FIRSTFRAG &&
 3263                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3264                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3265                             m_head->m_pkthdr.csum_data + 16) {
 3266                                 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 3267                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 3268                                 break;
 3269                         }
 3270                 }
 3271 
 3272                 /*
 3273                  * Pack the data into the transmit ring. If we
 3274                  * don't have room, set the OACTIVE flag and wait
 3275                  * for the NIC to drain the ring.
 3276                  */
 3277                 if (bge_encap(sc, m_head, &prodidx)) {
 3278                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 3279                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 3280                         break;
 3281                 }
 3282                 ++count;
 3283 
 3284                 /*
 3285                  * If there's a BPF listener, bounce a copy of this frame
 3286                  * to him.
 3287                  */
 3288                 BPF_MTAP(ifp, m_head);
 3289         }
 3290 
 3291         if (count == 0) {
 3292                 /* no packets were dequeued */
 3293                 return;
 3294         }
 3295 
 3296         /* Transmit */
 3297         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3298         /* 5700 b2 errata */
 3299         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 3300                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3301 
 3302         /*
 3303          * Set a timeout in case the chip goes out to lunch.
 3304          */
 3305         ifp->if_timer = 5;
 3306 
 3307         return;
 3308 }
 3309 
 3310 /*
 3311  * Entry point for the main transmit routine: acquire the driver lock
 3312  * and hand the send queue to bge_start_locked().
 3313  */
 3314 static void
 3315 bge_start(ifp)
 3316         struct ifnet *ifp;
 3317 {
 3318         struct bge_softc *sc;
 3319 
 3320         sc = ifp->if_softc;
 3321         BGE_LOCK(sc);
 3322         bge_start_locked(ifp);
 3323         BGE_UNLOCK(sc);
 3324 }
 3325 
 3326 static void
 3327 bge_init_locked(sc)
 3328         struct bge_softc *sc;
 3329 {
 3330         struct ifnet *ifp;
 3331         u_int16_t *m;
 3332 
 3333         BGE_LOCK_ASSERT(sc);
 3334 
 3335         ifp = sc->bge_ifp;
 3336 
 3337         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3338                 return;
 3339 
 3340         /* Cancel pending I/O and flush buffers. */
 3341         bge_stop(sc);
 3342         bge_reset(sc);
 3343         bge_chipinit(sc);
 3344 
 3345         /*
 3346          * Init the various state machines, ring
 3347          * control blocks and firmware.
 3348          */
 3349         if (bge_blockinit(sc)) {
 3350                 printf("bge%d: initialization failure\n", sc->bge_unit);
 3351                 return;
 3352         }
 3353 
 3354         ifp = sc->bge_ifp;
 3355 
 3356         /* Specify MTU. */
 3357         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3358             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3359 
 3360         /* Load our MAC address. */
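              /* (Written as three 16-bit words in network byte order.) */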
 3361         m = (u_int16_t *)&IFP2ENADDR(sc->bge_ifp)[0];
 3362         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3363         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3364 
 3365         /* Enable or disable promiscuous mode as needed. */
 3366         if (ifp->if_flags & IFF_PROMISC) {
 3367                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3368         } else {
 3369                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3370         }
 3371 
 3372         /* Program multicast filter. */
 3373         bge_setmulti(sc);
 3374 
 3375         /* Init RX ring. */
 3376         bge_init_rx_ring_std(sc);
 3377 
 3378         /*
 3379          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 3380          * memory to ensure that the chip has in fact read the first
 3381          * entry of the ring.
 3382          */
 3383         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
 3384                 u_int32_t               v, i;
 3385                 for (i = 0; i < 10; i++) {
 3386                         DELAY(20);
 3387                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
 3388                         if (v == (MCLBYTES - ETHER_ALIGN))
 3389                                 break;
 3390                 }
 3391                 if (i == 10)
 3392                         printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
 3393                             sc->bge_unit);
 3394         }
 3395 
 3396         /* Init jumbo RX ring. */
 3397         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3398                 bge_init_rx_ring_jumbo(sc);
 3399 
 3400         /* Init our RX return ring index */
 3401         sc->bge_rx_saved_considx = 0;
 3402 
 3403         /* Init TX ring. */
 3404         bge_init_tx_ring(sc);
 3405 
 3406         /* Turn on transmitter */
 3407         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3408 
 3409         /* Turn on receiver */
 3410         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3411 
 3412         /* Tell firmware we're alive. */
 3413         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3414 
 3415         /* Enable host interrupts. */
 3416         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
 3417         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
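              /*
               * A zero in the IRQ0 mailbox unmasks interrupts; bge_intr()
               * and bge_stop() write a one there to mask them.
               */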
 3418         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3419 
 3420         bge_ifmedia_upd(ifp);
 3421 
 3422         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 3423         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3424 
 3425         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3426 
 3427         return;
 3428 }
 3429 
 3430 static void
 3431 bge_init(xsc)
 3432         void *xsc;
 3433 {
 3434         struct bge_softc *sc = xsc;
 3435 
 3436         BGE_LOCK(sc);
 3437         bge_init_locked(sc);
 3438         BGE_UNLOCK(sc);
 3439 
 3440         return;
 3441 }
 3442 
 3443 /*
 3444  * Set media options.
 3445  */
 3446 static int
 3447 bge_ifmedia_upd(ifp)
 3448         struct ifnet *ifp;
 3449 {
 3450         struct bge_softc *sc;
 3451         struct mii_data *mii;
 3452         struct ifmedia *ifm;
 3453 
 3454         sc = ifp->if_softc;
 3455         ifm = &sc->bge_ifmedia;
 3456 
 3457         /* If this is a 1000baseX NIC, enable the TBI port. */
 3458         if (sc->bge_tbi) {
 3459                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3460                         return(EINVAL);
 3461                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
 3462                 case IFM_AUTO:
 3463 #ifndef BGE_FAKE_AUTONEG
 3464                         /*
 3465                          * The BCM5704 ASIC appears to have a special
 3466                          * mechanism for programming the autoneg
 3467                          * advertisement registers in TBI mode.
 3468                          */
 3469                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 3470                                 uint32_t sgdig;
 3471                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
 3472                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
 3473                                 sgdig |= BGE_SGDIGCFG_AUTO|
 3474                                     BGE_SGDIGCFG_PAUSE_CAP|
 3475                                     BGE_SGDIGCFG_ASYM_PAUSE;
 3476                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
 3477                                     sgdig|BGE_SGDIGCFG_SEND);
 3478                                 DELAY(5);
 3479                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
 3480                         }
 3481 #endif
 3482                         break;
 3483                 case IFM_1000_SX:
 3484                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3485                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3486                                     BGE_MACMODE_HALF_DUPLEX);
 3487                         } else {
 3488                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3489                                     BGE_MACMODE_HALF_DUPLEX);
 3490                         }
 3491                         break;
 3492                 default:
 3493                         return(EINVAL);
 3494                 }
 3495                 return(0);
 3496         }
 3497 
 3498         mii = device_get_softc(sc->bge_miibus);
 3499         sc->bge_link = 0;
 3500         if (mii->mii_instance) {
 3501                 struct mii_softc *miisc;
 3502                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 3503                     miisc = LIST_NEXT(miisc, mii_list))
 3504                         mii_phy_reset(miisc);
 3505         }
 3506         mii_mediachg(mii);
 3507 
 3508         return(0);
 3509 }
 3510 
 3511 /*
 3512  * Report current media status.
 3513  */
 3514 static void
 3515 bge_ifmedia_sts(ifp, ifmr)
 3516         struct ifnet *ifp;
 3517         struct ifmediareq *ifmr;
 3518 {
 3519         struct bge_softc *sc;
 3520         struct mii_data *mii;
 3521 
 3522         sc = ifp->if_softc;
 3523 
 3524         if (sc->bge_tbi) {
 3525                 ifmr->ifm_status = IFM_AVALID;
 3526                 ifmr->ifm_active = IFM_ETHER;
 3527                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3528                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3529                         ifmr->ifm_status |= IFM_ACTIVE;
 3530                 ifmr->ifm_active |= IFM_1000_SX;
 3531                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3532                         ifmr->ifm_active |= IFM_HDX;
 3533                 else
 3534                         ifmr->ifm_active |= IFM_FDX;
 3535                 return;
 3536         }
 3537 
 3538         mii = device_get_softc(sc->bge_miibus);
 3539         mii_pollstat(mii);
 3540         ifmr->ifm_active = mii->mii_media_active;
 3541         ifmr->ifm_status = mii->mii_media_status;
 3542 
 3543         return;
 3544 }
 3545 
 3546 static int
 3547 bge_ioctl(ifp, command, data)
 3548         struct ifnet *ifp;
 3549         u_long command;
 3550         caddr_t data;
 3551 {
 3552         struct bge_softc *sc = ifp->if_softc;
 3553         struct ifreq *ifr = (struct ifreq *) data;
 3554         int mask, error = 0;
 3555         struct mii_data *mii;
 3556 
 3557         switch(command) {
 3558         case SIOCSIFMTU:
 3559                 /* Disallow jumbo frames on 5705 and 5750. */
 3560                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3561                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
 3562                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
 3563                         error = EINVAL;
 3564                 else {
 3565                         ifp->if_mtu = ifr->ifr_mtu;
 3566                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3567                         bge_init(sc);
 3568                 }
 3569                 break;
 3570         case SIOCSIFFLAGS:
 3571                 BGE_LOCK(sc);
 3572                 if (ifp->if_flags & IFF_UP) {
 3573                         /*
 3574                          * If only the state of the PROMISC flag changed,
 3575                          * then just use the 'set promisc mode' command
 3576                          * instead of reinitializing the entire NIC. Doing
 3577                          * a full re-init means reloading the firmware and
 3578                          * waiting for it to start up, which may take a
 3579                          * second or two.
 3580                          */
 3581                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3582                             ifp->if_flags & IFF_PROMISC &&
 3583                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3584                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3585                                     BGE_RXMODE_RX_PROMISC);
 3586                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3587                             !(ifp->if_flags & IFF_PROMISC) &&
 3588                             sc->bge_if_flags & IFF_PROMISC) {
 3589                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3590                                     BGE_RXMODE_RX_PROMISC);
 3591                         } else
 3592                                 bge_init_locked(sc);
 3593                 } else {
 3594                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3595                                 bge_stop(sc);
 3596                         }
 3597                 }
 3598                 sc->bge_if_flags = ifp->if_flags;
 3599                 BGE_UNLOCK(sc);
 3600                 error = 0;
 3601                 break;
 3602         case SIOCADDMULTI:
 3603         case SIOCDELMULTI:
 3604                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3605                         BGE_LOCK(sc);
 3606                         bge_setmulti(sc);
 3607                         BGE_UNLOCK(sc);
 3608                         error = 0;
 3609                 }
 3610                 break;
 3611         case SIOCSIFMEDIA:
 3612         case SIOCGIFMEDIA:
 3613                 if (sc->bge_tbi) {
 3614                         error = ifmedia_ioctl(ifp, ifr,
 3615                             &sc->bge_ifmedia, command);
 3616                 } else {
 3617                         mii = device_get_softc(sc->bge_miibus);
 3618                         error = ifmedia_ioctl(ifp, ifr,
 3619                             &mii->mii_media, command);
 3620                 }
 3621                 break;
 3622         case SIOCSIFCAP:
 3623                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3624                 /* NB: the code for RX csum offload is disabled for now */
 3625                 if (mask & IFCAP_TXCSUM) {
 3626                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3627                         if (IFCAP_TXCSUM & ifp->if_capenable)
 3628                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
 3629                         else
 3630                                 ifp->if_hwassist = 0;
 3631                 }
 3632                 error = 0;
 3633                 break;
 3634         default:
 3635                 error = ether_ioctl(ifp, command, data);
 3636                 break;
 3637         }
 3638 
 3639         return(error);
 3640 }
 3641 
 3642 static void
 3643 bge_watchdog(ifp)
 3644         struct ifnet *ifp;
 3645 {
 3646         struct bge_softc *sc;
 3647 
 3648         sc = ifp->if_softc;
 3649 
 3650         printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
 3651 
 3652         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3653         bge_init(sc);
 3654 
 3655         ifp->if_oerrors++;
 3656 
 3657         return;
 3658 }
 3659 
 3660 /*
 3661  * Stop the adapter and free any mbufs allocated to the
 3662  * RX and TX lists.
 3663  */
 3664 static void
 3665 bge_stop(sc)
 3666         struct bge_softc *sc;
 3667 {
 3668         struct ifnet *ifp;
 3669         struct ifmedia_entry *ifm;
 3670         struct mii_data *mii = NULL;
 3671         int mtmp, itmp;
 3672 
 3673         BGE_LOCK_ASSERT(sc);
 3674 
 3675         ifp = sc->bge_ifp;
 3676 
 3677         if (!sc->bge_tbi)
 3678                 mii = device_get_softc(sc->bge_miibus);
 3679 
 3680         callout_stop(&sc->bge_stat_ch);
 3681 
 3682         /*
 3683          * Disable all of the receiver blocks
 3684          */
 3685         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3686         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3687         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3688         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3689             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3690                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3691         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3692         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3693         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3694 
 3695         /*
 3696          * Disable all of the transmit blocks
 3697          */
 3698         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3699         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3700         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3701         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3702         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3703         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3704             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3705                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3706         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3707 
 3708         /*
 3709          * Shut down all of the memory managers and related
 3710          * state machines.
 3711          */
 3712         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3713         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3714         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3715             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3716                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3717         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3718         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3719         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3720             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 3721                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3722                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3723         }
 3724 
 3725         /* Disable host interrupts. */
 3726         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3727         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3728 
 3729         /*
 3730          * Tell firmware we're shutting down.
 3731          */
 3732         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3733 
 3734         /* Free the RX lists. */
 3735         bge_free_rx_ring_std(sc);
 3736 
 3737         /* Free jumbo RX list. */
 3738         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3739             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3740                 bge_free_rx_ring_jumbo(sc);
 3741 
 3742         /* Free TX buffers. */
 3743         bge_free_tx_ring(sc);
 3744 
 3745         /*
 3746          * Isolate/power down the PHY, but leave the media selection
 3747          * unchanged so that things will be put back to normal when
 3748          * we bring the interface back up.
 3749          */
 3750         if (!sc->bge_tbi) {
 3751                 itmp = ifp->if_flags;
 3752                 ifp->if_flags |= IFF_UP;
 3753                 /*
 3754                  * If we are called from bge_detach(), mii is already NULL.
 3755                  */
 3756                 if (mii != NULL) {
 3757                         ifm = mii->mii_media.ifm_cur;
 3758                         mtmp = ifm->ifm_media;
 3759                         ifm->ifm_media = IFM_ETHER|IFM_NONE;
 3760                         mii_mediachg(mii);
 3761                         ifm->ifm_media = mtmp;
 3762                 }
 3763                 ifp->if_flags = itmp;
 3764         }
 3765 
 3766         sc->bge_link = 0;
 3767 
 3768         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3769 
 3770         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3771 
 3772         return;
 3773 }
 3774 
 3775 /*
 3776  * Stop all chip I/O so that the kernel's probe routines don't
 3777  * get confused by errant DMAs when rebooting.
 3778  */
 3779 static void
 3780 bge_shutdown(dev)
 3781         device_t dev;
 3782 {
 3783         struct bge_softc *sc;
 3784 
 3785         sc = device_get_softc(dev);
 3786 
 3787         BGE_LOCK(sc);
 3788         bge_stop(sc);
 3789         bge_reset(sc);
 3790         BGE_UNLOCK(sc);
 3791 
 3792         return;
 3793 }
