FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c


    1 /*-
    2  * Copyright (c) 2001 Wind River Systems
    3  * Copyright (c) 1997, 1998, 1999, 2001
    4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. All advertising materials mentioning features or use of this software
   15  *    must display the following acknowledgement:
   16  *      This product includes software developed by Bill Paul.
   17  * 4. Neither the name of the author nor the names of any co-contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   31  * THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: releng/5.4/sys/dev/bge/if_bge.c 145942 2005-05-06 00:38:51Z dwhite $");
   36 
   37 /*
   38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
   39  *
   40  * The Broadcom BCM5700 is based on technology originally developed by
   41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
   42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
   43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
   44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
   45  * frames, highly configurable RX filtering, and 16 RX and TX queues
   46  * (which, along with RX filter rules, can be used for QoS applications).
   47  * Other features, such as TCP segmentation, may be available as part
   48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
   49  * firmware images can be stored in hardware and need not be compiled
   50  * into the driver.
   51  *
   52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
   53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
   54  *
   55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
   56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
   57  * does not support external SSRAM.
   58  *
   59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
   60  * brand name, which is functionally similar but lacks PCI-X support.
   61  *
   62  * Without external SSRAM, you can have at most 4 TX rings,
   63  * and the use of the mini RX ring is disabled. This seems to imply
   64  * that these features are simply not available on the BCM5701. As a
   65  * result, this driver does not implement any support for the mini RX
   66  * ring.
   67  */
   68 
   69 #include <sys/param.h>
   70 #include <sys/endian.h>
   71 #include <sys/systm.h>
   72 #include <sys/sockio.h>
   73 #include <sys/mbuf.h>
   74 #include <sys/malloc.h>
   75 #include <sys/kernel.h>
   76 #include <sys/module.h>
   77 #include <sys/socket.h>
   78 #include <sys/queue.h>
   79 
   80 #include <net/if.h>
   81 #include <net/if_arp.h>
   82 #include <net/ethernet.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 
   86 #include <net/bpf.h>
   87 
   88 #include <net/if_types.h>
   89 #include <net/if_vlan_var.h>
   90 
   91 #include <netinet/in_systm.h>
   92 #include <netinet/in.h>
   93 #include <netinet/ip.h>
   94 
   95 #include <machine/clock.h>      /* for DELAY */
   96 #include <machine/bus_memio.h>
   97 #include <machine/bus.h>
   98 #include <machine/resource.h>
   99 #include <sys/bus.h>
  100 #include <sys/rman.h>
  101 
  102 #include <dev/mii/mii.h>
  103 #include <dev/mii/miivar.h>
  104 #include "miidevs.h"
  105 #include <dev/mii/brgphyreg.h>
  106 
  107 #include <dev/pci/pcireg.h>
  108 #include <dev/pci/pcivar.h>
  109 
  110 #include <dev/bge/if_bgereg.h>
  111 
  112 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
  113 
  114 MODULE_DEPEND(bge, pci, 1, 1, 1);
  115 MODULE_DEPEND(bge, ether, 1, 1, 1);
  116 MODULE_DEPEND(bge, miibus, 1, 1, 1);
  117 
  118 /* "controller miibus0" required.  See GENERIC if you get errors here. */
  119 #include "miibus_if.h"
  120 
  121 /*
  122  * Various supported device vendors/types and their names. Note: the
  123  * spec seems to indicate that the hardware still has Alteon's vendor
  124  * ID burned into it, though it will always be overridden by the vendor
  125  * ID in the EEPROM. Just to be safe, we cover all possibilities.
  126  */
  127 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
  128 
  129 static struct bge_type bge_devs[] = {
  130         { ALT_VENDORID, ALT_DEVICEID_BCM5700,
  131                 "Broadcom BCM5700 Gigabit Ethernet" },
  132         { ALT_VENDORID, ALT_DEVICEID_BCM5701,
  133                 "Broadcom BCM5701 Gigabit Ethernet" },
  134         { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
  135                 "Broadcom BCM5700 Gigabit Ethernet" },
  136         { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
  137                 "Broadcom BCM5701 Gigabit Ethernet" },
  138         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
  139                 "Broadcom BCM5702 Gigabit Ethernet" },
  140         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
  141                 "Broadcom BCM5702X Gigabit Ethernet" },
  142         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
  143                 "Broadcom BCM5703 Gigabit Ethernet" },
  144         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
  145                 "Broadcom BCM5703X Gigabit Ethernet" },
  146         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
  147                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
  148         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
  149                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
  150         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
  151                 "Broadcom BCM5705 Gigabit Ethernet" },
  152         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
  153                 "Broadcom BCM5705K Gigabit Ethernet" },
  154         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
  155                 "Broadcom BCM5705M Gigabit Ethernet" },
  156         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
  157                 "Broadcom BCM5705M Gigabit Ethernet" },
  158         { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
  159                 "Broadcom BCM5721 Gigabit Ethernet" },
  160         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
  161                 "Broadcom BCM5750 Gigabit Ethernet" },
  162         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
  163                 "Broadcom BCM5750M Gigabit Ethernet" },
  164         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
  165                 "Broadcom BCM5751 Gigabit Ethernet" },
  166         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
  167                 "Broadcom BCM5751M Gigabit Ethernet" },
  168         { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
  169                 "Broadcom BCM5782 Gigabit Ethernet" },
  170         { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
  171                 "Broadcom BCM5788 Gigabit Ethernet" },
  172         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
  173                 "Broadcom BCM5901 Fast Ethernet" },
  174         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
  175                 "Broadcom BCM5901A2 Fast Ethernet" },
  176         { SK_VENDORID, SK_DEVICEID_ALTIMA,
  177                 "SysKonnect Gigabit Ethernet" },
  178         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
  179                 "Altima AC1000 Gigabit Ethernet" },
  180         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
  181                 "Altima AC1002 Gigabit Ethernet" },
  182         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
  183                 "Altima AC9100 Gigabit Ethernet" },
  184         { 0, 0, NULL }
  185 };
  186 
  187 static int bge_probe            (device_t);
  188 static int bge_attach           (device_t);
  189 static int bge_detach           (device_t);
  190 static void bge_release_resources
  191                                 (struct bge_softc *);
  192 static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
  193 static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
  194                                     bus_size_t, int);
  195 static int bge_dma_alloc        (device_t);
  196 static void bge_dma_free        (struct bge_softc *);
  197 
  198 static void bge_txeof           (struct bge_softc *);
  199 static void bge_rxeof           (struct bge_softc *);
  200 
  201 static void bge_tick_locked     (struct bge_softc *);
  202 static void bge_tick            (void *);
  203 static void bge_stats_update    (struct bge_softc *);
  204 static void bge_stats_update_regs
  205                                 (struct bge_softc *);
  206 static int bge_encap            (struct bge_softc *, struct mbuf *,
  207                                         u_int32_t *);
  208 
  209 static void bge_intr            (void *);
  210 static void bge_start_locked    (struct ifnet *);
  211 static void bge_start           (struct ifnet *);
  212 static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
  213 static void bge_init_locked     (struct bge_softc *);
  214 static void bge_init            (void *);
  215 static void bge_stop            (struct bge_softc *);
  216 static void bge_watchdog        (struct ifnet *);
  217 static void bge_shutdown        (device_t);
  218 static int bge_ifmedia_upd      (struct ifnet *);
  219 static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
  220 
  221 static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
  222 static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);
  223 
  224 static void bge_setmulti        (struct bge_softc *);
  225 
  226 static void bge_handle_events   (struct bge_softc *);
  227 static int bge_alloc_jumbo_mem  (struct bge_softc *);
  228 static void bge_free_jumbo_mem  (struct bge_softc *);
  229 static void *bge_jalloc         (struct bge_softc *);
  230 static void bge_jfree           (void *, void *);
  231 static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
  232 static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
  233 static int bge_init_rx_ring_std (struct bge_softc *);
  234 static void bge_free_rx_ring_std        (struct bge_softc *);
  235 static int bge_init_rx_ring_jumbo       (struct bge_softc *);
  236 static void bge_free_rx_ring_jumbo      (struct bge_softc *);
  237 static void bge_free_tx_ring    (struct bge_softc *);
  238 static int bge_init_tx_ring     (struct bge_softc *);
  239 
  240 static int bge_chipinit         (struct bge_softc *);
  241 static int bge_blockinit        (struct bge_softc *);
  242 
  243 #ifdef notdef
  244 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
  245 static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
  246 static void bge_vpd_read        (struct bge_softc *);
  247 #endif
  248 
  249 static u_int32_t bge_readmem_ind
  250                                 (struct bge_softc *, int);
  251 static void bge_writemem_ind    (struct bge_softc *, int, int);
  252 #ifdef notdef
  253 static u_int32_t bge_readreg_ind
  254                                 (struct bge_softc *, int);
  255 #endif
  256 static void bge_writereg_ind    (struct bge_softc *, int, int);
  257 
  258 static int bge_miibus_readreg   (device_t, int, int);
  259 static int bge_miibus_writereg  (device_t, int, int, int);
  260 static void bge_miibus_statchg  (device_t);
  261 
  262 static void bge_reset           (struct bge_softc *);
  263 
  264 static device_method_t bge_methods[] = {
  265         /* Device interface */
  266         DEVMETHOD(device_probe,         bge_probe),
  267         DEVMETHOD(device_attach,        bge_attach),
  268         DEVMETHOD(device_detach,        bge_detach),
  269         DEVMETHOD(device_shutdown,      bge_shutdown),
  270 
  271         /* bus interface */
  272         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  273         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  274 
  275         /* MII interface */
  276         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
  277         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
  278         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
  279 
  280         { 0, 0 }
  281 };
  282 
  283 static driver_t bge_driver = {
  284         "bge",
  285         bge_methods,
  286         sizeof(struct bge_softc)
  287 };
  288 
  289 static devclass_t bge_devclass;
  290 
  291 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
  292 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
  293 
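      /*
       * Indirect access helpers: the chip exposes its internal address
       * space through a 4-byte window in PCI configuration space.
       * Writing an offset to BGE_PCI_MEMWIN_BASEADDR (or
       * BGE_PCI_REG_BASEADDR for registers) positions the window, and
       * the word is then read or written through BGE_PCI_MEMWIN_DATA
       * (or BGE_PCI_REG_DATA). As an illustrative sketch, a word of
       * the statistics block could be fetched with:
       *
       *      val = bge_readmem_ind(sc, BGE_STATS_BLOCK);
       */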
  294 static u_int32_t
  295 bge_readmem_ind(sc, off)
  296         struct bge_softc *sc;
  297         int off;
  298 {
  299         device_t dev;
  300 
  301         dev = sc->bge_dev;
  302 
  303         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  304         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
  305 }
  306 
  307 static void
  308 bge_writemem_ind(sc, off, val)
  309         struct bge_softc *sc;
  310         int off, val;
  311 {
  312         device_t dev;
  313 
  314         dev = sc->bge_dev;
  315 
  316         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  317         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
  318 
  319         return;
  320 }
  321 
  322 #ifdef notdef
  323 static u_int32_t
  324 bge_readreg_ind(sc, off)
  325         struct bge_softc *sc;
  326         int off;
  327 {
  328         device_t dev;
  329 
  330         dev = sc->bge_dev;
  331 
  332         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  333         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
  334 }
  335 #endif
  336 
  337 static void
  338 bge_writereg_ind(sc, off, val)
  339         struct bge_softc *sc;
  340         int off, val;
  341 {
  342         device_t dev;
  343 
  344         dev = sc->bge_dev;
  345 
  346         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  347         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
  348 
  349         return;
  350 }
  351 
  352 /*
  353  * Map a single buffer address.
  354  */
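      /*
       * Note: this is a bus_dmamap_load(9) callback. It records the
       * bus address of the single DMA segment in the bge_dmamap_arg
       * supplied by the caller, which reads it back once the load
       * completes.
       */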
  355 
  356 static void
  357 bge_dma_map_addr(arg, segs, nseg, error)
  358         void *arg;
  359         bus_dma_segment_t *segs;
  360         int nseg;
  361         int error;
  362 {
  363         struct bge_dmamap_arg *ctx;
  364 
  365         if (error)
  366                 return;
  367 
  368         ctx = arg;
  369 
  370         if (nseg > ctx->bge_maxsegs) {
  371                 ctx->bge_maxsegs = 0;
  372                 return;
  373         }
  374 
  375         ctx->bge_busaddr = segs->ds_addr;
  376 
  377         return;
  378 }
  379 
  380 /*
  381  * Map an mbuf chain into a TX ring.
  382  */
  383 
  384 static void
  385 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
  386         void *arg;
  387         bus_dma_segment_t *segs;
  388         int nseg;
  389         bus_size_t mapsize;
  390         int error;
  391 {
  392         struct bge_dmamap_arg *ctx;
  393         struct bge_tx_bd *d = NULL;
  394         int i = 0, idx;
  395 
  396         if (error)
  397                 return;
  398 
  399         ctx = arg;
  400 
  401         /* Signal error to caller if there are too many segments. */
  402         if (nseg > ctx->bge_maxsegs) {
  403                 ctx->bge_maxsegs = 0;
  404                 return;
  405         }
  406 
  407         idx = ctx->bge_idx;
  408         while(1) {
  409                 d = &ctx->bge_ring[idx];
  410                 d->bge_addr.bge_addr_lo =
  411                     htole32(BGE_ADDR_LO(segs[i].ds_addr));
  412                 d->bge_addr.bge_addr_hi =
  413                     htole32(BGE_ADDR_HI(segs[i].ds_addr));
  414                 d->bge_len = htole16(segs[i].ds_len);
  415                 d->bge_flags = htole16(ctx->bge_flags);
  416                 i++;
  417                 if (i == nseg)
  418                         break;
  419                 BGE_INC(idx, BGE_TX_RING_CNT);
  420         }
  421 
  422         d->bge_flags |= htole16(BGE_TXBDFLAG_END);
  423         ctx->bge_maxsegs = nseg;
  424         ctx->bge_idx = idx;
  425 
  426         return;
  427 }
  428 
  429 
  430 #ifdef notdef
  431 static u_int8_t
  432 bge_vpd_readbyte(sc, addr)
  433         struct bge_softc *sc;
  434         int addr;
  435 {
  436         int i;
  437         device_t dev;
  438         u_int32_t val;
  439 
  440         dev = sc->bge_dev;
  441         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
  442         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  443                 DELAY(10);
  444                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
  445                         break;
  446         }
  447 
  448         if (i == BGE_TIMEOUT * 10) {
  449                 printf("bge%d: VPD read timed out\n", sc->bge_unit);
  450                 return(0);
  451         }
  452 
  453         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
  454 
  455         return((val >> ((addr % 4) * 8)) & 0xFF);
  456 }
  457 
  458 static void
  459 bge_vpd_read_res(sc, res, addr)
  460         struct bge_softc *sc;
  461         struct vpd_res *res;
  462         int addr;
  463 {
  464         int i;
  465         u_int8_t *ptr;
  466 
  467         ptr = (u_int8_t *)res;
  468         for (i = 0; i < sizeof(struct vpd_res); i++)
  469                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
  470 
  471         return;
  472 }
  473 
  474 static void
  475 bge_vpd_read(sc)
  476         struct bge_softc *sc;
  477 {
  478         int pos = 0, i;
  479         struct vpd_res res;
  480 
  481         if (sc->bge_vpd_prodname != NULL)
  482                 free(sc->bge_vpd_prodname, M_DEVBUF);
  483         if (sc->bge_vpd_readonly != NULL)
  484                 free(sc->bge_vpd_readonly, M_DEVBUF);
  485         sc->bge_vpd_prodname = NULL;
  486         sc->bge_vpd_readonly = NULL;
  487 
  488         bge_vpd_read_res(sc, &res, pos);
  489 
  490         if (res.vr_id != VPD_RES_ID) {
  491                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  492                         sc->bge_unit, VPD_RES_ID, res.vr_id);
  493                 return;
  494         }
  495 
  496         pos += sizeof(res);
  497         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
  498         for (i = 0; i < res.vr_len; i++)
  499                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
  500         sc->bge_vpd_prodname[i] = '\0';
  501         pos += i;
  502 
  503         bge_vpd_read_res(sc, &res, pos);
  504 
  505         if (res.vr_id != VPD_RES_READ) {
  506                 printf("bge%d: bad VPD resource id: expected %x got %x\n",
  507                     sc->bge_unit, VPD_RES_READ, res.vr_id);
  508                 return;
  509         }
  510 
  511         pos += sizeof(res);
  512         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
  513         for (i = 0; i < res.vr_len; i++)
  514                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
  515 
  516         return;
  517 }
  518 #endif
  519 
  520 /*
  521  * Read a byte of data stored in the EEPROM at address 'addr.' The
  522  * BCM570x supports both the traditional bitbang interface and an
  523  * auto access interface for reading the EEPROM. We use the auto
  524  * access method.
  525  */
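      /*
       * Note: the auto access interface hands back a whole 32-bit word
       * in BGE_EE_DATA; the shift below selects byte lane (addr % 4).
       * As a worked example, addr 6 yields lane 2, i.e. bits 23:16 of
       * the word covering offsets 4-7.
       */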
  526 static u_int8_t
  527 bge_eeprom_getbyte(sc, addr, dest)
  528         struct bge_softc *sc;
  529         int addr;
  530         u_int8_t *dest;
  531 {
  532         int i;
  533         u_int32_t byte = 0;
  534 
  535         /*
  536          * Enable use of auto EEPROM access so we can avoid
  537          * having to use the bitbang method.
  538          */
  539         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
  540 
  541         /* Reset the EEPROM, load the clock period. */
  542         CSR_WRITE_4(sc, BGE_EE_ADDR,
  543             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
  544         DELAY(20);
  545 
  546         /* Issue the read EEPROM command. */
  547         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
  548 
  549         /* Wait for completion */
  550         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
  551                 DELAY(10);
  552                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
  553                         break;
  554         }
  555 
  556         if (i == BGE_TIMEOUT * 10) {
  557                 printf("bge%d: eeprom read timed out\n", sc->bge_unit);
  558                 return(0);
  559         }
  560 
  561         /* Get result. */
  562         byte = CSR_READ_4(sc, BGE_EE_DATA);
  563 
  564         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
  565 
  566         return(0);
  567 }
  568 
  569 /*
  570  * Read a sequence of bytes from the EEPROM.
  571  */
  572 static int
  573 bge_read_eeprom(sc, dest, off, cnt)
  574         struct bge_softc *sc;
  575         caddr_t dest;
  576         int off;
  577         int cnt;
  578 {
  579         int err = 0, i;
  580         u_int8_t byte = 0;
  581 
  582         for (i = 0; i < cnt; i++) {
  583                 err = bge_eeprom_getbyte(sc, off + i, &byte);
  584                 if (err)
  585                         break;
  586                 *(dest + i) = byte;
  587         }
  588 
  589         return(err ? 1 : 0);
  590 }
  591 
  592 static int
  593 bge_miibus_readreg(dev, phy, reg)
  594         device_t dev;
  595         int phy, reg;
  596 {
  597         struct bge_softc *sc;
  598         u_int32_t val, autopoll;
  599         int i;
  600 
  601         sc = device_get_softc(dev);
  602 
  603         /*
  604          * Broadcom's own driver always assumes the internal
  605          * PHY is at GMII address 1. On some chips, the PHY responds
  606          * to accesses at all addresses, which could cause us to
  607  * bogusly attach the PHY 32 times at probe time. Always
  608  * restricting the lookup to address 1 is simpler than
  609  * trying to figure out which chip revisions should be
  610          * special-cased.
  611          */
  612         if (phy != 1)
  613                 return(0);
  614 
  615         /* Reading with autopolling on may trigger PCI errors */
  616         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  617         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  618                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  619                 DELAY(40);
  620         }
  621 
  622         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
  623             BGE_MIPHY(phy)|BGE_MIREG(reg));
  624 
  625         for (i = 0; i < BGE_TIMEOUT; i++) {
  626                 val = CSR_READ_4(sc, BGE_MI_COMM);
  627                 if (!(val & BGE_MICOMM_BUSY))
  628                         break;
  629         }
  630 
  631         if (i == BGE_TIMEOUT) {
  632                 printf("bge%d: PHY read timed out\n", sc->bge_unit);
  633                 val = 0;
  634                 goto done;
  635         }
  636 
  637         val = CSR_READ_4(sc, BGE_MI_COMM);
  638 
  639 done:
  640         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  641                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  642                 DELAY(40);
  643         }
  644 
  645         if (val & BGE_MICOMM_READFAIL)
  646                 return(0);
  647 
  648         return(val & 0xFFFF);
  649 }
  650 
  651 static int
  652 bge_miibus_writereg(dev, phy, reg, val)
  653         device_t dev;
  654         int phy, reg, val;
  655 {
  656         struct bge_softc *sc;
  657         u_int32_t autopoll;
  658         int i;
  659 
  660         sc = device_get_softc(dev);
  661 
  662         /* Writing with autopolling on may trigger PCI errors */
  663         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  664         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  665                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  666                 DELAY(40);
  667         }
  668 
  669         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
  670             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
  671 
  672         for (i = 0; i < BGE_TIMEOUT; i++) {
  673                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
  674                         break;
  675         }
  676 
  677         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  678                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  679                 DELAY(40);
  680         }
  681 
  682         if (i == BGE_TIMEOUT) {
  683                 printf("bge%d: PHY write timed out\n", sc->bge_unit);
  684                 return(0);
  685         }
  686 
  687         return(0);
  688 }
  689 
  690 static void
  691 bge_miibus_statchg(dev)
  692         device_t dev;
  693 {
  694         struct bge_softc *sc;
  695         struct mii_data *mii;
  696 
  697         sc = device_get_softc(dev);
  698         mii = device_get_softc(sc->bge_miibus);
  699 
  700         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
  701         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
  702                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
  703         } else {
  704                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
  705         }
  706 
  707         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  708                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  709         } else {
  710                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  711         }
  712 
  713         return;
  714 }
  715 
  716 /*
  717  * Handle events that have triggered interrupts.
  718  */
  719 static void
  720 bge_handle_events(sc)
  721         struct bge_softc                *sc;
  722 {
  723 
  724         return;
  725 }
  726 
  727 /*
  728  * Memory management for jumbo frames.
  729  */
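      /*
       * In outline: one DMA-able block of BGE_JMEM bytes is allocated
       * up front and carved into BGE_JSLOTS buffers of BGE_JLEN bytes
       * each. Free buffers sit on bge_jfree_listhead and buffers lent
       * out to mbufs on bge_jinuse_listhead; bge_jalloc() and
       * bge_jfree() move entries between the lists, and bge_jfree()
       * recovers a buffer's slot index from its offset within the
       * block.
       */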
  730 
  731 static int
  732 bge_alloc_jumbo_mem(sc)
  733         struct bge_softc                *sc;
  734 {
  735         caddr_t                 ptr;
  736         register int            i, error;
  737         struct bge_jpool_entry   *entry;
  738 
  739         /* Create tag for jumbo buffer block */
  740 
  741         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
  742             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
  743             NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
  744             &sc->bge_cdata.bge_jumbo_tag);
  745 
  746         if (error) {
  747                 printf("bge%d: could not allocate jumbo dma tag\n",
  748                     sc->bge_unit);
  749                 return (ENOMEM);
  750         }
  751 
  752         /* Allocate DMA'able memory for jumbo buffer block */
  753 
  754         error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
  755             (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
  756             &sc->bge_cdata.bge_jumbo_map);
  757 
  758         if (error)
  759                 return (ENOMEM);
  760 
  761         SLIST_INIT(&sc->bge_jfree_listhead);
  762         SLIST_INIT(&sc->bge_jinuse_listhead);
  763 
  764         /*
  765          * Now divide it up into 9K pieces and save the addresses
  766          * in an array.
  767          */
  768         ptr = sc->bge_ldata.bge_jumbo_buf;
  769         for (i = 0; i < BGE_JSLOTS; i++) {
  770                 sc->bge_cdata.bge_jslots[i] = ptr;
  771                 ptr += BGE_JLEN;
  772                 entry = malloc(sizeof(struct bge_jpool_entry),
  773                     M_DEVBUF, M_NOWAIT);
  774                 if (entry == NULL) {
  775                         bge_free_jumbo_mem(sc);
  776                         sc->bge_ldata.bge_jumbo_buf = NULL;
  777                         printf("bge%d: no memory for jumbo "
  778                             "buffer queue!\n", sc->bge_unit);
  779                         return(ENOBUFS);
  780                 }
  781                 entry->slot = i;
  782                 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
  783                     entry, jpool_entries);
  784         }
  785 
  786         return(0);
  787 }
  788 
  789 static void
  790 bge_free_jumbo_mem(sc)
  791         struct bge_softc *sc;
  792 {
  793         int i;
  794         struct bge_jpool_entry *entry;
  795 
  796         for (i = 0; i < BGE_JSLOTS; i++) {
  797                 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  798                 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  799                 free(entry, M_DEVBUF);
  800         }
  801 
  802         /* Destroy jumbo buffer block */
  803 
  804         if (sc->bge_ldata.bge_jumbo_buf)
  805                 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
  806                     sc->bge_ldata.bge_jumbo_buf,
  807                     sc->bge_cdata.bge_jumbo_map);
  808 
  809         if (sc->bge_cdata.bge_jumbo_map)
  810                 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
  811                     sc->bge_cdata.bge_jumbo_map);
  812 
  813         if (sc->bge_cdata.bge_jumbo_tag)
  814                 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
  815 
  816         return;
  817 }
  818 
  819 /*
  820  * Allocate a jumbo buffer.
  821  */
  822 static void *
  823 bge_jalloc(sc)
  824         struct bge_softc                *sc;
  825 {
  826         struct bge_jpool_entry   *entry;
  827 
  828         entry = SLIST_FIRST(&sc->bge_jfree_listhead);
  829 
  830         if (entry == NULL) {
  831                 printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
  832                 return(NULL);
  833         }
  834 
  835         SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
  836         SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
  837         return(sc->bge_cdata.bge_jslots[entry->slot]);
  838 }
  839 
  840 /*
  841  * Release a jumbo buffer.
  842  */
  843 static void
  844 bge_jfree(buf, args)
  845         void *buf;
  846         void *args;
  847 {
  848         struct bge_jpool_entry *entry;
  849         struct bge_softc *sc;
  850         int i;
  851 
  852         /* Extract the softc struct pointer. */
  853         sc = (struct bge_softc *)args;
  854 
  855         if (sc == NULL)
  856                 panic("bge_jfree: can't find softc pointer!");
  857 
  858         /* Calculate the slot this buffer belongs to. */
  859 
  860         i = ((vm_offset_t)buf
  861              - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
  862 
  863         if ((i < 0) || (i >= BGE_JSLOTS))
  864                 panic("bge_jfree: asked to free buffer that we don't manage!");
  865 
  866         entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
  867         if (entry == NULL)
  868                 panic("bge_jfree: buffer not in use!");
  869         entry->slot = i;
  870         SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
  871         SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
  872 
  873         return;
  874 }
  875 
  876 
  877 /*
  878  * Initialize a standard receive ring descriptor.
  879  */
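      /*
       * Note on the m_adj() below: shifting the payload by ETHER_ALIGN
       * (2 bytes) leaves the IP header longword-aligned behind the
       * 14-byte Ethernet header. Chips flagged with
       * bge_rx_alignment_bug cannot DMA to such unaligned addresses,
       * so the adjustment is skipped for them.
       */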
  880 static int
  881 bge_newbuf_std(sc, i, m)
  882         struct bge_softc        *sc;
  883         int                     i;
  884         struct mbuf             *m;
  885 {
  886         struct mbuf             *m_new = NULL;
  887         struct bge_rx_bd        *r;
  888         struct bge_dmamap_arg   ctx;
  889         int                     error;
  890 
  891         if (m == NULL) {
  892                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  893                 if (m_new == NULL) {
  894                         return(ENOBUFS);
  895                 }
  896 
  897                 MCLGET(m_new, M_DONTWAIT);
  898                 if (!(m_new->m_flags & M_EXT)) {
  899                         m_freem(m_new);
  900                         return(ENOBUFS);
  901                 }
  902                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  903         } else {
  904                 m_new = m;
  905                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  906                 m_new->m_data = m_new->m_ext.ext_buf;
  907         }
  908 
  909         if (!sc->bge_rx_alignment_bug)
  910                 m_adj(m_new, ETHER_ALIGN);
  911         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
  912         r = &sc->bge_ldata.bge_rx_std_ring[i];
  913         ctx.bge_maxsegs = 1;
  914         ctx.sc = sc;
  915         error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
  916             sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
  917             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  918         if (error || ctx.bge_maxsegs == 0) {
  919                 if (m == NULL)
  920                         m_freem(m_new);
  921                 return(ENOMEM);
  922         }
  923         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
  924         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
  925         r->bge_flags = htole16(BGE_RXBDFLAG_END);
  926         r->bge_len = htole16(m_new->m_len);
  927         r->bge_idx = htole16(i);
  928 
  929         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  930             sc->bge_cdata.bge_rx_std_dmamap[i],
  931             BUS_DMASYNC_PREREAD);
  932 
  933         return(0);
  934 }
  935 
  936 /*
  937  * Initialize a jumbo receive ring descriptor. This allocates
  938  * a jumbo buffer from the pool managed internally by the driver.
  939  */
  940 static int
  941 bge_newbuf_jumbo(sc, i, m)
  942         struct bge_softc *sc;
  943         int i;
  944         struct mbuf *m;
  945 {
  946         struct mbuf *m_new = NULL;
  947         struct bge_rx_bd *r;
  948         struct bge_dmamap_arg ctx;
  949         int error;
  950 
  951         if (m == NULL) {
  952                 caddr_t                 buf = NULL;
  953 
  954                 /* Allocate the mbuf. */
  955                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  956                 if (m_new == NULL) {
  957                         return(ENOBUFS);
  958                 }
  959 
  960                 /* Allocate the jumbo buffer */
  961                 buf = bge_jalloc(sc);
  962                 if (buf == NULL) {
  963                         m_freem(m_new);
  964                         printf("bge%d: jumbo allocation failed "
  965                             "-- packet dropped!\n", sc->bge_unit);
  966                         return(ENOBUFS);
  967                 }
  968 
  969                 /* Attach the buffer to the mbuf. */
  970                 m_new->m_data = (void *) buf;
  971                 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
  972                 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
  973                     (struct bge_softc *)sc, 0, EXT_NET_DRV);
  974         } else {
  975                 m_new = m;
  976                 m_new->m_data = m_new->m_ext.ext_buf;
  977                 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
  978         }
  979 
  980         if (!sc->bge_rx_alignment_bug)
  981                 m_adj(m_new, ETHER_ALIGN);
  982         /* Set up the descriptor. */
  983         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
  984         r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
  985         ctx.bge_maxsegs = 1;
  986         ctx.sc = sc;
  987         error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
  988             sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
  989             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  990         if (error || ctx.bge_maxsegs == 0) {
  991                 if (m == NULL)
  992                         m_freem(m_new);
  993                 return(ENOMEM);
  994         }
  995         r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
  996         r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
  997         r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
  998         r->bge_len = htole16(m_new->m_len);
  999         r->bge_idx = htole16(i);
 1000 
 1001         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 1002             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
 1003             BUS_DMASYNC_PREREAD);
 1004 
 1005         return(0);
 1006 }
 1007 
 1008 /*
 1009  * The standard receive ring has 512 entries. At 2K per mbuf cluster,
 1010  * that's 1MB of memory, which is a lot. For now, we fill only the first
 1011  * 256 ring entries and hope that our CPU is fast enough to keep up with
 1012  * the NIC.
 1013  */
 1014 static int
 1015 bge_init_rx_ring_std(sc)
 1016         struct bge_softc *sc;
 1017 {
 1018         int i;
 1019 
 1020         for (i = 0; i < BGE_SSLOTS; i++) {
 1021                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
 1022                         return(ENOBUFS);
 1023         }
 1024 
 1025         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1026             sc->bge_cdata.bge_rx_std_ring_map,
 1027             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1028 
 1029         sc->bge_std = i - 1;
 1030         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 1031 
 1032         return(0);
 1033 }
 1034 
 1035 static void
 1036 bge_free_rx_ring_std(sc)
 1037         struct bge_softc *sc;
 1038 {
 1039         int i;
 1040 
 1041         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1042                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
 1043                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
 1044                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
 1045                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1046                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1047                 }
 1048                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
 1049                     sizeof(struct bge_rx_bd));
 1050         }
 1051 
 1052         return;
 1053 }
 1054 
 1055 static int
 1056 bge_init_rx_ring_jumbo(sc)
 1057         struct bge_softc *sc;
 1058 {
 1059         int i;
 1060         struct bge_rcb *rcb;
 1061 
 1062         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1063                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
 1064                         return(ENOBUFS);
 1065         }
 1066 
 1067         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1068             sc->bge_cdata.bge_rx_jumbo_ring_map,
 1069             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1070 
 1071         sc->bge_jumbo = i - 1;
 1072 
 1073         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1074         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
 1075         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1076 
 1077         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 1078 
 1079         return(0);
 1080 }
 1081 
 1082 static void
 1083 bge_free_rx_ring_jumbo(sc)
 1084         struct bge_softc *sc;
 1085 {
 1086         int i;
 1087 
 1088         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1089                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
 1090                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
 1091                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
 1092                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 1093                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1094                 }
 1095                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
 1096                     sizeof(struct bge_rx_bd));
 1097         }
 1098 
 1099         return;
 1100 }
 1101 
 1102 static void
 1103 bge_free_tx_ring(sc)
 1104         struct bge_softc *sc;
 1105 {
 1106         int i;
 1107 
 1108         if (sc->bge_ldata.bge_tx_ring == NULL)
 1109                 return;
 1110 
 1111         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1112                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
 1113                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
 1114                         sc->bge_cdata.bge_tx_chain[i] = NULL;
 1115                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 1116                             sc->bge_cdata.bge_tx_dmamap[i]);
 1117                 }
 1118                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
 1119                     sizeof(struct bge_tx_bd));
 1120         }
 1121 
 1122         return;
 1123 }
 1124 
 1125 static int
 1126 bge_init_tx_ring(sc)
 1127         struct bge_softc *sc;
 1128 {
 1129         sc->bge_txcnt = 0;
 1130         sc->bge_tx_saved_considx = 0;
 1131 
 1132         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1133         /* 5700 b2 errata */
 1134         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1135                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
 1136 
 1137         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1138         /* 5700 b2 errata */
 1139         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 1140                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
 1141 
 1142         return(0);
 1143 }
 1144 
 1145 static void
 1146 bge_setmulti(sc)
 1147         struct bge_softc *sc;
 1148 {
 1149         struct ifnet *ifp;
 1150         struct ifmultiaddr *ifma;
 1151         u_int32_t hashes[4] = { 0, 0, 0, 0 };
 1152         int h, i;
 1153 
 1154         BGE_LOCK_ASSERT(sc);
 1155 
 1156         ifp = &sc->arpcom.ac_if;
 1157 
 1158         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
 1159                 for (i = 0; i < 4; i++)
 1160                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
 1161                 return;
 1162         }
 1163 
 1164         /* First, zot all the existing filters. */
 1165         for (i = 0; i < 4; i++)
 1166                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
 1167 
 1168         /* Now program new ones. */
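              /*
               * Hash scheme: the low 7 bits of the little-endian
               * CRC-32 of each address select one of 128 filter bits;
               * bits 6:5 pick one of the four 32-bit BGE_MAR registers
               * and bits 4:0 the bit within it. For a (hypothetical)
               * CRC with low byte 0x6d: h = 0x6d, register
               * (0x6d & 0x60) >> 5 == 3, bit 0x6d & 0x1f == 13, so
               * hashes[3] |= 1 << 13.
               */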
 1169         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1170                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1171                         continue;
 1172                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1173                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
 1174                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 1175         }
 1176 
 1177         for (i = 0; i < 4; i++)
 1178                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
 1179 
 1180         return;
 1181 }
 1182 
 1183 /*
 1184  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 1185  * self-test results.
 1186  */
 1187 static int
 1188 bge_chipinit(sc)
 1189         struct bge_softc *sc;
 1190 {
 1191         int                     i;
 1192         u_int32_t               dma_rw_ctl;
 1193 
 1194         /* Set endianness before we access any non-PCI registers. */
 1195 #if BYTE_ORDER == BIG_ENDIAN
 1196         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1197             BGE_BIGENDIAN_INIT, 4);
 1198 #else
 1199         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
 1200             BGE_LITTLEENDIAN_INIT, 4);
 1201 #endif
 1202 
 1203         /*
 1204          * Check the 'ROM failed' bit on the RX CPU to see if
 1205          * self-tests passed.
 1206          */
 1207         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
 1208                 printf("bge%d: RX CPU self-diagnostics failed!\n",
 1209                     sc->bge_unit);
 1210                 return(ENODEV);
 1211         }
 1212 
 1213         /* Clear the MAC control register */
 1214         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 1215 
 1216         /*
 1217          * Clear the MAC statistics block in the NIC's
 1218          * internal memory.
 1219          */
 1220         for (i = BGE_STATS_BLOCK;
 1221             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1222                 BGE_MEMWIN_WRITE(sc, i, 0);
 1223 
 1224         for (i = BGE_STATUS_BLOCK;
 1225             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1226                 BGE_MEMWIN_WRITE(sc, i, 0);
 1227 
 1228         /* Set up the PCI DMA control register. */
 1229         if (sc->bge_pcie) {
 1230                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1231                     (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1232                     (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1233         } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
 1234             BGE_PCISTATE_PCI_BUSMODE) {
 1235                 /* Conventional PCI bus */
 1236                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1237                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1238                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1239                     (0x0F);
 1240         } else {
 1241                 /* PCI-X bus */
 1242                 /*
 1243                  * The 5704 uses a different encoding of read/write
 1244                  * watermarks.
 1245                  */
 1246                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1247                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1248                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1249                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1250                 else
 1251                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1252                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1253                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1254                             (0x0F);
 1255 
 1256                 /*
 1257                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
 1258                  * for hardware bugs.
 1259                  */
 1260                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1261                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 1262                         u_int32_t tmp;
 1263 
 1264                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
 1265                         if (tmp == 0x6 || tmp == 0x7)
 1266                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1267                 }
 1268         }
 1269 
 1270         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1271             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
 1272             sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1273             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1274                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
 1275         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
 1276 
 1277         /*
 1278          * Set up general mode register.
 1279          */
 1280         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
 1281             BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
 1282             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
 1283             BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
 1284 
 1285         /*
 1286          * Disable memory write invalidate.  Apparently it is not supported
 1287          * properly by these devices.
 1288          */
 1289         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
 1290 
 1291 #ifdef __brokenalpha__
 1292         /*
 1293          * Must ensure that we do not cross an 8K byte boundary
 1294          * for DMA reads.  Our highest limit is 1K bytes.  This is a
 1295          * restriction on some ALPHA platforms with early revision
 1296          * 21174 PCI chipsets, such as the AlphaPC 164lx
 1297          */
 1298         PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
 1299             BGE_PCI_READ_BNDRY_1024BYTES, 4);
 1300 #endif
 1301 
 1302         /* Set the timer prescaler (always 66MHz) */
 1303         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 1304 
 1305         return(0);
 1306 }
 1307 
 1308 static int
 1309 bge_blockinit(sc)
 1310         struct bge_softc *sc;
 1311 {
 1312         struct bge_rcb *rcb;
 1313         volatile struct bge_rcb *vrcb;
 1314         int i;
 1315 
 1316         /*
 1317          * Initialize the memory window pointer register so that
 1318          * we can access the first 32K of internal NIC RAM. This will
 1319          * allow us to set up the TX send ring RCBs and the RX return
 1320          * ring RCBs, plus other things which live in NIC memory.
 1321          */
 1322         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
 1323 
 1324         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
 1325 
 1326         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1327             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1328                 /* Configure mbuf memory pool */
 1329                 if (sc->bge_extram) {
 1330                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1331                             BGE_EXT_SSRAM);
 1332                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1333                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1334                         else
 1335                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1336                 } else {
 1337                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1338                             BGE_BUFFPOOL_1);
 1339                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1340                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1341                         else
 1342                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1343                 }
 1344 
 1345                 /* Configure DMA resource pool */
 1346                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1347                     BGE_DMA_DESCRIPTORS);
 1348                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1349         }
 1350 
 1351         /* Configure mbuf pool watermarks */
 1352         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1353             sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 1354                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1355                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1356         } else {
 1357                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1358                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1359         }
 1360         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1361 
 1362         /* Configure DMA resource watermarks */
 1363         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1364         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1365 
 1366         /* Enable buffer manager */
 1367         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1368             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1369                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1370                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1371 
 1372                 /* Poll for buffer manager start indication */
 1373                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1374                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1375                                 break;
 1376                         DELAY(10);
 1377                 }
 1378 
 1379                 if (i == BGE_TIMEOUT) {
 1380                         printf("bge%d: buffer manager failed to start\n",
 1381                             sc->bge_unit);
 1382                         return(ENXIO);
 1383                 }
 1384         }
 1385 
 1386         /* Enable flow-through queues */
 1387         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1388         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1389 
 1390         /* Wait until queue initialization is complete */
 1391         for (i = 0; i < BGE_TIMEOUT; i++) {
 1392                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1393                         break;
 1394                 DELAY(10);
 1395         }
 1396 
 1397         if (i == BGE_TIMEOUT) {
 1398                 printf("bge%d: flow-through queue init failed\n",
 1399                     sc->bge_unit);
 1400                 return(ENXIO);
 1401         }
 1402 
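              /*
               * Each ring is described to the chip by a ring control
               * block (RCB): bge_hostaddr holds the ring's physical
               * address in host memory, bge_maxlen_flags packs the
               * maximum frame length and flag bits (built with
               * BGE_RCB_MAXLEN_FLAGS), and bge_nicaddr gives the
               * ring's location in NIC-internal memory.
               */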
 1403         /* Initialize the standard RX ring control block */
 1404         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
 1405         rcb->bge_hostaddr.bge_addr_lo =
 1406             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
 1407         rcb->bge_hostaddr.bge_addr_hi =
 1408             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
 1409         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1410             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
 1411         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1412             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1413                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1414         else
 1415                 rcb->bge_maxlen_flags =
 1416                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1417         if (sc->bge_extram)
 1418                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1419         else
 1420                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1421         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1422         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1423 
 1424         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1425         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1426 
 1427         /*
 1428          * Initialize the jumbo RX ring control block
 1429          * We set the 'ring disabled' bit in the flags
 1430          * field until we're actually ready to start
 1431          * using this ring (i.e. once we set the MTU
 1432          * high enough to require it).
 1433          */
 1434         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1435             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1436                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1437 
 1438                 rcb->bge_hostaddr.bge_addr_lo =
 1439                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1440                 rcb->bge_hostaddr.bge_addr_hi =
 1441                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1442                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1443                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1444                     BUS_DMASYNC_PREREAD);
 1445                 rcb->bge_maxlen_flags =
 1446                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
 1447                     BGE_RCB_FLAG_RING_DISABLED);
 1448                 if (sc->bge_extram)
 1449                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1450                 else
 1451                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1452                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1453                     rcb->bge_hostaddr.bge_addr_hi);
 1454                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1455                     rcb->bge_hostaddr.bge_addr_lo);
 1456 
 1457                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1458                     rcb->bge_maxlen_flags);
 1459                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1460 
 1461                 /* Set up dummy disabled mini ring RCB */
 1462                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
 1463                 rcb->bge_maxlen_flags =
 1464                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1465                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1466                     rcb->bge_maxlen_flags);
 1467         }
 1468 
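              /*
               * (The jumbo ring is re-enabled later, in
               * bge_init_rx_ring_jumbo(), by rewriting the RCB above with
               * the disabled flag cleared; roughly:
               *
               *      rcb->bge_maxlen_flags =
               *          BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
               *      CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
               *          rcb->bge_maxlen_flags);
               *
               * No jumbo traffic flows until that happens.)
               */
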
 1469         /*
 1470          * Set the BD ring replenish thresholds. The recommended
 1471          * values are 1/8th the number of descriptors allocated to
 1472          * each ring.
 1473          */
 1474         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1475         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
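              /*
               * (With the usual ring sizes, BGE_STD_RX_RING_CNT == 512 and
               * BGE_JUMBO_RX_RING_CNT == 256, these writes come out to
               * thresholds of 64 and 32 descriptors respectively.)
               */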
 1476 
 1477         /*
 1478          * Disable all unused send rings by setting the 'ring disabled'
 1479          * bit in the flags field of all the TX send ring control blocks.
 1480          * These are located in NIC memory.
 1481          */
 1482         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1483             BGE_SEND_RING_RCB);
 1484         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1485                 vrcb->bge_maxlen_flags =
 1486                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1487                 vrcb->bge_nicaddr = 0;
 1488                 vrcb++;
 1489         }
 1490 
 1491         /* Configure TX RCB 0 (we use only the first ring) */
 1492         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1493             BGE_SEND_RING_RCB);
 1494         vrcb->bge_hostaddr.bge_addr_lo =
 1495             htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
 1496         vrcb->bge_hostaddr.bge_addr_hi =
 1497             htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
 1498         vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
 1499         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1500             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1501                 vrcb->bge_maxlen_flags =
 1502                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
 1503 
 1504         /* Disable all unused RX return rings */
 1505         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1506             BGE_RX_RETURN_RING_RCB);
 1507         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1508                 vrcb->bge_hostaddr.bge_addr_hi = 0;
 1509                 vrcb->bge_hostaddr.bge_addr_lo = 0;
 1510                 vrcb->bge_maxlen_flags =
 1511                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1512                     BGE_RCB_FLAG_RING_DISABLED);
 1513                 vrcb->bge_nicaddr = 0;
 1514                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1515                     (i * (sizeof(u_int64_t))), 0);
 1516                 vrcb++;
 1517         }
 1518 
 1519         /* Initialize RX ring indexes */
 1520         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1521         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1522         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1523 
 1524         /*
 1525          * Set up RX return ring 0
 1526          * Note that the NIC address for RX return rings is 0x00000000.
 1527          * The return rings live entirely within the host, so the
 1528          * nicaddr field in the RCB isn't used.
 1529          */
 1530         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1531             BGE_RX_RETURN_RING_RCB);
 1532         vrcb->bge_hostaddr.bge_addr_lo =
 1533             BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
 1534         vrcb->bge_hostaddr.bge_addr_hi =
 1535             BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
 1536         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 1537             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 1538         vrcb->bge_nicaddr = 0x00000000;
 1539         vrcb->bge_maxlen_flags =
 1540             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
 1541 
 1542         /* Set random backoff seed for TX */
 1543         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1544             (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
 1545             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
 1546             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
 1547             BGE_TX_BACKOFF_SEED_MASK);
 1548 
 1549         /* Set inter-packet gap */
 1550         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
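              /*
               * (0x2620 packs a slot time of 0x20 with an IPG of 6 and an
               * IPG-CRS of 2; the Linux tg3 driver programs the same value
               * into its TX lengths register.)
               */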
 1551 
 1552         /*
 1553          * Specify which ring to use for packets that don't match
 1554          * any RX rules.
 1555          */
 1556         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1557 
 1558         /*
 1559          * Configure number of RX lists. One interrupt distribution
 1560          * list, sixteen active lists, one bad frames class.
 1561          */
 1562         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1563 
 1564         /* Initialize RX list placement stats mask. */
 1565         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1566         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1567 
 1568         /* Disable host coalescing until we get it set up */
 1569         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1570 
 1571         /* Poll to make sure it's shut down. */
 1572         for (i = 0; i < BGE_TIMEOUT; i++) {
 1573                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1574                         break;
 1575                 DELAY(10);
 1576         }
 1577 
 1578         if (i == BGE_TIMEOUT) {
 1579                 printf("bge%d: host coalescing engine failed to idle\n",
 1580                     sc->bge_unit);
 1581                 return(ENXIO);
 1582         }
 1583 
 1584         /* Set up host coalescing defaults */
 1585         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1586         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1587         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1588         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1589         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1590             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1591                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1592                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1593         }
 1594         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1595         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
 1596 
 1597         /* Set up address of statistics block */
 1598         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1599             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1600                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
 1601                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
 1602                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
 1603                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
 1604                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1605                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1606                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1607         }
 1608 
 1609         /* Set up address of status block */
 1610         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
 1611             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
 1612         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
 1613             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
 1614         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 1615             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 1616         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
 1617         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
 1618 
 1619         /* Turn on host coalescing state machine */
 1620         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1621 
 1622         /* Turn on RX BD completion state machine and enable attentions */
 1623         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1624             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1625 
 1626         /* Turn on RX list placement state machine */
 1627         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1628 
 1629         /* Turn on RX list selector state machine. */
 1630         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1631             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1632                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1633 
 1634         /* Turn on DMA, clear stats */
 1635         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1636             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1637             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1638             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1639             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1640 
 1641         /* Set misc. local control, enable interrupts on attentions */
 1642         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
 1643 
 1644 #ifdef notdef
 1645         /* Assert GPIO pins for PHY reset */
 1646         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1647             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1648         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1649             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1650 #endif
 1651 
 1652         /* Turn on DMA completion state machine */
 1653         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1654             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1655                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1656 
 1657         /* Turn on write DMA state machine */
 1658         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1659             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1660 
 1661         /* Turn on read DMA state machine */
 1662         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1663             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1664 
 1665         /* Turn on RX data completion state machine */
 1666         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1667 
 1668         /* Turn on RX BD initiator state machine */
 1669         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1670 
 1671         /* Turn on RX data and RX BD initiator state machine */
 1672         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1673 
 1674         /* Turn on Mbuf cluster free state machine */
 1675         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1676             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1677                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1678 
 1679         /* Turn on send BD completion state machine */
 1680         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1681 
 1682         /* Turn on send data completion state machine */
 1683         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1684 
 1685         /* Turn on send data initiator state machine */
 1686         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1687 
 1688         /* Turn on send BD initiator state machine */
 1689         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1690 
 1691         /* Turn on send BD selector state machine */
 1692         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1693 
 1694         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1695         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1696             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1697 
 1698         /* ack/clear link change events */
 1699         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1700             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1701             BGE_MACSTAT_LINK_CHANGED);
 1702         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1703 
 1704         /* Enable PHY auto polling (for MII/GMII only) */
 1705         if (sc->bge_tbi) {
 1706                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1707         } else {
 1708                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
 1709                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
 1710                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1711                             BGE_EVTENB_MI_INTERRUPT);
 1712         }
 1713 
 1714         /* Enable link state change attentions. */
 1715         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1716 
 1717         return(0);
 1718 }
 1719 
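      /*
       * The block-init path above repeats one idiom several times: write a
       * mode register, then spin with DELAY() until the hardware
       * acknowledges. A helper along these lines (a sketch only; no such
       * function exists in this driver) would factor out the loop:
       */
      #ifdef notdef
      static int
      bge_poll_reg(sc, reg, bits, set)
              struct bge_softc *sc;
              bus_size_t reg;
              u_int32_t bits;
              int set;
      {
              u_int32_t val;
              int i;

              for (i = 0; i < BGE_TIMEOUT; i++) {
                      val = CSR_READ_4(sc, reg);
                      /* Wait for the bits to become set or clear as asked. */
                      if (set ? (val & bits) != 0 : (val & bits) == 0)
                              return(0);
                      DELAY(10);
              }
              return(ENXIO);
      }
      #endif
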
 1720 /*
 1721  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 1722  * against our list and return its name if we find a match. Note
 1723  * that since the Broadcom controller contains VPD support, we could
 1724  * get the device name string from the controller itself instead of
 1725  * the compiled-in string; that path is disabled (#ifdef notdef) below,
 1726  * so the compiled-in name plus the ASIC revision is what gets used.
 1727  */
 1728 static int
 1729 bge_probe(dev)
 1730         device_t dev;
 1731 {
 1732         struct bge_type *t;
 1733         struct bge_softc *sc;
 1734         char *descbuf;
 1735 
 1736         t = bge_devs;
 1737 
 1738         sc = device_get_softc(dev);
 1739         bzero(sc, sizeof(struct bge_softc));
 1740         sc->bge_unit = device_get_unit(dev);
 1741         sc->bge_dev = dev;
 1742 
 1743         while(t->bge_name != NULL) {
 1744                 if ((pci_get_vendor(dev) == t->bge_vid) &&
 1745                     (pci_get_device(dev) == t->bge_did)) {
 1746 #ifdef notdef
 1747                         bge_vpd_read(sc);
 1748                         device_set_desc(dev, sc->bge_vpd_prodname);
 1749 #endif
 1750                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
 1751                         if (descbuf == NULL)
 1752                                 return(ENOMEM);
 1753                         snprintf(descbuf, BGE_DEVDESC_MAX,
 1754                             "%s, ASIC rev. %#04x", t->bge_name,
 1755                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
 1756                         device_set_desc_copy(dev, descbuf);
 1757                         if (pci_get_subvendor(dev) == DELL_VENDORID)
 1758                                 sc->bge_no_3_led = 1;
 1759                         free(descbuf, M_TEMP);
 1760                         return(0);
 1761                 }
 1762                 t++;
 1763         }
 1764 
 1765         return(ENXIO);
 1766 }
 1767 
 1768 static void
 1769 bge_dma_free(sc)
 1770         struct bge_softc *sc;
 1771 {
 1772         int i;
 1773 
 1774 
 1775         /* Destroy DMA maps for RX buffers */
 1776 
 1777         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1778                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
 1779                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1780                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1781         }
 1782 
 1783         /* Destroy DMA maps for jumbo RX buffers */
 1784 
 1785         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1786                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
 1787                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
 1788                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1789         }
 1790 
 1791         /* Destroy DMA maps for TX buffers */
 1792 
 1793         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1794                 if (sc->bge_cdata.bge_tx_dmamap[i])
 1795                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1796                             sc->bge_cdata.bge_tx_dmamap[i]);
 1797         }
 1798 
 1799         if (sc->bge_cdata.bge_mtag)
 1800                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1801 
 1802 
 1803         /* Destroy standard RX ring */
 1804 
 1805         if (sc->bge_ldata.bge_rx_std_ring)
 1806                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
 1807                     sc->bge_ldata.bge_rx_std_ring,
 1808                     sc->bge_cdata.bge_rx_std_ring_map);
 1809 
 1810         if (sc->bge_cdata.bge_rx_std_ring_map) {
 1811                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
 1812                     sc->bge_cdata.bge_rx_std_ring_map);
 1813                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
 1814                     sc->bge_cdata.bge_rx_std_ring_map);
 1815         }
 1816 
 1817         if (sc->bge_cdata.bge_rx_std_ring_tag)
 1818                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
 1819 
 1820         /* Destroy jumbo RX ring */
 1821 
 1822         if (sc->bge_ldata.bge_rx_jumbo_ring)
 1823                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1824                     sc->bge_ldata.bge_rx_jumbo_ring,
 1825                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1826 
 1827         if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
 1828                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1829                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1830                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1831                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1832         }
 1833 
 1834         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
 1835                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1836 
 1837         /* Destroy RX return ring */
 1838 
 1839         if (sc->bge_ldata.bge_rx_return_ring)
 1840                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
 1841                     sc->bge_ldata.bge_rx_return_ring,
 1842                     sc->bge_cdata.bge_rx_return_ring_map);
 1843 
 1844         if (sc->bge_cdata.bge_rx_return_ring_map) {
 1845                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
 1846                     sc->bge_cdata.bge_rx_return_ring_map);
 1847                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
 1848                     sc->bge_cdata.bge_rx_return_ring_map);
 1849         }
 1850 
 1851         if (sc->bge_cdata.bge_rx_return_ring_tag)
 1852                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
 1853 
 1854         /* Destroy TX ring */
 1855 
 1856         if (sc->bge_ldata.bge_tx_ring)
 1857                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
 1858                     sc->bge_ldata.bge_tx_ring,
 1859                     sc->bge_cdata.bge_tx_ring_map);
 1860 
 1861         if (sc->bge_cdata.bge_tx_ring_map) {
 1862                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
 1863                     sc->bge_cdata.bge_tx_ring_map);
 1864                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
 1865                     sc->bge_cdata.bge_tx_ring_map);
 1866         }
 1867 
 1868         if (sc->bge_cdata.bge_tx_ring_tag)
 1869                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
 1870 
 1871         /* Destroy status block */
 1872 
 1873         if (sc->bge_ldata.bge_status_block)
 1874                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
 1875                     sc->bge_ldata.bge_status_block,
 1876                     sc->bge_cdata.bge_status_map);
 1877 
 1878         if (sc->bge_cdata.bge_status_map) {
 1879                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
 1880                     sc->bge_cdata.bge_status_map);
 1881                 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
 1882                     sc->bge_cdata.bge_status_map);
 1883         }
 1884 
 1885         if (sc->bge_cdata.bge_status_tag)
 1886                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
 1887 
 1888         /* Destroy statistics block */
 1889 
 1890         if (sc->bge_ldata.bge_stats)
 1891                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
 1892                     sc->bge_ldata.bge_stats,
 1893                     sc->bge_cdata.bge_stats_map);
 1894 
 1895         if (sc->bge_cdata.bge_stats_map) {
 1896                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
 1897                     sc->bge_cdata.bge_stats_map);
 1898                 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
 1899                     sc->bge_cdata.bge_stats_map);
 1900         }
 1901 
 1902         if (sc->bge_cdata.bge_stats_tag)
 1903                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
 1904 
 1905         /* Destroy the parent tag */
 1906 
 1907         if (sc->bge_cdata.bge_parent_tag)
 1908                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
 1909 
 1910         return;
 1911 }
 1912 
 1913 static int
 1914 bge_dma_alloc(dev)
 1915         device_t dev;
 1916 {
 1917         struct bge_softc *sc;
 1918         int nseg, i, error;
 1919         struct bge_dmamap_arg ctx;
 1920 
 1921         sc = device_get_softc(dev);
 1922 
 1923         /*
 1924          * Allocate the parent bus DMA tag appropriate for PCI.
 1925          */
 1926 #define BGE_NSEG_NEW 32
 1927         error = bus_dma_tag_create(NULL,        /* parent */
 1928                         PAGE_SIZE, 0,           /* alignment, boundary */
 1929                         BUS_SPACE_MAXADDR,      /* lowaddr */
 1930                         BUS_SPACE_MAXADDR_32BIT,/* highaddr */
 1931                         NULL, NULL,             /* filter, filterarg */
 1932                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
 1933                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1934                         0,                      /* flags */
 1935                         NULL, NULL,             /* lockfunc, lockarg */
 1936                         &sc->bge_cdata.bge_parent_tag);
              if (error) {
                      device_printf(dev, "could not allocate parent dma tag\n");
                      return (ENOMEM);
              }
 1937 
 1938         /*
 1939          * Create tag for RX mbufs.
 1940          */
 1941         nseg = 32;
 1942         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
 1943             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1944             NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL,
 1945             &sc->bge_cdata.bge_mtag);
 1946 
 1947         if (error) {
 1948                 device_printf(dev, "could not allocate dma tag\n");
 1949                 return (ENOMEM);
 1950         }
 1951 
 1952         /* Create DMA maps for RX buffers */
 1953 
 1954         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1955                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1956                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
 1957                 if (error) {
 1958                         device_printf(dev, "can't create DMA map for RX\n");
 1959                         return(ENOMEM);
 1960                 }
 1961         }
 1962 
 1963         /* Create DMA maps for TX buffers */
 1964 
 1965         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1966                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1967                             &sc->bge_cdata.bge_tx_dmamap[i]);
 1968                 if (error) {
 1969                         device_printf(dev, "can't create DMA map for TX\n");
 1970                         return(ENOMEM);
 1971                 }
 1972         }
 1973 
 1974         /* Create tag for standard RX ring */
 1975 
 1976         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1977             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1978             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
 1979             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
 1980 
 1981         if (error) {
 1982                 device_printf(dev, "could not allocate dma tag\n");
 1983                 return (ENOMEM);
 1984         }
 1985 
 1986         /* Allocate DMA'able memory for standard RX ring */
 1987 
 1988         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
 1989             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
 1990             &sc->bge_cdata.bge_rx_std_ring_map);
 1991         if (error)
 1992                 return (ENOMEM);
 1993 
 1994         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
 1995 
 1996         /* Load the address of the standard RX ring */
 1997 
 1998         ctx.bge_maxsegs = 1;
 1999         ctx.sc = sc;
 2000 
 2001         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
 2002             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
 2003             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2004 
 2005         if (error)
 2006                 return (ENOMEM);
 2007 
 2008         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
 2009 
 2010         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2011             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2012 
 2013                 /*
 2014                  * Create tag for jumbo mbufs.
 2015                  * This is really a bit of a kludge. We allocate a special
 2016                  * jumbo buffer pool which (thanks to the way our DMA
 2017                  * memory allocation works) will consist of contiguous
 2018                  * pages. This means that even though a jumbo buffer might
 2019                  * be larger than a page size, we don't really need to
 2020                  * map it into more than one DMA segment. However, the
 2021                  * default mbuf tag will result in multi-segment mappings,
 2022                  * so we have to create a special jumbo mbuf tag that
 2023                  * lets us get away with mapping the jumbo buffers as
 2024                  * a single segment. I think eventually the driver should
 2025                  * be changed so that it uses ordinary mbufs and cluster
 2026                  * buffers, i.e. jumbo frames can span multiple DMA
 2027                  * descriptors. But that's a project for another day.
 2028                  */
 2029 
 2030                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2031                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2032                     NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
 2033                     &sc->bge_cdata.bge_mtag_jumbo);
 2034 
 2035                 if (error) {
 2036                         device_printf(dev, "could not allocate dma tag\n");
 2037                         return (ENOMEM);
 2038                 }
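                      /*
                       * The multi-segment scheme described above would want
                       * a tag permitting several segments per jumbo frame
                       * instead of one; a sketch only, not what this driver
                       * currently does:
                       */
      #ifdef notdef
                      error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
                          1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
                          NULL, BGE_JLEN, BGE_NSEG_NEW, MCLBYTES, 0, NULL, NULL,
                          &sc->bge_cdata.bge_mtag_jumbo);
      #endif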
 2039 
 2040                 /* Create tag for jumbo RX ring */
 2041 
 2042                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2043                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2044                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
 2045                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
 2046 
 2047                 if (error) {
 2048                         device_printf(dev, "could not allocate dma tag\n");
 2049                         return (ENOMEM);
 2050                 }
 2051 
 2052                 /* Allocate DMA'able memory for jumbo RX ring */
 2053 
 2054                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2055                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
 2056                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
 2057                 if (error)
 2058                         return (ENOMEM);
 2059 
 2060                 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
 2061                     BGE_JUMBO_RX_RING_SZ);
 2062 
 2063                 /* Load the address of the jumbo RX ring */
 2064 
 2065                 ctx.bge_maxsegs = 1;
 2066                 ctx.sc = sc;
 2067 
 2068                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2069                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2070                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
 2071                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2072 
 2073                 if (error)
 2074                         return (ENOMEM);
 2075 
 2076                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
 2077 
 2078                 /* Create DMA maps for jumbo RX buffers */
 2079 
 2080                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 2081                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
 2082                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 2083                         if (error) {
 2084                                 device_printf(dev,
 2085                                     "can't create DMA map for jumbo RX\n");
 2086                                 return(ENOMEM);
 2087                         }
 2088                 }
 2089 
 2090         }
 2091 
 2092         /* Create tag for RX return ring */
 2093 
 2094         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2095             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2096             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
 2097             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
 2098 
 2099         if (error) {
 2100                 device_printf(dev, "could not allocate dma tag\n");
 2101                 return (ENOMEM);
 2102         }
 2103 
 2104         /* Allocate DMA'able memory for RX return ring */
 2105 
 2106         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
 2107             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
 2108             &sc->bge_cdata.bge_rx_return_ring_map);
 2109         if (error)
 2110                 return (ENOMEM);
 2111 
 2112         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
 2113             BGE_RX_RTN_RING_SZ(sc));
 2114 
 2115         /* Load the address of the RX return ring */
 2116 
 2117         ctx.bge_maxsegs = 1;
 2118         ctx.sc = sc;
 2119 
 2120         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
 2121             sc->bge_cdata.bge_rx_return_ring_map,
 2122             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
 2123             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2124 
 2125         if (error)
 2126                 return (ENOMEM);
 2127 
 2128         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
 2129 
 2130         /* Create tag for TX ring */
 2131 
 2132         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2133             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2134             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
 2135             &sc->bge_cdata.bge_tx_ring_tag);
 2136 
 2137         if (error) {
 2138                 device_printf(dev, "could not allocate dma tag\n");
 2139                 return (ENOMEM);
 2140         }
 2141 
 2142         /* Allocate DMA'able memory for TX ring */
 2143 
 2144         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
 2145             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
 2146             &sc->bge_cdata.bge_tx_ring_map);
 2147         if (error)
 2148                 return (ENOMEM);
 2149 
 2150         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
 2151 
 2152         /* Load the address of the TX ring */
 2153 
 2154         ctx.bge_maxsegs = 1;
 2155         ctx.sc = sc;
 2156 
 2157         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
 2158             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
 2159             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2160 
 2161         if (error)
 2162                 return (ENOMEM);
 2163 
 2164         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
 2165 
 2166         /* Create tag for status block */
 2167 
 2168         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2169             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2170             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
 2171             NULL, NULL, &sc->bge_cdata.bge_status_tag);
 2172 
 2173         if (error) {
 2174                 device_printf(dev, "could not allocate dma tag\n");
 2175                 return (ENOMEM);
 2176         }
 2177 
 2178         /* Allocate DMA'able memory for status block */
 2179 
 2180         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
 2181             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
 2182             &sc->bge_cdata.bge_status_map);
 2183         if (error)
 2184                 return (ENOMEM);
 2185 
 2186         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
 2187 
 2188         /* Load the address of the status block */
 2189 
 2190         ctx.sc = sc;
 2191         ctx.bge_maxsegs = 1;
 2192 
 2193         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
 2194             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
 2195             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2196 
 2197         if (error)
 2198                 return (ENOMEM);
 2199 
 2200         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
 2201 
 2202         /* Create tag for statistics block */
 2203 
 2204         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2205             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2206             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
 2207             &sc->bge_cdata.bge_stats_tag);
 2208 
 2209         if (error) {
 2210                 device_printf(dev, "could not allocate dma tag\n");
 2211                 return (ENOMEM);
 2212         }
 2213 
 2214         /* Allocate DMA'able memory for statistics block */
 2215 
 2216         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
 2217             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
 2218             &sc->bge_cdata.bge_stats_map);
 2219         if (error)
 2220                 return (ENOMEM);
 2221 
 2222         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
 2223 
 2224         /* Load the address of the statistics block */
 2225 
 2226         ctx.sc = sc;
 2227         ctx.bge_maxsegs = 1;
 2228 
 2229         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
 2230             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
 2231             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2232 
 2233         if (error)
 2234                 return (ENOMEM);
 2235 
 2236         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
 2237 
 2238         return(0);
 2239 }
 2240 
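      /*
       * All of the bus_dmamap_load() calls above report their result
       * through bge_dma_map_addr(), defined earlier in this file: it
       * simply stashes the single segment's bus address in the
       * bge_dmamap_arg, roughly ctx->bge_busaddr = segs->ds_addr when
       * no error is reported.
       */
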
 2241 static int
 2242 bge_attach(dev)
 2243         device_t dev;
 2244 {
 2245         struct ifnet *ifp;
 2246         struct bge_softc *sc;
 2247         u_int32_t hwcfg = 0;
 2248         u_int32_t mac_addr = 0;
 2249         int unit, error = 0, rid;
 2250 
 2251         sc = device_get_softc(dev);
 2252         unit = device_get_unit(dev);
 2253         sc->bge_dev = dev;
 2254         sc->bge_unit = unit;
 2255 
 2256         /*
 2257          * Map control/status registers.
 2258          */
 2259         pci_enable_busmaster(dev);
 2260 
 2261         rid = BGE_PCI_BAR0;
 2262         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 2263             RF_ACTIVE|PCI_RF_DENSE);
 2264 
 2265         if (sc->bge_res == NULL) {
 2266                 printf("bge%d: couldn't map memory\n", unit);
 2267                 error = ENXIO;
 2268                 goto fail;
 2269         }
 2270 
 2271         sc->bge_btag = rman_get_bustag(sc->bge_res);
 2272         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
 2273         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
 2274 
 2275         /* Allocate interrupt */
 2276         rid = 0;
 2277 
 2278         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2279             RF_SHAREABLE | RF_ACTIVE);
 2280 
 2281         if (sc->bge_irq == NULL) {
 2282                 printf("bge%d: couldn't map interrupt\n", unit);
 2283                 error = ENXIO;
 2284                 goto fail;
 2285         }
 2286 
 2287         sc->bge_unit = unit;
 2288 
 2289         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
 2290 
 2291         /* Save ASIC rev. */
 2292 
 2293         sc->bge_chipid =
 2294             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
 2295             BGE_PCIMISCCTL_ASICREV;
 2296         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
 2297         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
 2298 
 2299         /*
 2300          * XXX: From the Broadcom Linux driver; not in specs or errata.
 2301          * Detects PCI Express via the MSI capability's next pointer.
 2302          */
 2303         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 2304                 u_int32_t v;
 2305 
 2306                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
 2307                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
 2308                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
 2309                         if ((v & 0xff) == BGE_PCIE_CAPID)
 2310                                 sc->bge_pcie = 1;
 2311                 }
 2312         }
 2313 
 2314         /* Try to reset the chip. */
 2315         bge_reset(sc);
 2316 
 2317         if (bge_chipinit(sc)) {
 2318                 printf("bge%d: chip initialization failed\n", sc->bge_unit);
 2319                 bge_release_resources(sc);
 2320                 error = ENXIO;
 2321                 goto fail;
 2322         }
 2323 
 2324         /*
 2325          * Get the station address from NIC memory; fall back to the EEPROM.
 2326          */
 2327         mac_addr = bge_readmem_ind(sc, 0x0c14);
 2328         if ((mac_addr >> 16) == 0x484b) {
 2329                 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
 2330                 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
 2331                 mac_addr = bge_readmem_ind(sc, 0x0c18);
 2332                 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
 2333                 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
 2334                 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
 2335                 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
 2336         } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 2337             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2338                 printf("bge%d: failed to read station address\n", unit);
 2339                 bge_release_resources(sc);
 2340                 error = ENXIO;
 2341                 goto fail;
 2342         }
 2343 
 2344         /* The 5705 and 5750 limit the RX return ring to 512 entries. */
 2345         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 2346             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 2347                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 2348         else
 2349                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 2350 
 2351         if (bge_dma_alloc(dev)) {
 2352                 printf("bge%d: failed to allocate DMA resources\n",
 2353                     sc->bge_unit);
 2354                 bge_release_resources(sc);
 2355                 error = ENXIO;
 2356                 goto fail;
 2357         }
 2358 
 2359         /*
 2360          * Try to allocate memory for jumbo buffers.
 2361          * The 5705 and 5750 do not appear to support jumbo frames.
 2362          */
 2363         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2364             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2365                 if (bge_alloc_jumbo_mem(sc)) {
 2366                         printf("bge%d: jumbo buffer allocation "
 2367                             "failed\n", sc->bge_unit);
 2368                         bge_release_resources(sc);
 2369                         error = ENXIO;
 2370                         goto fail;
 2371                 }
 2372         }
 2373 
 2374         /* Set default tuneable values. */
 2375         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2376         sc->bge_rx_coal_ticks = 150;
 2377         sc->bge_tx_coal_ticks = 150;
 2378         sc->bge_rx_max_coal_bds = 64;
 2379         sc->bge_tx_max_coal_bds = 128;
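              /*
               * (The coal_ticks values are in microseconds and the
               * max_coal_bds values are descriptor counts: with the
               * defaults above the chip posts an interrupt after 150us
               * or after 64 received/128 sent BDs, whichever comes first.)
               */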
 2380 
 2381         /* Set up ifnet structure */
 2382         ifp = &sc->arpcom.ac_if;
 2383         ifp->if_softc = sc;
 2384         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2385         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2386         ifp->if_ioctl = bge_ioctl;
 2387         ifp->if_start = bge_start;
 2388         ifp->if_watchdog = bge_watchdog;
 2389         ifp->if_init = bge_init;
 2390         ifp->if_mtu = ETHERMTU;
 2391         ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
 2392         ifp->if_hwassist = BGE_CSUM_FEATURES;
 2393         /* NB: the code for RX csum offload is disabled for now */
 2394         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
 2395             IFCAP_VLAN_MTU;
 2396         ifp->if_capenable = ifp->if_capabilities;
 2397 
 2398         /*
 2399          * Figure out what sort of media we have by checking the
 2400          * hardware config word in the first 32k of NIC internal memory,
 2401          * or fall back to examining the EEPROM if necessary.
 2402          * Note: on some BCM5700 cards, this value appears to be unset.
 2403          * If that's the case, we have to rely on identifying the NIC
 2404          * by its PCI subsystem ID, as we do below for the SysKonnect
 2405          * SK-9D41.
 2406          */
 2407         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
 2408                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2409         else {
 2410                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
 2411                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
 2412                 hwcfg = ntohl(hwcfg);
 2413         }
 2414 
 2415         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2416                 sc->bge_tbi = 1;
 2417 
 2418         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2419         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
 2420                 sc->bge_tbi = 1;
 2421 
 2422         if (sc->bge_tbi) {
 2423                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
 2424                     bge_ifmedia_upd, bge_ifmedia_sts);
 2425                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2426                 ifmedia_add(&sc->bge_ifmedia,
 2427                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2428                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2429                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2430                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
 2431         } else {
 2432                 /*
 2433                  * Do transceiver setup.
 2434                  */
 2435                 if (mii_phy_probe(dev, &sc->bge_miibus,
 2436                     bge_ifmedia_upd, bge_ifmedia_sts)) {
 2437                         printf("bge%d: MII without any PHY!\n", sc->bge_unit);
 2438                         bge_release_resources(sc);
 2439                         bge_free_jumbo_mem(sc);
 2440                         error = ENXIO;
 2441                         goto fail;
 2442                 }
 2443         }
 2444 
 2445         /*
 2446          * When using the BCM5701 in PCI-X mode, data corruption has
 2447          * been observed in the first few bytes of some received packets.
 2448          * Aligning the packet buffer in memory eliminates the corruption.
 2449          * Unfortunately, this misaligns the packet payloads.  On platforms
 2450          * which do not support unaligned accesses, we will realign the
 2451          * payloads by copying the received packets.
 2452          */
 2453         switch (sc->bge_chipid) {
 2454         case BGE_CHIPID_BCM5701_A0:
 2455         case BGE_CHIPID_BCM5701_B0:
 2456         case BGE_CHIPID_BCM5701_B2:
 2457         case BGE_CHIPID_BCM5701_B5:
 2458                 /* If in PCI-X mode, work around the alignment bug. */
 2459                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
 2460                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2461                     BGE_PCISTATE_PCI_BUSSPEED)
 2462                         sc->bge_rx_alignment_bug = 1;
 2463                 break;
 2464         }
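              /*
               * (The realignment itself is done in bge_rxeof() below: when
               * bge_rx_alignment_bug is set, the payload is shifted up by
               * ETHER_ALIGN bytes with bcopy() before the packet is passed
               * to the stack.)
               */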
 2465 
 2466         /*
 2467          * Call MI attach routine.
 2468          */
 2469         ether_ifattach(ifp, sc->arpcom.ac_enaddr);
 2470         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
 2471 
 2472         /*
 2473          * Hookup IRQ last.
 2474          */
 2475         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
 2476            bge_intr, sc, &sc->bge_intrhand);
 2477 
 2478         if (error) {
 2479                 bge_release_resources(sc);
 2480                 printf("bge%d: couldn't set up irq\n", unit);
 2481         }
 2482 
 2483 fail:
 2484         return(error);
 2485 }
 2486 
 2487 static int
 2488 bge_detach(dev)
 2489         device_t dev;
 2490 {
 2491         struct bge_softc *sc;
 2492         struct ifnet *ifp;
 2493 
 2494         sc = device_get_softc(dev);
 2495         ifp = &sc->arpcom.ac_if;
 2496 
 2497         BGE_LOCK(sc);
 2498         bge_stop(sc);
 2499         bge_reset(sc);
 2500         BGE_UNLOCK(sc);
 2501 
 2502         ether_ifdetach(ifp);
 2503 
 2504         if (sc->bge_tbi) {
 2505                 ifmedia_removeall(&sc->bge_ifmedia);
 2506         } else {
 2507                 bus_generic_detach(dev);
 2508                 device_delete_child(dev, sc->bge_miibus);
 2509         }
 2510 
 2511         bge_release_resources(sc);
 2512         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2513             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2514                 bge_free_jumbo_mem(sc);
 2515 
 2516         return(0);
 2517 }
 2518 
 2519 static void
 2520 bge_release_resources(sc)
 2521         struct bge_softc *sc;
 2522 {
 2523         device_t dev;
 2524 
 2525         dev = sc->bge_dev;
 2526 
 2527         if (sc->bge_vpd_prodname != NULL)
 2528                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2529 
 2530         if (sc->bge_vpd_readonly != NULL)
 2531                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2532 
 2533         if (sc->bge_intrhand != NULL)
 2534                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
 2535 
 2536         if (sc->bge_irq != NULL)
 2537                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
 2538 
 2539         if (sc->bge_res != NULL)
 2540                 bus_release_resource(dev, SYS_RES_MEMORY,
 2541                     BGE_PCI_BAR0, sc->bge_res);
 2542 
 2543         bge_dma_free(sc);
 2544 
 2545         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
 2546                 BGE_LOCK_DESTROY(sc);
 2547 
 2548         return;
 2549 }
 2550 
 2551 static void
 2552 bge_reset(sc)
 2553         struct bge_softc *sc;
 2554 {
 2555         device_t dev;
 2556         u_int32_t cachesize, command, pcistate, reset;
 2557         int i, val = 0;
 2558 
 2559         dev = sc->bge_dev;
 2560 
 2561         /* Save some important PCI state. */
 2562         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
 2563         command = pci_read_config(dev, BGE_PCI_CMD, 4);
 2564         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
 2565 
 2566         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2567             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2568             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2569 
 2570         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
 2571 
 2572         /* XXX: Broadcom Linux driver. */
 2573         if (sc->bge_pcie) {
 2574                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
 2575                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
 2576                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2577                         /* Prevent PCIE link training during global reset */
 2578                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
 2579                         reset |= (1<<29);
 2580                 }
 2581         }
 2582 
 2583         /* Issue global reset */
 2584         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
 2585 
 2586         DELAY(1000);
 2587 
 2588         /* XXX: Broadcom Linux driver. */
 2589         if (sc->bge_pcie) {
 2590                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
 2591                         uint32_t v;
 2592 
 2593                         DELAY(500000); /* wait for link training to complete */
 2594                         v = pci_read_config(dev, 0xc4, 4);
 2595                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
 2596                 }
 2597                 /* Set PCIE max payload size and clear error status. */
 2598                 pci_write_config(dev, 0xd8, 0xf5000, 4);
 2599         }
 2600 
 2601         /* Reset some of the PCI state that got zapped by reset */
 2602         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2603             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2604             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2605         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
 2606         pci_write_config(dev, BGE_PCI_CMD, command, 4);
 2607         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2608 
 2609         /* Enable memory arbiter. */
 2610         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2611             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2612                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2613 
 2614         /*
 2615          * Prevent PXE restart: write a magic number to the
 2616          * general communications memory at 0xB50.
 2617          */
 2618         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2619         /*
 2620          * Poll the value location we just wrote until
 2621          * we see the 1's complement of the magic number.
 2622          * This indicates that the firmware initialization
 2623          * is complete.
 2624          */
 2625         for (i = 0; i < BGE_TIMEOUT; i++) {
 2626                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2627                 if (val == ~BGE_MAGIC_NUMBER)
 2628                         break;
 2629                 DELAY(10);
 2630         }
 2631 
 2632         if (i == BGE_TIMEOUT) {
 2633                 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
 2634                 return;
 2635         }
 2636 
 2637         /*
 2638          * XXX Wait for the value of the PCISTATE register to
 2639          * return to its original pre-reset state. This is a
 2640          * fairly good indicator of reset completion. If we don't
 2641          * wait for the reset to fully complete, trying to read
 2642          * from the device's non-PCI registers may yield garbage
 2643          * results.
 2644          */
 2645         for (i = 0; i < BGE_TIMEOUT; i++) {
 2646                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
 2647                         break;
 2648                 DELAY(10);
 2649         }
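              /*
               * (If PCISTATE never matches, the loop above simply falls
               * through after BGE_TIMEOUT iterations; bge_reset() returns
               * void, so there is no error to propagate here.)
               */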
 2650 
 2651         /* Fix up byte swapping */
 2652         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
 2653             BGE_MODECTL_BYTESWAP_DATA);
 2654 
 2655         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2656 
 2657         /*
 2658          * The 5704 in TBI mode apparently needs some special
 2659          * adjustment to ensure the SERDES drive level is set
 2660          * to 1.2V.
 2661          */
 2662         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
 2663                 uint32_t serdescfg;
 2664                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
 2665                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
 2666                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
 2667         }
 2668 
 2669         /* XXX: Broadcom Linux driver. */
 2670         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2671                 uint32_t v;
 2672 
 2673                 v = CSR_READ_4(sc, 0x7c00);
 2674                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
 2675         }
 2676         DELAY(10000);
 2677 
 2678         return;
 2679 }
 2680 
 2681 /*
 2682  * Frame reception handling. This is called if there's a frame
 2683  * on the receive return list.
 2684  *
 2685  * Note: we have to be able to handle two possibilities here:
 2686  * 1) the frame is from the jumbo receive ring
 2687  * 2) the frame is from the standard receive ring
 2688  */
 2689 
 2690 static void
 2691 bge_rxeof(sc)
 2692         struct bge_softc *sc;
 2693 {
 2694         struct ifnet *ifp;
 2695         int stdcnt = 0, jumbocnt = 0;
 2696 
 2697         BGE_LOCK_ASSERT(sc);
 2698 
 2699         ifp = &sc->arpcom.ac_if;
 2700 
 2701         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2702             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
 2703         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2704             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
 2705         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2706             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2707                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2708                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2709                     BUS_DMASYNC_POSTREAD);
 2710         }
 2711 
 2712         while(sc->bge_rx_saved_considx !=
 2713             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
 2714                 struct bge_rx_bd        *cur_rx;
 2715                 u_int32_t               rxidx;
 2716                 struct ether_header     *eh;
 2717                 struct mbuf             *m = NULL;
 2718                 u_int16_t               vlan_tag = 0;
 2719                 int                     have_tag = 0;
 2720 
 2721                 cur_rx =
 2722             &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
 2723 
 2724                 rxidx = cur_rx->bge_idx;
 2725                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2726 
 2727                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2728                         have_tag = 1;
 2729                         vlan_tag = cur_rx->bge_vlan_tag;
 2730                 }
 2731 
 2732                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2733                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2734                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 2735                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
 2736                             BUS_DMASYNC_POSTREAD);
 2737                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 2738                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
 2739                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2740                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2741                         jumbocnt++;
 2742                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2743                                 ifp->if_ierrors++;
 2744                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2745                                 continue;
 2746                         }
 2747                         if (bge_newbuf_jumbo(sc,
 2748                             sc->bge_jumbo, NULL) == ENOBUFS) {
 2749                                 ifp->if_ierrors++;
 2750                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2751                                 continue;
 2752                         }
 2753                 } else {
 2754                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2755                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2756                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
 2757                             BUS_DMASYNC_POSTREAD);
 2758                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2759                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
 2760                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2761                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2762                         stdcnt++;
 2763                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2764                                 ifp->if_ierrors++;
 2765                                 bge_newbuf_std(sc, sc->bge_std, m);
 2766                                 continue;
 2767                         }
 2768                         if (bge_newbuf_std(sc, sc->bge_std,
 2769                             NULL) == ENOBUFS) {
 2770                                 ifp->if_ierrors++;
 2771                                 bge_newbuf_std(sc, sc->bge_std, m);
 2772                                 continue;
 2773                         }
 2774                 }
 2775 
 2776                 ifp->if_ipackets++;
 2777 #ifndef __i386__
 2778                 /*
 2779                  * The i386 allows unaligned accesses, but for other
 2780                  * platforms we must make sure the payload is aligned.
 2781                  */
 2782                 if (sc->bge_rx_alignment_bug) {
 2783                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 2784                             cur_rx->bge_len);
 2785                         m->m_data += ETHER_ALIGN;
 2786                 }
 2787 #endif
 2788                 eh = mtod(m, struct ether_header *);
 2789                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2790                 m->m_pkthdr.rcvif = ifp;
 2791 
 2792 #if 0 /* currently broken for some packets, possibly related to TCP options */
 2793                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2794                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2795                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
 2796                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2797                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
 2798                                 m->m_pkthdr.csum_data =
 2799                                     cur_rx->bge_tcp_udp_csum;
 2800                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 2801                         }
 2802                 }
 2803 #endif
 2804 
 2805                 /*
 2806                  * If we received a packet with a vlan tag,
 2807                  * attach that information to the packet.
 2808                  */
 2809                 if (have_tag)
 2810                         VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
 2811 
 2812                 BGE_UNLOCK(sc);
 2813                 (*ifp->if_input)(ifp, m);
 2814                 BGE_LOCK(sc);
 2815         }
 2816 
 2817         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2818             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 2819         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2820             sc->bge_cdata.bge_rx_std_ring_map,
 2821             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
 2822         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2823             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2824                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2825                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2826                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2827         }
 2828 
 2829         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2830         if (stdcnt)
 2831                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2832         if (jumbocnt)
 2833                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2834 
 2835         return;
 2836 }
 2837 
 2838 static void
 2839 bge_txeof(sc)
 2840         struct bge_softc *sc;
 2841 {
 2842         struct bge_tx_bd *cur_tx = NULL;
 2843         struct ifnet *ifp;
 2844 
 2845         BGE_LOCK_ASSERT(sc);
 2846 
 2847         ifp = &sc->arpcom.ac_if;
 2848 
 2849         /*
 2850          * Go through our tx ring and free mbufs for those
 2851          * frames that have been sent.
 2852          */
 2853         while (sc->bge_tx_saved_considx !=
 2854             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
 2855                 u_int32_t               idx = 0;
 2856 
 2857                 idx = sc->bge_tx_saved_considx;
 2858                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 2859                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2860                         ifp->if_opackets++;
 2861                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 2862                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
 2863                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2864                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2865                             sc->bge_cdata.bge_tx_dmamap[idx]);
 2866                 }
 2867                 sc->bge_txcnt--;
 2868                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2869                 ifp->if_timer = 0;
 2870         }
 2871 
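              /*
               * If we reaped at least one descriptor, there is room in the
               * TX ring again, so clear OACTIVE and allow bge_start() to
               * queue more frames.
               */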
 2872         if (cur_tx != NULL)
 2873                 ifp->if_flags &= ~IFF_OACTIVE;
 2874 
 2875         return;
 2876 }
 2877 
 2878 static void
 2879 bge_intr(xsc)
 2880         void *xsc;
 2881 {
 2882         struct bge_softc *sc;
 2883         struct ifnet *ifp;
 2884         u_int32_t statusword;
 2885         u_int32_t status, mimode;
 2886 
 2887         sc = xsc;
 2888         ifp = &sc->arpcom.ac_if;
 2889 
 2890         BGE_LOCK(sc);
 2891 
 2892         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2893             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
 2894 
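              /*
               * Read and atomically clear the status word. The chip DMAs
               * status updates into this block, so clearing it lets the
               * next update be recognized as new.
               */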
 2895         statusword =
 2896             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2897 
 2898 #ifdef notdef
 2899         /* Avoid this for now -- checking this register is expensive. */
 2900         /* Make sure this is really our interrupt. */
 2901         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2902                 return;
 2903 #endif
 2904         /* Ack interrupt and stop others from occurring. */
 2905         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2906 
 2907         /*
 2908          * Process link state changes.
 2909          * Grrr. The link status word in the status block does
 2910          * not work correctly on the BCM5700 rev AX and BX chips,
 2911          * according to all available information. Hence, we have
 2912          * to enable MII interrupts in order to properly obtain
 2913          * async link changes. Unfortunately, this also means that
 2914          * we have to read the MAC status register to detect link
 2915          * changes, thereby adding an additional register access to
 2916          * the interrupt handler.
 2917          */
 2918 
 2919         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
 2920 
 2921                 status = CSR_READ_4(sc, BGE_MAC_STS);
 2922                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 2923                         sc->bge_link = 0;
 2924                         callout_stop(&sc->bge_stat_ch);
 2925                         bge_tick_locked(sc);
 2926                         /* Clear the interrupt */
 2927                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 2928                             BGE_EVTENB_MI_INTERRUPT);
 2929                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
 2930                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
 2931                             BRGPHY_INTRS);
 2932                 }
 2933         } else {
 2934                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
 2935                         /*
 2936                          * Sometimes PCS encoding errors are detected in
 2937                          * TBI mode (on fiber NICs), and for some reason
 2938                          * the chip will signal them as link changes.
 2939                          * If we get a link change event, but the 'PCS
 2940                          * encoding error' bit in the MAC status register
 2941                          * is set, don't bother doing a link check.
 2942                          * This avoids spurious "gigabit link up" messages
 2943                          * that sometimes appear on fiber NICs during
 2944                          * periods of heavy traffic. (There should be no
 2945                          * effect on copper NICs.)
 2946                          *
 2947                          * If we do have a copper NIC (bge_tbi == 0) then
 2948                          * check that the AUTOPOLL bit is set before
 2949                          * processing the event as a real link change.
 2950                          * Turning AUTOPOLL on and off in the MII read/write
 2951                          * functions will often trigger a link status
 2952                          * interrupt for no reason.
 2953                          */
 2954                         status = CSR_READ_4(sc, BGE_MAC_STS);
 2955                         mimode = CSR_READ_4(sc, BGE_MI_MODE);
 2956                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
 2957                             BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
 2958                             (mimode & BGE_MIMODE_AUTOPOLL))) {
 2959                                 sc->bge_link = 0;
 2960                                 callout_stop(&sc->bge_stat_ch);
 2961                                 bge_tick_locked(sc);
 2962                         }
 2963                         /* Clear the interrupt */
 2964                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 2965                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 2966                             BGE_MACSTAT_LINK_CHANGED);
 2967 
 2968                         /* Force flush the status block cached by PCI bridge */
 2969                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
 2970                 }
 2971         }
 2972 
 2973         if (ifp->if_flags & IFF_RUNNING) {
 2974                 /* Check RX return ring producer/consumer */
 2975                 bge_rxeof(sc);
 2976 
 2977                 /* Check TX ring producer/consumer */
 2978                 bge_txeof(sc);
 2979         }
 2980 
 2981         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2982             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 2983 
 2984         bge_handle_events(sc);
 2985 
 2986         /* Re-enable interrupts. */
 2987         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 2988 
 2989         if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
 2990                 bge_start_locked(ifp);
 2991 
 2992         BGE_UNLOCK(sc);
 2993 
 2994         return;
 2995 }
 2996 
 2997 static void
 2998 bge_tick_locked(sc)
 2999         struct bge_softc *sc;
 3000 {
 3001         struct mii_data *mii = NULL;
 3002         struct ifmedia *ifm = NULL;
 3003         struct ifnet *ifp;
 3004 
 3005         ifp = &sc->arpcom.ac_if;
 3006 
 3007         BGE_LOCK_ASSERT(sc);
 3008 
 3009         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3010             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 3011                 bge_stats_update_regs(sc);
 3012         else
 3013                 bge_stats_update(sc);
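              /* Re-arm the one-second statistics/link tick. */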
 3014         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3015         if (sc->bge_link)
 3016                 return;
 3017 
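              /*
               * TBI (fiber) NICs have no MII PHY, so link state is polled
               * from the MAC status register rather than via mii_tick().
               */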
 3018         if (sc->bge_tbi) {
 3019                 ifm = &sc->bge_ifmedia;
 3020                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3021                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
 3022                         sc->bge_link++;
 3023                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 3024                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3025                                     BGE_MACMODE_TBI_SEND_CFGS);
 3026                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 3027                         if (bootverbose)
 3028                                 printf("bge%d: gigabit link up\n",
 3029                                     sc->bge_unit);
 3030                         if (ifp->if_snd.ifq_head != NULL)
 3031                                 bge_start_locked(ifp);
 3032                 }
 3033                 return;
 3034         }
 3035 
 3036         mii = device_get_softc(sc->bge_miibus);
 3037         mii_tick(mii);
 3038 
 3039         if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
 3040             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 3041                 sc->bge_link++;
 3042                 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
 3043                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
 3044                     bootverbose)
 3045                         printf("bge%d: gigabit link up\n", sc->bge_unit);
 3046                 if (ifp->if_snd.ifq_head != NULL)
 3047                         bge_start_locked(ifp);
 3048         }
 3049 
 3050         return;
 3051 }
 3052 
 3053 static void
 3054 bge_tick(xsc)
 3055         void *xsc;
 3056 {
 3057         struct bge_softc *sc;
 3058 
 3059         sc = xsc;
 3060 
 3061         BGE_LOCK(sc);
 3062         bge_tick_locked(sc);
 3063         BGE_UNLOCK(sc);
 3064 }
 3065 
 3066 static void
 3067 bge_stats_update_regs(sc)
 3068         struct bge_softc *sc;
 3069 {
 3070         struct ifnet *ifp;
 3071         struct bge_mac_stats_regs stats;
 3072         u_int32_t *s;
 3073         int i;
 3074 
 3075         ifp = &sc->arpcom.ac_if;
 3076 
 3077         s = (u_int32_t *)&stats;
 3078         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
 3079                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
 3080                 s++;
 3081         }
 3082 
 3083         ifp->if_collisions +=
 3084            (stats.dot3StatsSingleCollisionFrames +
 3085            stats.dot3StatsMultipleCollisionFrames +
 3086            stats.dot3StatsExcessiveCollisions +
 3087            stats.dot3StatsLateCollisions) -
 3088            ifp->if_collisions;
 3089 
 3090         return;
 3091 }
 3092 
 3093 static void
 3094 bge_stats_update(sc)
 3095         struct bge_softc *sc;
 3096 {
 3097         struct ifnet *ifp;
 3098         struct bge_stats *stats;
 3099 
 3100         ifp = &sc->arpcom.ac_if;
 3101 
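              /*
               * The statistics block lives in NIC-local memory; it is read
               * here through the chip's PCI memory window mapping.
               */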
 3102         stats = (struct bge_stats *)(sc->bge_vhandle +
 3103             BGE_MEMWIN_START + BGE_STATS_BLOCK);
 3104 
 3105         ifp->if_collisions +=
 3106            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
 3107            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
 3108            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
 3109            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
 3110            ifp->if_collisions;
 3111 
 3112 #ifdef notdef
 3113         ifp->if_collisions +=
 3114            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
 3115            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
 3116            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
 3117            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
 3118            ifp->if_collisions;
 3119 #endif
 3120 
 3121         return;
 3122 }
 3123 
 3124 /*
 3125  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3126  * pointers to descriptors.
 3127  */
 3128 static int
 3129 bge_encap(sc, m_head, txidx)
 3130         struct bge_softc *sc;
 3131         struct mbuf *m_head;
 3132         u_int32_t *txidx;
 3133 {
 3134         struct bge_tx_bd        *f = NULL;
 3135         u_int16_t               csum_flags = 0;
 3136         struct m_tag            *mtag;
 3137         struct bge_dmamap_arg   ctx;
 3138         bus_dmamap_t            map;
 3139         int                     error;
 3140 
 3141 
 3142         if (m_head->m_pkthdr.csum_flags) {
 3143                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 3144                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3145                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
 3146                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3147                 if (m_head->m_flags & M_LASTFRAG)
 3148                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
 3149                 else if (m_head->m_flags & M_FRAG)
 3150                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 3151         }
 3152 
 3153         mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
 3154 
 3155         ctx.sc = sc;
 3156         ctx.bge_idx = *txidx;
 3157         ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 3158         ctx.bge_flags = csum_flags;
 3159         /*
 3160          * Sanity check: avoid coming within 16 descriptors
 3161          * of the end of the ring.
 3162          */
 3163         ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
 3164 
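              /*
               * bge_dma_map_tx_desc() is the bus_dma callback that fills in
               * the ring descriptors as the mbuf chain is loaded. On return,
               * ctx.bge_idx holds the index of the last descriptor used and
               * ctx.bge_maxsegs the number of segments mapped (zero if the
               * chain would not fit).
               */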
 3165         map = sc->bge_cdata.bge_tx_dmamap[*txidx];
 3166         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 3167             m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 3168 
 3169         if (error || ctx.bge_maxsegs == 0 /*||
 3170             ctx.bge_idx == sc->bge_tx_saved_considx*/)
 3171                 return (ENOBUFS);
 3172 
 3173         /*
 3174  * Ensure that the map for this transmission
 3175          * is placed at the array index of the last descriptor
 3176          * in this chain.
 3177          */
 3178         sc->bge_cdata.bge_tx_dmamap[*txidx] =
 3179             sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
 3180         sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
 3181         sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
 3182         sc->bge_txcnt += ctx.bge_maxsegs;
 3183         f = &sc->bge_ldata.bge_tx_ring[*txidx];
 3184         if (mtag != NULL) {
 3185                 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
 3186                 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
 3187         } else {
 3188                 f->bge_vlan_tag = 0;
 3189         }
 3190 
 3191         BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
 3192         *txidx = ctx.bge_idx;
 3193 
 3194         return(0);
 3195 }
 3196 
 3197 /*
 3198  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3199  * to the mbuf data regions directly in the transmit descriptors.
 3200  */
 3201 static void
 3202 bge_start_locked(ifp)
 3203         struct ifnet *ifp;
 3204 {
 3205         struct bge_softc *sc;
 3206         struct mbuf *m_head = NULL;
 3207         u_int32_t prodidx = 0;
 3208         int count = 0;
 3209 
 3210         sc = ifp->if_softc;
 3211 
 3212         if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
 3213                 return;
 3214 
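              /*
               * Resume filling the ring at the producer index we last
               * handed to the chip.
               */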
 3215         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
 3216 
 3217         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3218                 IF_DEQUEUE(&ifp->if_snd, m_head);
 3219                 if (m_head == NULL)
 3220                         break;
 3221 
 3222                 /*
 3223                  * XXX
 3224                  * The code inside the if() block is never reached since we
 3225                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
 3226                  * requests to checksum TCP/UDP in a fragmented packet.
 3227                  *
 3228                  * XXX
 3229                  * safety overkill.  If this is a fragmented packet chain
 3230                  * with delayed TCP/UDP checksums, then only encapsulate
 3231                  * it if we have enough descriptors to handle the entire
 3232                  * chain at once.
 3233                  * (paranoia -- may not actually be needed)
 3234                  */
 3235                 if (m_head->m_flags & M_FIRSTFRAG &&
 3236                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3237                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3238                             m_head->m_pkthdr.csum_data + 16) {
 3239                                 IF_PREPEND(&ifp->if_snd, m_head);
 3240                                 ifp->if_flags |= IFF_OACTIVE;
 3241                                 break;
 3242                         }
 3243                 }
 3244 
 3245                 /*
 3246                  * Pack the data into the transmit ring. If we
 3247                  * don't have room, set the OACTIVE flag and wait
 3248                  * for the NIC to drain the ring.
 3249                  */
 3250                 if (bge_encap(sc, m_head, &prodidx)) {
 3251                         IF_PREPEND(&ifp->if_snd, m_head);
 3252                         ifp->if_flags |= IFF_OACTIVE;
 3253                         break;
 3254                 }
 3255                 ++count;
 3256 
 3257                 /*
 3258                  * If there's a BPF listener, bounce a copy of this frame
 3259                  * to him.
 3260                  */
 3261                 BPF_MTAP(ifp, m_head);
 3262         }
 3263 
 3264         if (count == 0) {
 3265                 /* no packets were dequeued */
 3266                 return;
 3267         }
 3268 
 3269         /* Transmit */
 3270         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3271         /* 5700 b2 errata */
 3272         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 3273                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
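              /*
               * (The B-step errata reportedly calls for the producer mailbox
               * write to be issued twice, since the first write may be
               * dropped.)
               */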
 3274 
 3275         /*
 3276          * Set a timeout in case the chip goes out to lunch.
 3277          */
 3278         ifp->if_timer = 5;
 3279 
 3280         return;
 3281 }
 3282 
 3283 /*
 3284  * Main transmit routine wrapper: acquire the driver lock and hand
 3285  * the work off to bge_start_locked().
 3286  */
 3287 static void
 3288 bge_start(ifp)
 3289         struct ifnet *ifp;
 3290 {
 3291         struct bge_softc *sc;
 3292 
 3293         sc = ifp->if_softc;
 3294         BGE_LOCK(sc);
 3295         bge_start_locked(ifp);
 3296         BGE_UNLOCK(sc);
 3297 }
 3298 
 3299 static void
 3300 bge_init_locked(sc)
 3301         struct bge_softc *sc;
 3302 {
 3303         struct ifnet *ifp;
 3304         u_int16_t *m;
 3305 
 3306         BGE_LOCK_ASSERT(sc);
 3307 
 3308         ifp = &sc->arpcom.ac_if;
 3309 
 3310         if (ifp->if_flags & IFF_RUNNING)
 3311                 return;
 3312 
 3313         /* Cancel pending I/O and flush buffers. */
 3314         bge_stop(sc);
 3315         bge_reset(sc);
 3316         bge_chipinit(sc);
 3317 
 3318         /*
 3319          * Init the various state machines, ring
 3320          * control blocks and firmware.
 3321          */
 3322         if (bge_blockinit(sc)) {
 3323                 printf("bge%d: initialization failure\n", sc->bge_unit);
 3324                 return;
 3325         }
 3326 
 3327         ifp = &sc->arpcom.ac_if;
 3328 
 3329         /* Specify MTU. */
 3330         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3331             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3332 
 3333         /* Load our MAC address. */
 3334         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
 3335         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3336         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3337 
 3338         /* Enable or disable promiscuous mode as needed. */
 3339         if (ifp->if_flags & IFF_PROMISC) {
 3340                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3341         } else {
 3342                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3343         }
 3344 
 3345         /* Program multicast filter. */
 3346         bge_setmulti(sc);
 3347 
 3348         /* Init RX ring. */
 3349         bge_init_rx_ring_std(sc);
 3350 
 3351         /*
 3352          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 3353  * memory to ensure that the chip has in fact read the first
 3354          * entry of the ring.
 3355          */
 3356         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
 3357                 u_int32_t               v, i;
 3358                 for (i = 0; i < 10; i++) {
 3359                         DELAY(20);
 3360                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
 3361                         if (v == (MCLBYTES - ETHER_ALIGN))
 3362                                 break;
 3363                 }
 3364                 if (i == 10)
 3365                         printf("bge%d: 5705 A0 chip failed to load RX ring\n",
 3366                             sc->bge_unit);
 3367         }
 3368 
 3369         /* Init jumbo RX ring. */
 3370         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3371                 bge_init_rx_ring_jumbo(sc);
 3372 
 3373         /* Init our RX return ring index */
 3374         sc->bge_rx_saved_considx = 0;
 3375 
 3376         /* Init TX ring. */
 3377         bge_init_tx_ring(sc);
 3378 
 3379         /* Turn on transmitter */
 3380         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3381 
 3382         /* Turn on receiver */
 3383         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3384 
 3385         /* Tell firmware we're alive. */
 3386         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3387 
 3388         /* Enable host interrupts. */
 3389         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
 3390         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3391         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3392 
 3393         bge_ifmedia_upd(ifp);
 3394 
 3395         ifp->if_flags |= IFF_RUNNING;
 3396         ifp->if_flags &= ~IFF_OACTIVE;
 3397 
 3398         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3399 
 3400         return;
 3401 }
 3402 
 3403 static void
 3404 bge_init(xsc)
 3405         void *xsc;
 3406 {
 3407         struct bge_softc *sc = xsc;
 3408 
 3409         BGE_LOCK(sc);
 3410         bge_init_locked(sc);
 3411         BGE_UNLOCK(sc);
 3412 
 3413         return;
 3414 }
 3415 
 3416 /*
 3417  * Set media options.
 3418  */
 3419 static int
 3420 bge_ifmedia_upd(ifp)
 3421         struct ifnet *ifp;
 3422 {
 3423         struct bge_softc *sc;
 3424         struct mii_data *mii;
 3425         struct ifmedia *ifm;
 3426 
 3427         sc = ifp->if_softc;
 3428         ifm = &sc->bge_ifmedia;
 3429 
 3430         /* If this is a 1000baseX NIC, enable the TBI port. */
 3431         if (sc->bge_tbi) {
 3432                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3433                         return(EINVAL);
 3434                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
 3435                 case IFM_AUTO:
 3436                         break;
 3437                 case IFM_1000_SX:
 3438                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3439                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3440                                     BGE_MACMODE_HALF_DUPLEX);
 3441                         } else {
 3442                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3443                                     BGE_MACMODE_HALF_DUPLEX);
 3444                         }
 3445                         break;
 3446                 default:
 3447                         return(EINVAL);
 3448                 }
 3449                 return(0);
 3450         }
 3451 
 3452         mii = device_get_softc(sc->bge_miibus);
 3453         sc->bge_link = 0;
 3454         if (mii->mii_instance) {
 3455                 struct mii_softc *miisc;
 3456                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 3457                     miisc = LIST_NEXT(miisc, mii_list))
 3458                         mii_phy_reset(miisc);
 3459         }
 3460         mii_mediachg(mii);
 3461 
 3462         return(0);
 3463 }
 3464 
 3465 /*
 3466  * Report current media status.
 3467  */
 3468 static void
 3469 bge_ifmedia_sts(ifp, ifmr)
 3470         struct ifnet *ifp;
 3471         struct ifmediareq *ifmr;
 3472 {
 3473         struct bge_softc *sc;
 3474         struct mii_data *mii;
 3475 
 3476         sc = ifp->if_softc;
 3477 
 3478         if (sc->bge_tbi) {
 3479                 ifmr->ifm_status = IFM_AVALID;
 3480                 ifmr->ifm_active = IFM_ETHER;
 3481                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3482                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3483                         ifmr->ifm_status |= IFM_ACTIVE;
 3484                 ifmr->ifm_active |= IFM_1000_SX;
 3485                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3486                         ifmr->ifm_active |= IFM_HDX;
 3487                 else
 3488                         ifmr->ifm_active |= IFM_FDX;
 3489                 return;
 3490         }
 3491 
 3492         mii = device_get_softc(sc->bge_miibus);
 3493         mii_pollstat(mii);
 3494         ifmr->ifm_active = mii->mii_media_active;
 3495         ifmr->ifm_status = mii->mii_media_status;
 3496 
 3497         return;
 3498 }
 3499 
 3500 static int
 3501 bge_ioctl(ifp, command, data)
 3502         struct ifnet *ifp;
 3503         u_long command;
 3504         caddr_t data;
 3505 {
 3506         struct bge_softc *sc = ifp->if_softc;
 3507         struct ifreq *ifr = (struct ifreq *) data;
 3508         int mask, error = 0;
 3509         struct mii_data *mii;
 3510 
 3511         switch(command) {
 3512         case SIOCSIFMTU:
 3513                 /* Disallow jumbo frames on 5705. */
 3514                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3515                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
 3516                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
 3517                         error = EINVAL;
 3518                 else {
 3519                         ifp->if_mtu = ifr->ifr_mtu;
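                              /*
                               * Clearing IFF_RUNNING forces bge_init() to do
                               * a full re-init, which reprograms the chip's
                               * RX MTU register with the new value.
                               */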
 3520                         ifp->if_flags &= ~IFF_RUNNING;
 3521                         bge_init(sc);
 3522                 }
 3523                 break;
 3524         case SIOCSIFFLAGS:
 3525                 BGE_LOCK(sc);
 3526                 if (ifp->if_flags & IFF_UP) {
 3527                         /*
 3528                          * If only the state of the PROMISC flag changed,
 3529                          * then just use the 'set promisc mode' command
 3530                          * instead of reinitializing the entire NIC. Doing
 3531                          * a full re-init means reloading the firmware and
 3532                          * waiting for it to start up, which may take a
 3533                          * second or two.
 3534                          */
 3535                         if (ifp->if_flags & IFF_RUNNING &&
 3536                             ifp->if_flags & IFF_PROMISC &&
 3537                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3538                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3539                                     BGE_RXMODE_RX_PROMISC);
 3540                         } else if (ifp->if_flags & IFF_RUNNING &&
 3541                             !(ifp->if_flags & IFF_PROMISC) &&
 3542                             sc->bge_if_flags & IFF_PROMISC) {
 3543                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3544                                     BGE_RXMODE_RX_PROMISC);
 3545                         } else
 3546                                 bge_init_locked(sc);
 3547                 } else {
 3548                         if (ifp->if_flags & IFF_RUNNING) {
 3549                                 bge_stop(sc);
 3550                         }
 3551                 }
 3552                 sc->bge_if_flags = ifp->if_flags;
 3553                 BGE_UNLOCK(sc);
 3554                 error = 0;
 3555                 break;
 3556         case SIOCADDMULTI:
 3557         case SIOCDELMULTI:
 3558                 if (ifp->if_flags & IFF_RUNNING) {
 3559                         BGE_LOCK(sc);
 3560                         bge_setmulti(sc);
 3561                         BGE_UNLOCK(sc);
 3562                         error = 0;
 3563                 }
 3564                 break;
 3565         case SIOCSIFMEDIA:
 3566         case SIOCGIFMEDIA:
 3567                 if (sc->bge_tbi) {
 3568                         error = ifmedia_ioctl(ifp, ifr,
 3569                             &sc->bge_ifmedia, command);
 3570                 } else {
 3571                         mii = device_get_softc(sc->bge_miibus);
 3572                         error = ifmedia_ioctl(ifp, ifr,
 3573                             &mii->mii_media, command);
 3574                 }
 3575                 break;
 3576         case SIOCSIFCAP:
 3577                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3578                 /* NB: the code for RX csum offload is disabled for now */
 3579                 if (mask & IFCAP_TXCSUM) {
 3580                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3581                         if (IFCAP_TXCSUM & ifp->if_capenable)
 3582                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
 3583                         else
 3584                                 ifp->if_hwassist = 0;
 3585                 }
 3586                 error = 0;
 3587                 break;
 3588         default:
 3589                 error = ether_ioctl(ifp, command, data);
 3590                 break;
 3591         }
 3592 
 3593         return(error);
 3594 }
 3595 
 3596 static void
 3597 bge_watchdog(ifp)
 3598         struct ifnet *ifp;
 3599 {
 3600         struct bge_softc *sc;
 3601 
 3602         sc = ifp->if_softc;
 3603 
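              /*
               * The transmit watchdog fired: the chip stopped completing
               * transmissions within the timeout armed in bge_start_locked(),
               * so reset and reinitialize it.
               */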
 3604         printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
 3605 
 3606         ifp->if_flags &= ~IFF_RUNNING;
 3607         bge_init(sc);
 3608 
 3609         ifp->if_oerrors++;
 3610 
 3611         return;
 3612 }
 3613 
 3614 /*
 3615  * Stop the adapter and free any mbufs allocated to the
 3616  * RX and TX lists.
 3617  */
 3618 static void
 3619 bge_stop(sc)
 3620         struct bge_softc *sc;
 3621 {
 3622         struct ifnet *ifp;
 3623         struct ifmedia_entry *ifm;
 3624         struct mii_data *mii = NULL;
 3625         int mtmp, itmp;
 3626 
 3627         BGE_LOCK_ASSERT(sc);
 3628 
 3629         ifp = &sc->arpcom.ac_if;
 3630 
 3631         if (!sc->bge_tbi)
 3632                 mii = device_get_softc(sc->bge_miibus);
 3633 
 3634         callout_stop(&sc->bge_stat_ch);
 3635 
 3636         /*
 3637          * Disable all of the receiver blocks
 3638          */
 3639         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3640         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3641         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3642         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3643             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3644                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3645         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3646         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3647         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3648 
 3649         /*
 3650          * Disable all of the transmit blocks
 3651          */
 3652         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3653         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3654         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3655         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3656         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3657         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3658             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3659                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3660         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3661 
 3662         /*
 3663          * Shut down all of the memory managers and related
 3664          * state machines.
 3665          */
 3666         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3667         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3668         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3669             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3670                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3671         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3672         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3673         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3674             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 3675                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3676                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3677         }
 3678 
 3679         /* Disable host interrupts. */
 3680         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3681         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3682 
 3683         /*
 3684          * Tell firmware we're shutting down.
 3685          */
 3686         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3687 
 3688         /* Free the RX lists. */
 3689         bge_free_rx_ring_std(sc);
 3690 
 3691         /* Free jumbo RX list. */
 3692         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3693             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3694                 bge_free_rx_ring_jumbo(sc);
 3695 
 3696         /* Free TX buffers. */
 3697         bge_free_tx_ring(sc);
 3698 
 3699         /*
 3700          * Isolate/power down the PHY, but leave the media selection
 3701          * unchanged so that things will be put back to normal when
 3702          * we bring the interface back up.
 3703          */
 3704         if (!sc->bge_tbi) {
 3705                 itmp = ifp->if_flags;
 3706                 ifp->if_flags |= IFF_UP;
 3707                 ifm = mii->mii_media.ifm_cur;
 3708                 mtmp = ifm->ifm_media;
 3709                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
 3710                 mii_mediachg(mii);
 3711                 ifm->ifm_media = mtmp;
 3712                 ifp->if_flags = itmp;
 3713         }
 3714 
 3715         sc->bge_link = 0;
 3716 
 3717         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3718 
 3719         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 3720 
 3721         return;
 3722 }
 3723 
 3724 /*
 3725  * Stop all chip I/O so that the kernel's probe routines don't
 3726  * get confused by errant DMAs when rebooting.
 3727  */
 3728 static void
 3729 bge_shutdown(dev)
 3730         device_t dev;
 3731 {
 3732         struct bge_softc *sc;
 3733 
 3734         sc = device_get_softc(dev);
 3735 
 3736         BGE_LOCK(sc);
 3737         bge_stop(sc);
 3738         bge_reset(sc);
 3739         BGE_UNLOCK(sc);
 3740 
 3741         return;
 3742 }
