FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c


/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/dev/bge/if_bge.c 122678 2003-11-14 17:16:58Z obrien $");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { ALT_VENDORID, ALT_DEVICEID_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { ALT_VENDORID, ALT_DEVICEID_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { SK_VENDORID, SK_DEVICEID_ALTIMA,
                "SysKonnect Gigabit Ethernet" },
        { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int bge_probe            (device_t);
static int bge_attach           (device_t);
static int bge_detach           (device_t);
static void bge_release_resources
                                (struct bge_softc *);
static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
                                    bus_size_t, int);
static int bge_dma_alloc        (device_t);
static void bge_dma_free        (struct bge_softc *);

static void bge_txeof           (struct bge_softc *);
static void bge_rxeof           (struct bge_softc *);

static void bge_tick_locked     (struct bge_softc *);
static void bge_tick            (void *);
static void bge_stats_update    (struct bge_softc *);
static void bge_stats_update_regs
                                (struct bge_softc *);
static int bge_encap            (struct bge_softc *, struct mbuf *,
                                        u_int32_t *);

static void bge_intr            (void *);
static void bge_start_locked    (struct ifnet *);
static void bge_start           (struct ifnet *);
static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
static void bge_init_locked     (struct bge_softc *);
static void bge_init            (void *);
static void bge_stop            (struct bge_softc *);
static void bge_watchdog        (struct ifnet *);
static void bge_shutdown        (device_t);
static int bge_ifmedia_upd      (struct ifnet *);
static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);

static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);

static u_int32_t bge_mchash     (caddr_t);
static void bge_setmulti        (struct bge_softc *);

static void bge_handle_events   (struct bge_softc *);
static int bge_alloc_jumbo_mem  (struct bge_softc *);
static void bge_free_jumbo_mem  (struct bge_softc *);
static void *bge_jalloc         (struct bge_softc *);
static void bge_jfree           (void *, void *);
static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std (struct bge_softc *);
static void bge_free_rx_ring_std        (struct bge_softc *);
static int bge_init_rx_ring_jumbo       (struct bge_softc *);
static void bge_free_rx_ring_jumbo      (struct bge_softc *);
static void bge_free_tx_ring    (struct bge_softc *);
static int bge_init_tx_ring     (struct bge_softc *);

static int bge_chipinit         (struct bge_softc *);
static int bge_blockinit        (struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
static void bge_vpd_read        (struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
                                (struct bge_softc *, int);
static void bge_writemem_ind    (struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
                                (struct bge_softc *, int);
#endif
static void bge_writereg_ind    (struct bge_softc *, int, int);

static int bge_miibus_readreg   (device_t, int, int);
static int bge_miibus_writereg  (device_t, int, int, int);
static void bge_miibus_statchg  (device_t);

static void bge_reset           (struct bge_softc *);

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static driver_t bge_driver = {
        "bge",
        bge_methods,
        sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static u_int32_t
bge_readmem_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

        return;
}
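
/*
 * Illustrative sketch (not compiled, hence "notdef" like the other
 * dormant code in this file): the pair of routines above reaches
 * NIC-internal memory indirectly through a window programmed via PCI
 * config space.  BGE_SOFTWARE_GENCOMM is the firmware mailbox offset
 * and BGE_MAGIC_NUMBER the handshake value this driver writes there
 * during reset; any other offset into NIC RAM is accessed the same way.
 */
#ifdef notdef
static void
bge_memwin_example(sc)
        struct bge_softc *sc;
{
        u_int32_t val;

        bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
        val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
        /* val now reads back as BGE_MAGIC_NUMBER. */
}
#endif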

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

        return;
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(arg, segs, nseg, error)
        void *arg;
        bus_dma_segment_t *segs;
        int nseg;
        int error;
{
        struct bge_dmamap_arg *ctx;

        if (error)
                return;

        ctx = arg;

        if (nseg > ctx->bge_maxsegs) {
                ctx->bge_maxsegs = 0;
                return;
        }

        ctx->bge_busaddr = segs->ds_addr;

        return;
}
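
/*
 * Illustrative sketch (not compiled): how the callback above is
 * normally driven.  bus_dmamap_load() runs bge_dma_map_addr()
 * synchronously under BUS_DMA_NOWAIT, so on return the context either
 * holds the bus address or has bge_maxsegs zeroed to flag failure.
 * The tag/map/buf/len parameter names here are placeholders, not
 * fields of this driver.
 */
#ifdef notdef
static int
bge_dma_map_addr_example(sc, tag, map, buf, len, paddr)
        struct bge_softc *sc;
        bus_dma_tag_t tag;
        bus_dmamap_t map;
        void *buf;
        bus_size_t len;
        bus_addr_t *paddr;
{
        struct bge_dmamap_arg ctx;
        int error;

        ctx.sc = sc;
        ctx.bge_maxsegs = 1;            /* accept at most one segment */
        error = bus_dmamap_load(tag, map, buf, len, bge_dma_map_addr,
            &ctx, BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0)
                return(ENOMEM);
        *paddr = ctx.bge_busaddr;
        return(0);
}
#endif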

/*
 * Map an mbuf chain into a TX ring.
 */

static void
bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
        void *arg;
        bus_dma_segment_t *segs;
        int nseg;
        bus_size_t mapsize;
        int error;
{
        struct bge_dmamap_arg *ctx;
        struct bge_tx_bd *d = NULL;
        int i = 0, idx;

        if (error)
                return;

        ctx = arg;

        /* Signal error to caller if there are too many segments */
        if (nseg > ctx->bge_maxsegs) {
                ctx->bge_maxsegs = 0;
                return;
        }

        idx = ctx->bge_idx;
        while(1) {
                d = &ctx->bge_ring[idx];
                d->bge_addr.bge_addr_lo =
                    htole32(BGE_ADDR_LO(segs[i].ds_addr));
                d->bge_addr.bge_addr_hi =
                    htole32(BGE_ADDR_HI(segs[i].ds_addr));
                d->bge_len = htole16(segs[i].ds_len);
                d->bge_flags = htole16(ctx->bge_flags);
                i++;
                if (i == nseg)
                        break;
                BGE_INC(idx, BGE_TX_RING_CNT);
        }

        d->bge_flags |= htole16(BGE_TXBDFLAG_END);
        ctx->bge_maxsegs = nseg;
        ctx->bge_idx = idx;

        return;
}


#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
        struct bge_softc *sc;
        int addr;
{
        int i;
        device_t dev;
        u_int32_t val;

        dev = sc->bge_dev;
        pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                printf("bge%d: VPD read timed out\n", sc->bge_unit);
                return(0);
        }

        val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

        return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
        struct bge_softc *sc;
        struct vpd_res *res;
        int addr;
{
        int i;
        u_int8_t *ptr;

        ptr = (u_int8_t *)res;
        for (i = 0; i < sizeof(struct vpd_res); i++)
                ptr[i] = bge_vpd_readbyte(sc, i + addr);

        return;
}

static void
bge_vpd_read(sc)
        struct bge_softc *sc;
{
        int pos = 0, i;
        struct vpd_res res;

        if (sc->bge_vpd_prodname != NULL)
                free(sc->bge_vpd_prodname, M_DEVBUF);
        if (sc->bge_vpd_readonly != NULL)
                free(sc->bge_vpd_readonly, M_DEVBUF);
        sc->bge_vpd_prodname = NULL;
        sc->bge_vpd_readonly = NULL;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_ID) {
                printf("bge%d: bad VPD resource id: expected %x got %x\n",
                        sc->bge_unit, VPD_RES_ID, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
        sc->bge_vpd_prodname[i] = '\0';
        pos += i;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_READ) {
                printf("bge%d: bad VPD resource id: expected %x got %x\n",
                    sc->bge_unit, VPD_RES_READ, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

        return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
        struct bge_softc *sc;
        int addr;
        u_int8_t *dest;
{
        int i;
        u_int32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for(i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                printf("bge%d: eeprom read timed out\n", sc->bge_unit);
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
        struct bge_softc *sc;
        caddr_t dest;
        int off;
        int cnt;
{
        int err = 0, i;
        u_int8_t byte = 0;

        for (i = 0; i < cnt; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}
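
/*
 * Illustrative sketch (not compiled): a typical caller of the routine
 * above.  During attach this driver pulls the station address out of
 * the EEPROM in essentially this fashion; BGE_EE_MAC_OFFSET comes
 * from if_bgereg.h.
 */
#ifdef notdef
static int
bge_read_eaddr_example(sc, eaddr)
        struct bge_softc *sc;
        u_char *eaddr;
{

        if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
            ETHER_ADDR_LEN)) {
                printf("bge%d: failed to read station address\n",
                    sc->bge_unit);
                return(ENXIO);
        }
        return(0);
}
#endif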

static int
bge_miibus_readreg(dev, phy, reg)
        device_t dev;
        int phy, reg;
{
        struct bge_softc *sc;
        u_int32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                printf("bge%d: PHY read timed out\n", sc->bge_unit);
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
        device_t dev;
        int phy, reg, val;
{
        struct bge_softc *sc;
        u_int32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                printf("bge%d: PHY write timed out\n", sc->bge_unit);
                return(0);
        }

        return(0);
}

static void
bge_miibus_statchg(dev)
        device_t dev;
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }

        return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
        struct bge_softc                *sc;
{

        return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(sc)
        struct bge_softc                *sc;
{
        caddr_t                 ptr;
        register int            i, error;
        struct bge_jpool_entry   *entry;

        /* Create tag for jumbo buffer block */

        error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
            PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
            NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
            &sc->bge_cdata.bge_jumbo_tag);

        if (error) {
                printf("bge%d: could not allocate jumbo dma tag\n",
                    sc->bge_unit);
                return (ENOMEM);
        }

        /* Allocate DMA'able memory for jumbo buffer block */

        error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
            (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
            &sc->bge_cdata.bge_jumbo_map);

        if (error)
                return (ENOMEM);

        SLIST_INIT(&sc->bge_jfree_listhead);
        SLIST_INIT(&sc->bge_jinuse_listhead);

        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array.
         */
        ptr = sc->bge_ldata.bge_jumbo_buf;
        for (i = 0; i < BGE_JSLOTS; i++) {
                sc->bge_cdata.bge_jslots[i] = ptr;
                ptr += BGE_JLEN;
                entry = malloc(sizeof(struct bge_jpool_entry),
                    M_DEVBUF, M_NOWAIT);
                if (entry == NULL) {
                        bge_free_jumbo_mem(sc);
                        sc->bge_ldata.bge_jumbo_buf = NULL;
                        printf("bge%d: no memory for jumbo "
                            "buffer queue!\n", sc->bge_unit);
                        return(ENOBUFS);
                }
                entry->slot = i;
                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                    entry, jpool_entries);
        }

        return(0);
}

static void
bge_free_jumbo_mem(sc)
        struct bge_softc *sc;
{
        int i;
        struct bge_jpool_entry *entry;

        for (i = 0; i < BGE_JSLOTS; i++) {
                entry = SLIST_FIRST(&sc->bge_jfree_listhead);
                SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
                free(entry, M_DEVBUF);
        }

        /* Destroy jumbo buffer block */

        if (sc->bge_ldata.bge_jumbo_buf)
                bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
                    sc->bge_ldata.bge_jumbo_buf,
                    sc->bge_cdata.bge_jumbo_map);

        if (sc->bge_cdata.bge_jumbo_map)
                bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
                    sc->bge_cdata.bge_jumbo_map);

        if (sc->bge_cdata.bge_jumbo_tag)
                bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);

        return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
        struct bge_softc                *sc;
{
        struct bge_jpool_entry   *entry;

        entry = SLIST_FIRST(&sc->bge_jfree_listhead);

        if (entry == NULL) {
                printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
                return(NULL);
        }

        SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
        SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
        return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(buf, args)
        void *buf;
        void *args;
{
        struct bge_jpool_entry *entry;
        struct bge_softc *sc;
        int i;

        /* Extract the softc struct pointer. */
        sc = (struct bge_softc *)args;

        if (sc == NULL)
                panic("bge_jfree: can't find softc pointer!");

        /* calculate the slot this buffer belongs to */

        i = ((vm_offset_t)buf
             - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;

        if ((i < 0) || (i >= BGE_JSLOTS))
                panic("bge_jfree: asked to free buffer that we don't manage!");

        entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
        if (entry == NULL)
                panic("bge_jfree: buffer not in use!");
        entry->slot = i;
        SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

        return;
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
        struct bge_softc        *sc;
        int                     i;
        struct mbuf             *m;
{
        struct mbuf             *m_new = NULL;
        struct bge_rx_bd        *r;
        struct bge_dmamap_arg   ctx;
        int                     error;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        return(ENOBUFS);
                }

                MCLGET(m_new, M_DONTWAIT);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return(ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        sc->bge_cdata.bge_rx_std_chain[i] = m_new;
        r = &sc->bge_ldata.bge_rx_std_ring[i];
        ctx.bge_maxsegs = 1;
        ctx.sc = sc;
        error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
            m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL)
                        m_freem(m_new);
                return(ENOMEM);
        }
        r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
        r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
        r->bge_flags = htole16(BGE_RXBDFLAG_END);
        r->bge_len = htole16(m_new->m_len);
        r->bge_idx = htole16(i);

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
        struct bge_softc *sc;
        int i;
        struct mbuf *m;
{
        struct mbuf *m_new = NULL;
        struct bge_rx_bd *r;
        struct bge_dmamap_arg ctx;
        int error;

        if (m == NULL) {
                caddr_t                 buf = NULL;

                /* Allocate the mbuf. */
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        return(ENOBUFS);
                }

                /* Allocate the jumbo buffer */
                buf = bge_jalloc(sc);
                if (buf == NULL) {
                        m_freem(m_new);
                        printf("bge%d: jumbo allocation failed "
                            "-- packet dropped!\n", sc->bge_unit);
                        return(ENOBUFS);
                }

                /* Attach the buffer to the mbuf. */
                m_new->m_data = (void *) buf;
                m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
                MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
                    (struct bge_softc *)sc, 0, EXT_NET_DRV);
        } else {
                m_new = m;
                m_new->m_data = m_new->m_ext.ext_buf;
                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        /* Set up the descriptor. */
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        ctx.bge_maxsegs = 1;
        ctx.sc = sc;
        error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
            m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL)
                        m_freem(m_new);
                return(ENOMEM);
        }
        r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
        r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
        r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
        r->bge_len = htole16(m_new->m_len);
        r->bge_idx = htole16(i);

        bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_rx_std_dmamap[i]);
                }
                bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }

        return;
}

static int
bge_init_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        int i;
        struct bge_rcb *rcb;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
            sc->bge_cdata.bge_rx_jumbo_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
                            sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
                }
                bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_rx_bd));
        }

        return;
}

static void
bge_free_tx_ring(sc)
        struct bge_softc *sc;
{
        int i;

        if (sc->bge_ldata.bge_tx_ring == NULL)
                return;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[i]);
                }
                bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }

        return;
}

static int
bge_init_tx_ring(sc)
        struct bge_softc *sc;
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;

        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

#define BGE_POLY        0xEDB88320

static u_int32_t
bge_mchash(addr)
        caddr_t addr;
{
        u_int32_t crc;
        int idx, bit;
        u_int8_t data;

        /* Compute CRC for the address value. */
        crc = 0xFFFFFFFF; /* initial value */

        for (idx = 0; idx < 6; idx++) {
                for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
                        crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
        }

        return(crc & 0x7F);
}
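
/*
 * Illustrative sketch (not compiled): bge_mchash() returns a 7-bit
 * value.  bge_setmulti() below splits it so that bits 6:5 select one
 * of the four 32-bit BGE_MAR filter registers and bits 4:0 select the
 * bit within that register.  The value 0x47 here is just a worked
 * example, not anything the hardware mandates.
 */
#ifdef notdef
static void
bge_mchash_example(void)
{
        u_int32_t h = 0x47;     /* hypothetical hash: binary 100 0111 */
        int reg, bit;

        reg = (h & 0x60) >> 5;  /* -> 2, i.e. register BGE_MAR0 + 2*4 */
        bit = h & 0x1F;         /* -> 7, i.e. bit 7 of that register */
}
#endif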

static void
bge_setmulti(sc)
        struct bge_softc *sc;
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        u_int32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        BGE_LOCK_ASSERT(sc);

        ifp = &sc->arpcom.ac_if;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = bge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

        return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
        struct bge_softc *sc;
{
        int                     i;
        u_int32_t               dma_rw_ctl;

        /* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
            BGE_BIGENDIAN_INIT, 4);
#else
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
            BGE_LITTLEENDIAN_INIT, 4);
#endif

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                printf("bge%d: RX CPU self-diagnostics failed!\n",
                    sc->bge_unit);
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
            BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        u_int32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
            BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
        /*
         * Must ensure that we do not cross an 8K (bytes) boundary
         * for DMA reads.  Our highest limit is 1K bytes.  This is a
         * restriction on some ALPHA platforms with early revision
         * 21174 PCI chipsets, such as the AlphaPC 164lx
         */
        PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
            BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        return(0);
}
 1308 
 1309 static int
 1310 bge_blockinit(sc)
 1311         struct bge_softc *sc;
 1312 {
 1313         struct bge_rcb *rcb;
 1314         volatile struct bge_rcb *vrcb;
 1315         int i;
 1316 
 1317         /*
 1318          * Initialize the memory window pointer register so that
 1319          * we can access the first 32K of internal NIC RAM. This will
 1320          * allow us to set up the TX send ring RCBs and the RX return
 1321          * ring RCBs, plus other things which live in NIC memory.
 1322          */
 1323         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
 1324 
 1325         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
 1326 
 1327         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 1328                 /* Configure mbuf memory pool */
 1329                 if (sc->bge_extram) {
 1330                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1331                             BGE_EXT_SSRAM);
 1332                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1333                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1334                         else
 1335                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1336                 } else {
 1337                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1338                             BGE_BUFFPOOL_1);
 1339                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1340                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1341                         else
 1342                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1343                 }
 1344 
 1345                 /* Configure DMA resource pool */
 1346                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1347                     BGE_DMA_DESCRIPTORS);
 1348                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1349         }
 1350 
 1351         /* Configure mbuf pool watermarks */
 1352         if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
 1353                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1354                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1355         } else {
 1356                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1357                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1358         }
 1359         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1360 
 1361         /* Configure DMA resource watermarks */
 1362         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1363         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1364 
 1365         /* Enable buffer manager */
 1366         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 1367                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1368                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1369 
 1370                 /* Poll for buffer manager start indication */
 1371                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1372                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1373                                 break;
 1374                         DELAY(10);
 1375                 }
 1376 
 1377                 if (i == BGE_TIMEOUT) {
 1378                         printf("bge%d: buffer manager failed to start\n",
 1379                             sc->bge_unit);
 1380                         return(ENXIO);
 1381                 }
 1382         }
 1383 
 1384         /* Enable flow-through queues */
 1385         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1386         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1387 
 1388         /* Wait until queue initialization is complete */
 1389         for (i = 0; i < BGE_TIMEOUT; i++) {
 1390                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1391                         break;
 1392                 DELAY(10);
 1393         }
 1394 
 1395         if (i == BGE_TIMEOUT) {
 1396                 printf("bge%d: flow-through queue init failed\n",
 1397                     sc->bge_unit);
 1398                 return(ENXIO);
 1399         }
 1400 
 1401         /* Initialize the standard RX ring control block */
 1402         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
 1403         rcb->bge_hostaddr.bge_addr_lo =
 1404             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
 1405         rcb->bge_hostaddr.bge_addr_hi =
 1406             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
 1407         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1408             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
 1409         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
 1410                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1411         else
 1412                 rcb->bge_maxlen_flags =
 1413                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1414         if (sc->bge_extram)
 1415                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1416         else
 1417                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1418         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1419         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1420 
 1421         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1422         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1423 
 1424         /*
 1425          * Initialize the jumbo RX ring control block
 1426          * We set the 'ring disabled' bit in the flags
 1427          * field until we're actually ready to start
 1428          * using this ring (i.e. once we set the MTU
 1429          * high enough to require it).
 1430          */
 1431         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 1432                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1433 
 1434                 rcb->bge_hostaddr.bge_addr_lo =
 1435                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1436                 rcb->bge_hostaddr.bge_addr_hi =
 1437                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1438                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1439                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1440                     BUS_DMASYNC_PREREAD);
 1441                 rcb->bge_maxlen_flags =
 1442                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
 1443                     BGE_RCB_FLAG_RING_DISABLED);
 1444                 if (sc->bge_extram)
 1445                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1446                 else
 1447                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1448                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1449                     rcb->bge_hostaddr.bge_addr_hi);
 1450                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1451                     rcb->bge_hostaddr.bge_addr_lo);
 1452 
 1453                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1454                     rcb->bge_maxlen_flags);
 1455                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1456 
 1457                 /* Set up dummy disabled mini ring RCB */
 1458                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
 1459                 rcb->bge_maxlen_flags =
 1460                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1461                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1462                     rcb->bge_maxlen_flags);
 1463         }
 1464 
 1465         /*
 1466          * Set the BD ring replenish thresholds. The recommended
 1467          * values are 1/8th the number of descriptors allocated to
 1468          * each ring.
 1469          */
 1470         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1471         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
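        /*
         * [Editor's note] Assuming the customary ring sizes of 512
         * standard and 256 jumbo descriptors (an assumption about
         * if_bgereg.h, not verified here), the two writes above
         * program thresholds of 512/8 = 64 and 256/8 = 32
         * descriptors respectively.
         */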
 1472 
 1473         /*
 1474          * Disable all unused send rings by setting the 'ring disabled'
 1475          * bit in the flags field of all the TX send ring control blocks.
 1476          * These are located in NIC memory.
 1477          */
 1478         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1479             BGE_SEND_RING_RCB);
 1480         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1481                 vrcb->bge_maxlen_flags =
 1482                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1483                 vrcb->bge_nicaddr = 0;
 1484                 vrcb++;
 1485         }
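        /*
         * [Editor's sketch] These send-ring RCBs live in NIC-internal
         * memory, reached through the PCI memory window mapped at
         * sc->bge_vhandle. The pointer walk above is equivalent to
         * indexing each RCB-sized slot by hand:
         */
#ifdef notdef
        vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle +
            BGE_MEMWIN_START + BGE_SEND_RING_RCB +
            i * sizeof(struct bge_rcb));
#endif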
 1486 
 1487         /* Configure TX RCB 0 (we use only the first ring) */
 1488         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1489             BGE_SEND_RING_RCB);
 1490         vrcb->bge_hostaddr.bge_addr_lo =
 1491             htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
 1492         vrcb->bge_hostaddr.bge_addr_hi =
 1493             htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
 1494         vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
 1495         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 1496                 vrcb->bge_maxlen_flags =
 1497                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
 1498 
 1499         /* Disable all unused RX return rings */
 1500         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1501             BGE_RX_RETURN_RING_RCB);
 1502         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1503                 vrcb->bge_hostaddr.bge_addr_hi = 0;
 1504                 vrcb->bge_hostaddr.bge_addr_lo = 0;
 1505                 vrcb->bge_maxlen_flags =
 1506                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1507                     BGE_RCB_FLAG_RING_DISABLED);
 1508                 vrcb->bge_nicaddr = 0;
 1509                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1510                     (i * (sizeof(u_int64_t))), 0);
 1511                 vrcb++;
 1512         }
 1513 
 1514         /* Initialize RX ring indexes */
 1515         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1516         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1517         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1518 
 1519         /*
 1520          * Set up RX return ring 0.
 1521          * Note that the NIC address for RX return rings is 0x00000000.
 1522          * The return rings live entirely within the host, so the
 1523          * nicaddr field in the RCB isn't used.
 1524          */
 1525         vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
 1526             BGE_RX_RETURN_RING_RCB);
 1527         vrcb->bge_hostaddr.bge_addr_lo =
 1528             BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
 1529         vrcb->bge_hostaddr.bge_addr_hi =
 1530             BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
 1531         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 1532             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 1533         vrcb->bge_nicaddr = 0x00000000;
 1534         vrcb->bge_maxlen_flags =
 1535             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
 1536 
 1537         /* Set random backoff seed for TX */
 1538         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1539             sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
 1540             sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
 1541             sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
 1542             BGE_TX_BACKOFF_SEED_MASK);
 1543 
 1544         /* Set inter-packet gap */
 1545         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
 1546 
 1547         /*
 1548          * Specify which ring to use for packets that don't match
 1549          * any RX rules.
 1550          */
 1551         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1552 
 1553         /*
 1554          * Configure number of RX lists. One interrupt distribution
 1555          * list, sixteen active lists, one bad frames class.
 1556          */
 1557         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1558 
 1559         /* Initialize RX list placement stats mask. */
 1560         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1561         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1562 
 1563         /* Disable host coalescing until we get it set up */
 1564         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1565 
 1566         /* Poll to make sure it's shut down. */
 1567         for (i = 0; i < BGE_TIMEOUT; i++) {
 1568                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1569                         break;
 1570                 DELAY(10);
 1571         }
 1572 
 1573         if (i == BGE_TIMEOUT) {
 1574                 printf("bge%d: host coalescing engine failed to idle\n",
 1575                     sc->bge_unit);
 1576                 return(ENXIO);
 1577         }
 1578 
 1579         /* Set up host coalescing defaults */
 1580         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1581         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1582         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1583         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1584         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 1585                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1586                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1587         }
 1588         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1589         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
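        /*
         * [Editor's note] The coalescing knobs trade interrupt rate
         * against latency: the chip interrupts once either the tick
         * counter or the buffered-descriptor count crosses its
         * threshold. With the defaults set in bge_attach() below
         * (150 ticks, 64 RX BDs, 128 TX BDs), light traffic is
         * batched by the timer while bursts trip the BD limits
         * first. A hypothetical low-latency tuning, for contrast:
         */
#ifdef notdef
        sc->bge_rx_coal_ticks = 1;      /* interrupt almost per packet */
        sc->bge_rx_max_coal_bds = 1;
#endif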
 1590 
 1591         /* Set up address of statistics block */
 1592         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 1593                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
 1594                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
 1595                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
 1596                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
 1597                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1598                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1599                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1600         }
 1601 
 1602         /* Set up address of status block */
 1603         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
 1604             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
 1605         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
 1606             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
 1607         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 1608             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 1609         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
 1610         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
 1611 
 1612         /* Turn on host coalescing state machine */
 1613         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1614 
 1615         /* Turn on RX BD completion state machine and enable attentions */
 1616         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1617             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1618 
 1619         /* Turn on RX list placement state machine */
 1620         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1621 
 1622         /* Turn on RX list selector state machine. */
 1623         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 1624                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1625 
 1626         /* Turn on DMA, clear stats */
 1627         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1628             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1629             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1630             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1631             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1632 
 1633         /* Set misc. local control, enable interrupts on attentions */
 1634         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
 1635 
 1636 #ifdef notdef
 1637         /* Assert GPIO pins for PHY reset */
 1638         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1639             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1640         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1641             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1642 #endif
 1643 
 1644         /* Turn on DMA completion state machine */
 1645         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 1646                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1647 
 1648         /* Turn on write DMA state machine */
 1649         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1650             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1651         
 1652         /* Turn on read DMA state machine */
 1653         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1654             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1655 
 1656         /* Turn on RX data completion state machine */
 1657         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1658 
 1659         /* Turn on RX BD initiator state machine */
 1660         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1661 
 1662         /* Turn on RX data and RX BD initiator state machine */
 1663         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1664 
 1665         /* Turn on Mbuf cluster free state machine */
 1666         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 1667                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1668 
 1669         /* Turn on send BD completion state machine */
 1670         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1671 
 1672         /* Turn on send data completion state machine */
 1673         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1674 
 1675         /* Turn on send data initiator state machine */
 1676         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1677 
 1678         /* Turn on send BD initiator state machine */
 1679         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1680 
 1681         /* Turn on send BD selector state machine */
 1682         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1683 
 1684         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1685         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1686             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1687 
 1688         /* Ack/clear link change events. */
 1689         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1690             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1691             BGE_MACSTAT_LINK_CHANGED);
 1692         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1693 
 1694         /* Enable PHY auto polling (for MII/GMII only) */
 1695         if (sc->bge_tbi) {
 1696                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1697         } else {
 1698                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
 1699                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
 1700                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1701                             BGE_EVTENB_MI_INTERRUPT);
 1702         }
 1703 
 1704         /* Enable link state change attentions. */
 1705         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1706 
 1707         return(0);
 1708 }
 1709 
 1710 /*
 1711  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 1712  * against our list and return its name if we find a match. Note
 1713  * that since the Broadcom controller contains VPD support, we
 1714  * can get the device name string from the controller itself instead
 1715  * of the compiled-in string. This is a little slow, but it guarantees
 1716  * we'll always announce the right product name.
 1717  */
 1718 static int
 1719 bge_probe(dev)
 1720         device_t dev;
 1721 {
 1722         struct bge_type *t;
 1723         struct bge_softc *sc;
 1724         char *descbuf;
 1725 
 1726         t = bge_devs;
 1727 
 1728         sc = device_get_softc(dev);
 1729         bzero(sc, sizeof(struct bge_softc));
 1730         sc->bge_unit = device_get_unit(dev);
 1731         sc->bge_dev = dev;
 1732 
 1733         while(t->bge_name != NULL) {
 1734                 if ((pci_get_vendor(dev) == t->bge_vid) &&
 1735                     (pci_get_device(dev) == t->bge_did)) {
 1736 #ifdef notdef
 1737                         bge_vpd_read(sc);
 1738                         device_set_desc(dev, sc->bge_vpd_prodname);
 1739 #endif
 1740                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
 1741                         if (descbuf == NULL)
 1742                                 return(ENOMEM);
 1743                         snprintf(descbuf, BGE_DEVDESC_MAX,
 1744                             "%s, ASIC rev. %#04x", t->bge_name,
 1745                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
 1746                         device_set_desc_copy(dev, descbuf);
 1747                         if (pci_get_subvendor(dev) == DELL_VENDORID)
 1748                                 sc->bge_no_3_led = 1;
 1749                         free(descbuf, M_TEMP);
 1750                         return(0);
 1751                 }
 1752                 t++;
 1753         }
 1754 
 1755         return(ENXIO);
 1756 }
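/*
 * [Editor's sketch] bge_devs, walked above, is a sentinel-terminated
 * table defined earlier in this file; judging from the fields used in
 * bge_probe(), each entry pairs a PCI vendor/device ID with a name,
 * along these (illustrative, not verbatim) lines:
 */
#ifdef notdef
static struct bge_type bge_devs_example[] = {
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, "Broadcom BCM5700" },
        { 0, 0, NULL }          /* sentinel: bge_name == NULL */
};
#endif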
 1757 
 1758 static void
 1759 bge_dma_free(sc)
 1760         struct bge_softc *sc;
 1761 {
 1762         int i;
 1763 
 1764 
 1765         /* Destroy DMA maps for RX buffers */
 1766 
 1767         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1768                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
 1769                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1770                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1771         }
 1772 
 1773         /* Destroy DMA maps for jumbo RX buffers */
 1774 
 1775         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1776                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
 1777                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
 1778                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1779         }
 1780 
 1781         /* Destroy DMA maps for TX buffers */
 1782 
 1783         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1784                 if (sc->bge_cdata.bge_tx_dmamap[i])
 1785                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1786                             sc->bge_cdata.bge_tx_dmamap[i]);
 1787         }
 1788 
 1789         if (sc->bge_cdata.bge_mtag)
 1790                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1791 
 1792 
 1793         /* Destroy standard RX ring */
 1794 
 1795         if (sc->bge_ldata.bge_rx_std_ring)
 1796                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
 1797                     sc->bge_ldata.bge_rx_std_ring,
 1798                     sc->bge_cdata.bge_rx_std_ring_map);
 1799 
 1800         if (sc->bge_cdata.bge_rx_std_ring_map) {
 1801                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
 1802                     sc->bge_cdata.bge_rx_std_ring_map);
 1803                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
 1804                     sc->bge_cdata.bge_rx_std_ring_map);
 1805         }
 1806 
 1807         if (sc->bge_cdata.bge_rx_std_ring_tag)
 1808                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
 1809 
 1810         /* Destroy jumbo RX ring */
 1811 
 1812         if (sc->bge_ldata.bge_rx_jumbo_ring)
 1813                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1814                     sc->bge_ldata.bge_rx_jumbo_ring,
 1815                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1816 
 1817         if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
 1818                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1819                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1820                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1821                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1822         }
 1823 
 1824         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
 1825                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1826 
 1827         /* Destroy RX return ring */
 1828 
 1829         if (sc->bge_ldata.bge_rx_return_ring)
 1830                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
 1831                     sc->bge_ldata.bge_rx_return_ring,
 1832                     sc->bge_cdata.bge_rx_return_ring_map);
 1833 
 1834         if (sc->bge_cdata.bge_rx_return_ring_map) {
 1835                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
 1836                     sc->bge_cdata.bge_rx_return_ring_map);
 1837                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
 1838                     sc->bge_cdata.bge_rx_return_ring_map);
 1839         }
 1840 
 1841         if (sc->bge_cdata.bge_rx_return_ring_tag)
 1842                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
 1843 
 1844         /* Destroy TX ring */
 1845 
 1846         if (sc->bge_ldata.bge_tx_ring)
 1847                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
 1848                     sc->bge_ldata.bge_tx_ring,
 1849                     sc->bge_cdata.bge_tx_ring_map);
 1850 
 1851         if (sc->bge_cdata.bge_tx_ring_map) {
 1852                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
 1853                     sc->bge_cdata.bge_tx_ring_map);
 1854                 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
 1855                     sc->bge_cdata.bge_tx_ring_map);
 1856         }
 1857 
 1858         if (sc->bge_cdata.bge_tx_ring_tag)
 1859                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
 1860 
 1861         /* Destroy status block */
 1862 
 1863         if (sc->bge_ldata.bge_status_block)
 1864                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
 1865                     sc->bge_ldata.bge_status_block,
 1866                     sc->bge_cdata.bge_status_map);
 1867 
 1868         if (sc->bge_cdata.bge_status_map) {
 1869                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
 1870                     sc->bge_cdata.bge_status_map);
 1871                 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
 1872                     sc->bge_cdata.bge_status_map);
 1873         }
 1874 
 1875         if (sc->bge_cdata.bge_status_tag)
 1876                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
 1877 
 1878         /* Destroy statistics block */
 1879 
 1880         if (sc->bge_ldata.bge_stats)
 1881                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
 1882                     sc->bge_ldata.bge_stats,
 1883                     sc->bge_cdata.bge_stats_map);
 1884 
 1885         if (sc->bge_cdata.bge_stats_map) {
 1886                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
 1887                     sc->bge_cdata.bge_stats_map);
 1888                 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
 1889                     sc->bge_cdata.bge_stats_map);
 1890         }
 1891 
 1892         if (sc->bge_cdata.bge_stats_tag)
 1893                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
 1894 
 1895         /* Destroy the parent tag */
 1896 
 1897         if (sc->bge_cdata.bge_parent_tag)
 1898                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
 1899 
 1900         return;
 1901 }
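/*
 * [Editor's note] Canonical busdma teardown runs in the reverse order
 * of setup: bus_dmamap_unload(), then bus_dmamem_free() (which, if
 * memory serves, also releases maps created by bus_dmamem_alloc()),
 * then bus_dma_tag_destroy(). A minimal sketch with hypothetical
 * tag/vaddr/map variables:
 */
#ifdef notdef
        bus_dmamap_unload(tag, map);
        bus_dmamem_free(tag, vaddr, map);
        bus_dma_tag_destroy(tag);
#endif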
 1902 
 1903 static int
 1904 bge_dma_alloc(dev)
 1905         device_t dev;
 1906 {
 1907         struct bge_softc *sc;
 1908         int nseg, i, error;
 1909         struct bge_dmamap_arg ctx;
 1910 
 1911         sc = device_get_softc(dev);
 1912 
 1913         /*
 1914          * Allocate the parent bus DMA tag appropriate for PCI.
 1915          */
 1916 #define BGE_NSEG_NEW 32
 1917         error = bus_dma_tag_create(NULL,        /* parent */
 1918                         PAGE_SIZE, 0,           /* alignment, boundary */
 1919                         BUS_SPACE_MAXADDR,      /* lowaddr */
 1920                         BUS_SPACE_MAXADDR_32BIT,/* highaddr */
 1921                         NULL, NULL,             /* filter, filterarg */
 1922                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
 1923                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1924                         BUS_DMA_ALLOCNOW,       /* flags */
 1925                         NULL, NULL,             /* lockfunc, lockarg */
 1926                         &sc->bge_cdata.bge_parent_tag);
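        /*
         * [Editor's note] The error return of the parent-tag creation
         * above is never checked before the tag is used; a check in
         * the same style as the ones below would be (sketch):
         */
#ifdef notdef
        if (error) {
                device_printf(dev, "could not allocate parent dma tag\n");
                return (ENOMEM);
        }
#endif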
 1927 
 1928         /*
 1929          * Create tag for RX mbufs.
 1930          */
 1931         nseg = 32;
 1932         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
 1933             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1934             NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
 1935             &sc->bge_cdata.bge_mtag);
 1936 
 1937         if (error) {
 1938                 device_printf(dev, "could not allocate dma tag\n");
 1939                 return (ENOMEM);
 1940         }
 1941 
 1942         /* Create DMA maps for RX buffers */
 1943 
 1944         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1945                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1946                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
 1947                 if (error) {
 1948                         device_printf(dev, "can't create DMA map for RX\n");
 1949                         return(ENOMEM);
 1950                 }
 1951         }
 1952 
 1953         /* Create DMA maps for TX buffers */
 1954 
 1955         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1956                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1957                             &sc->bge_cdata.bge_tx_dmamap[i]);
 1958                 if (error) {
 1959                         device_printf(dev, "can't create DMA map for RX\n");
 1960                         return(ENOMEM);
 1961                 }
 1962         }
 1963 
 1964         /* Create tag for standard RX ring */
 1965 
 1966         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1967             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1968             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
 1969             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
 1970 
 1971         if (error) {
 1972                 device_printf(dev, "could not allocate dma tag\n");
 1973                 return (ENOMEM);
 1974         }
 1975 
 1976         /* Allocate DMA'able memory for standard RX ring */
 1977 
 1978         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
 1979             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
 1980             &sc->bge_cdata.bge_rx_std_ring_map);
 1981         if (error)
 1982                 return (ENOMEM);
 1983 
 1984         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
 1985 
 1986         /* Load the address of the standard RX ring */
 1987 
 1988         ctx.bge_maxsegs = 1;
 1989         ctx.sc = sc;
 1990 
 1991         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
 1992             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
 1993             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 1994 
 1995         if (error)
 1996                 return (ENOMEM);
 1997 
 1998         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
 1999 
 2000         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 2001 
 2002                 /*
 2003                  * Create tag for jumbo mbufs.
 2004                  * This is really a bit of a kludge. We allocate a special
 2005                  * jumbo buffer pool which (thanks to the way our DMA
 2006                  * memory allocation works) will consist of contiguous
 2007                  * pages. This means that even though a jumbo buffer might
 2008                  * be larger than a page size, we don't really need to
 2009                  * map it into more than one DMA segment. However, the
 2010                  * default mbuf tag will result in multi-segment mappings,
 2011                  * so we have to create a special jumbo mbuf tag that
 2012                  * lets us get away with mapping the jumbo buffers as
 2013                  * a single segment. I think eventually the driver should
 2014                  * be changed so that it uses ordinary mbufs and cluster
 2015                  * buffers, i.e. jumbo frames can span multiple DMA
 2016                  * descriptors. But that's a project for another day.
 2017                  */
 2018 
 2019                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2020                     ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2021                     NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
 2022                     &sc->bge_cdata.bge_mtag_jumbo);
 2023 
 2024                 if (error) {
 2025                         device_printf(dev, "could not allocate dma tag\n");
 2026                         return (ENOMEM);
 2027                 }
 2028 
 2029                 /* Create tag for jumbo RX ring */
 2030 
 2031                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2032                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2033                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
 2034                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
 2035 
 2036                 if (error) {
 2037                         device_printf(dev, "could not allocate dma tag\n");
 2038                         return (ENOMEM);
 2039                 }
 2040 
 2041                 /* Allocate DMA'able memory for jumbo RX ring */
 2042 
 2043                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2044                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
 2045                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
 2046                 if (error)
 2047                         return (ENOMEM);
 2048 
 2049                 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
 2050                     BGE_JUMBO_RX_RING_SZ);
 2051 
 2052                 /* Load the address of the jumbo RX ring */
 2053 
 2054                 ctx.bge_maxsegs = 1;
 2055                 ctx.sc = sc;
 2056 
 2057                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2058                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2059                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
 2060                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2061 
 2062                 if (error)
 2063                         return (ENOMEM);
 2064 
 2065                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
 2066 
 2067                 /* Create DMA maps for jumbo RX buffers */
 2068 
 2069                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 2070                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
 2071                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 2072                         if (error) {
 2073                                 device_printf(dev,
 2074                                     "can't create DMA map for RX\n");
 2075                                 return(ENOMEM);
 2076                         }
 2077                 }
 2078 
 2079         }
 2080 
 2081         /* Create tag for RX return ring */
 2082 
 2083         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2084             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2085             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
 2086             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
 2087 
 2088         if (error) {
 2089                 device_printf(dev, "could not allocate dma tag\n");
 2090                 return (ENOMEM);
 2091         }
 2092 
 2093         /* Allocate DMA'able memory for RX return ring */
 2094 
 2095         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
 2096             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
 2097             &sc->bge_cdata.bge_rx_return_ring_map);
 2098         if (error)
 2099                 return (ENOMEM);
 2100 
 2101         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
 2102             BGE_RX_RTN_RING_SZ(sc));
 2103 
 2104         /* Load the address of the RX return ring */
 2105 
 2106         ctx.bge_maxsegs = 1;
 2107         ctx.sc = sc;
 2108 
 2109         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
 2110             sc->bge_cdata.bge_rx_return_ring_map,
 2111             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
 2112             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2113 
 2114         if (error)
 2115                 return (ENOMEM);
 2116 
 2117         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
 2118 
 2119         /* Create tag for TX ring */
 2120 
 2121         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2122             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2123             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
 2124             &sc->bge_cdata.bge_tx_ring_tag);
 2125 
 2126         if (error) {
 2127                 device_printf(dev, "could not allocate dma tag\n");
 2128                 return (ENOMEM);
 2129         }
 2130 
 2131         /* Allocate DMA'able memory for TX ring */
 2132 
 2133         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
 2134             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
 2135             &sc->bge_cdata.bge_tx_ring_map);
 2136         if (error)
 2137                 return (ENOMEM);
 2138 
 2139         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
 2140 
 2141         /* Load the address of the TX ring */
 2142 
 2143         ctx.bge_maxsegs = 1;
 2144         ctx.sc = sc;
 2145 
 2146         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
 2147             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
 2148             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2149 
 2150         if (error)
 2151                 return (ENOMEM);
 2152 
 2153         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
 2154 
 2155         /* Create tag for status block */
 2156 
 2157         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2158             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2159             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
 2160             NULL, NULL, &sc->bge_cdata.bge_status_tag);
 2161 
 2162         if (error) {
 2163                 device_printf(dev, "could not allocate dma tag\n");
 2164                 return (ENOMEM);
 2165         }
 2166 
 2167         /* Allocate DMA'able memory for status block */
 2168 
 2169         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
 2170             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
 2171             &sc->bge_cdata.bge_status_map);
 2172         if (error)
 2173                 return (ENOMEM);
 2174 
 2175         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
 2176 
 2177         /* Load the address of the status block */
 2178 
 2179         ctx.sc = sc;
 2180         ctx.bge_maxsegs = 1;
 2181 
 2182         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
 2183             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
 2184             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2185 
 2186         if (error)
 2187                 return (ENOMEM);
 2188 
 2189         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
 2190 
 2191         /* Create tag for statistics block */
 2192 
 2193         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2194             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2195             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 
 2196             &sc->bge_cdata.bge_stats_tag);
 2197 
 2198         if (error) {
 2199                 device_printf(dev, "could not allocate dma tag\n");
 2200                 return (ENOMEM);
 2201         }
 2202 
 2203         /* Allocate DMA'able memory for statistics block */
 2204 
 2205         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
 2206             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
 2207             &sc->bge_cdata.bge_stats_map);
 2208         if (error)
 2209                 return (ENOMEM);
 2210 
 2211         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
 2212 
 2213         /* Load the address of the statistics block */
 2214 
 2215         ctx.sc = sc;
 2216         ctx.bge_maxsegs = 1;
 2217 
 2218         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
 2219             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
 2220             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2221 
 2222         if (error)
 2223                 return (ENOMEM);
 2224 
 2225         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
 2226 
 2227         return(0);
 2228 }
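/*
 * [Editor's sketch] Each bus_dmamap_load() above reports the bus
 * address through a callback; bge_dma_map_addr (defined earlier in
 * this file) presumably just records the first segment into the
 * bge_dmamap_arg it is handed, roughly:
 */
#ifdef notdef
static void
bge_dma_map_addr_sketch(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
        struct bge_dmamap_arg *ctx;

        if (error)
                return;
        ctx = arg;
        ctx->bge_busaddr = segs[0].ds_addr;     /* single-segment load */
}
#endif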
 2229 
 2230 static int
 2231 bge_attach(dev)
 2232         device_t dev;
 2233 {
 2234         struct ifnet *ifp;
 2235         struct bge_softc *sc;
 2236         u_int32_t hwcfg = 0;
 2237         u_int32_t mac_addr = 0;
 2238         int unit, error = 0, rid;
 2239 
 2240         sc = device_get_softc(dev);
 2241         unit = device_get_unit(dev);
 2242         sc->bge_dev = dev;
 2243         sc->bge_unit = unit;
 2244 
 2245         /*
 2246          * Map control/status registers.
 2247          */
 2248         pci_enable_busmaster(dev);
 2249 
 2250         rid = BGE_PCI_BAR0;
 2251         sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
 2252             0, ~0, 1, RF_ACTIVE|PCI_RF_DENSE);
 2253 
 2254         if (sc->bge_res == NULL) {
 2255                 printf ("bge%d: couldn't map memory\n", unit);
 2256                 error = ENXIO;
 2257                 goto fail;
 2258         }
 2259 
 2260         sc->bge_btag = rman_get_bustag(sc->bge_res);
 2261         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
 2262         sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
 2263 
 2264         /* Allocate interrupt */
 2265         rid = 0;
 2266         
 2267         sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
 2268             RF_SHAREABLE | RF_ACTIVE);
 2269 
 2270         if (sc->bge_irq == NULL) {
 2271                 printf("bge%d: couldn't map interrupt\n", unit);
 2272                 error = ENXIO;
 2273                 goto fail;
 2274         }
 2275 
 2276         sc->bge_unit = unit;
 2277 
 2278         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
 2279 
 2280         /* Try to reset the chip. */
 2281         bge_reset(sc);
 2282 
 2283         if (bge_chipinit(sc)) {
 2284                 printf("bge%d: chip initialization failed\n", sc->bge_unit);
 2285                 bge_release_resources(sc);
 2286                 error = ENXIO;
 2287                 goto fail;
 2288         }
 2289 
 2290         /*
 2291          * Get station address from the EEPROM.
 2292          */
 2293         mac_addr = bge_readmem_ind(sc, 0x0c14);
 2294         if ((mac_addr >> 16) == 0x484b) {
 2295                 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
 2296                 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
 2297                 mac_addr = bge_readmem_ind(sc, 0x0c18);
 2298                 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
 2299                 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
 2300                 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
 2301                 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
 2302         } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 2303             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2304                 printf("bge%d: failed to read station address\n", unit);
 2305                 bge_release_resources(sc);
 2306                 error = ENXIO;
 2307                 goto fail;
 2308         }
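        /*
         * [Editor's note] 0x484b is ASCII "HK", apparently a
         * signature the bootcode leaves in NIC memory when it has
         * already deposited a station address there; the shifts above
         * then unpack the six MAC bytes in big-endian order. Absent
         * the signature, the EEPROM is consulted instead.
         */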
 2309 
 2310         /*
 2311          * A Broadcom chip was detected. Inform the world.
 2312          */
 2313         printf("bge%d: Ethernet address: %6D\n", unit,
 2314             sc->arpcom.ac_enaddr, ":");
 2315 
 2316         /* Save ASIC rev. */
 2317 
 2318         sc->bge_chipid =
 2319             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
 2320             BGE_PCIMISCCTL_ASICREV;
 2321         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
 2322         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
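        /*
         * [Editor's sketch] The ASIC and chip revisions are subfields
         * of the chip ID word saved above. The usual if_bgereg.h
         * definitions (an assumption, not quoted from this tree) are
         * along the lines of:
         */
#ifdef notdef
#define BGE_ASICREV(x)  ((x) >> 28)     /* top nibble */
#define BGE_CHIPREV(x)  ((x) >> 24)     /* top byte */
#endif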
 2323 
 2324         /* 5705 limits RX return ring to 512 entries. */
 2325         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
 2326                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 2327         else
 2328                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 2329 
 2330         if (bge_dma_alloc(dev)) {
 2331                 printf ("bge%d: failed to allocate DMA resources\n",
 2332                     sc->bge_unit);
 2333                 bge_release_resources(sc);
 2334                 error = ENXIO;
 2335                 goto fail;
 2336         }
 2337 
 2338         /*
 2339          * Try to allocate memory for jumbo buffers.
 2340          * The 5705 does not appear to support jumbo frames.
 2341          */
 2342         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 2343                 if (bge_alloc_jumbo_mem(sc)) {
 2344                         printf("bge%d: jumbo buffer allocation "
 2345                             "failed\n", sc->bge_unit);
 2346                         bge_release_resources(sc);
 2347                         error = ENXIO;
 2348                         goto fail;
 2349                 }
 2350         }
 2351 
 2352         /* Set default tunable values. */
 2353         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2354         sc->bge_rx_coal_ticks = 150;
 2355         sc->bge_tx_coal_ticks = 150;
 2356         sc->bge_rx_max_coal_bds = 64;
 2357         sc->bge_tx_max_coal_bds = 128;
 2358 
 2359         /* Set up ifnet structure */
 2360         ifp = &sc->arpcom.ac_if;
 2361         ifp->if_softc = sc;
 2362         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2363         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2364         ifp->if_ioctl = bge_ioctl;
 2365         ifp->if_output = ether_output;
 2366         ifp->if_start = bge_start;
 2367         ifp->if_watchdog = bge_watchdog;
 2368         ifp->if_init = bge_init;
 2369         ifp->if_mtu = ETHERMTU;
 2370         ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
 2371         ifp->if_hwassist = BGE_CSUM_FEATURES;
 2372         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
 2373             IFCAP_VLAN_MTU;
 2374         ifp->if_capenable = ifp->if_capabilities;
 2375 
 2376         /*
 2377          * Figure out what sort of media we have by checking the
 2378          * hardware config word in the first 32k of NIC internal memory,
 2379          * or fall back to examining the EEPROM if necessary.
 2380          * Note: on some BCM5700 cards, this value appears to be unset.
 2381          * If that's the case, we have to rely on identifying the NIC
 2382          * by its PCI subsystem ID, as we do below for the SysKonnect
 2383          * SK-9D41.
 2384          */
 2385         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
 2386                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2387         else {
 2388                 bge_read_eeprom(sc, (caddr_t)&hwcfg,
 2389                                 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
 2390                 hwcfg = ntohl(hwcfg);
 2391         }
 2392 
 2393         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2394                 sc->bge_tbi = 1;
 2395 
 2396         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2397         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
 2398                 sc->bge_tbi = 1;
 2399 
 2400         if (sc->bge_tbi) {
 2401                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
 2402                     bge_ifmedia_upd, bge_ifmedia_sts);
 2403                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2404                 ifmedia_add(&sc->bge_ifmedia,
 2405                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2406                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2407                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2408         } else {
 2409                 /*
 2410                  * Do transceiver setup.
 2411                  */
 2412                 if (mii_phy_probe(dev, &sc->bge_miibus,
 2413                     bge_ifmedia_upd, bge_ifmedia_sts)) {
 2414                         printf("bge%d: MII without any PHY!\n", sc->bge_unit);
 2415                         bge_release_resources(sc);
 2416                         bge_free_jumbo_mem(sc);
 2417                         error = ENXIO;
 2418                         goto fail;
 2419                 }
 2420         }
 2421 
 2422         /*
 2423          * When using the BCM5701 in PCI-X mode, data corruption has
 2424          * been observed in the first few bytes of some received packets.
 2425          * Aligning the packet buffer in memory eliminates the corruption.
 2426          * Unfortunately, this misaligns the packet payloads.  On platforms
 2427          * which do not support unaligned accesses, we will realign the
 2428          * payloads by copying the received packets.
 2429          */
 2430         switch (sc->bge_chipid) {
 2431         case BGE_CHIPID_BCM5701_A0:
 2432         case BGE_CHIPID_BCM5701_B0:
 2433         case BGE_CHIPID_BCM5701_B2:
 2434         case BGE_CHIPID_BCM5701_B5:
 2435                 /* If in PCI-X mode, work around the alignment bug. */
 2436                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
 2437                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2438                     BGE_PCISTATE_PCI_BUSSPEED)
 2439                         sc->bge_rx_alignment_bug = 1;
 2440                 break;
 2441         }
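        /*
         * [Editor's note] The pcistate test above distinguishes bus
         * modes: BGE_PCISTATE_PCI_BUSMODE set apparently means
         * conventional PCI, so a masked value equal to
         * BGE_PCISTATE_PCI_BUSSPEED alone (bus-mode bit clear,
         * bus-speed bit set) identifies the PCI-X case the workaround
         * targets.
         */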
 2442 
 2443         /*
 2444          * Call MI attach routine.
 2445          */
 2446         ether_ifattach(ifp, sc->arpcom.ac_enaddr);
 2447         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
 2448 
 2449         /*
 2450          * Hookup IRQ last.
 2451          */
 2452         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
 2453            bge_intr, sc, &sc->bge_intrhand);
 2454 
 2455         if (error) {
 2456                 bge_release_resources(sc);
 2457                 printf("bge%d: couldn't set up irq\n", unit);
 2458         }
 2459 
 2460 fail:
 2461         return(error);
 2462 }
 2463 
 2464 static int
 2465 bge_detach(dev)
 2466         device_t dev;
 2467 {
 2468         struct bge_softc *sc;
 2469         struct ifnet *ifp;
 2470 
 2471         sc = device_get_softc(dev);
 2472         ifp = &sc->arpcom.ac_if;
 2473 
 2474         BGE_LOCK(sc);
 2475         bge_stop(sc);
 2476         bge_reset(sc);
 2477         BGE_UNLOCK(sc);
 2478 
 2479         ether_ifdetach(ifp);
 2480 
 2481         if (sc->bge_tbi) {
 2482                 ifmedia_removeall(&sc->bge_ifmedia);
 2483         } else {
 2484                 bus_generic_detach(dev);
 2485                 device_delete_child(dev, sc->bge_miibus);
 2486         }
 2487 
 2488         bge_release_resources(sc);
 2489         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 2490                 bge_free_jumbo_mem(sc);
 2491 
 2492         return(0);
 2493 }
 2494 
 2495 static void
 2496 bge_release_resources(sc)
 2497         struct bge_softc *sc;
 2498 {
 2499         device_t dev;
 2500 
 2501         dev = sc->bge_dev;
 2502 
 2503         if (sc->bge_vpd_prodname != NULL)
 2504                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2505 
 2506         if (sc->bge_vpd_readonly != NULL)
 2507                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2508 
 2509         if (sc->bge_intrhand != NULL)
 2510                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
 2511 
 2512         if (sc->bge_irq != NULL)
 2513                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
 2514 
 2515         if (sc->bge_res != NULL)
 2516                 bus_release_resource(dev, SYS_RES_MEMORY,
 2517                     BGE_PCI_BAR0, sc->bge_res);
 2518 
 2519         bge_dma_free(sc);
 2520 
 2521         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
 2522                 BGE_LOCK_DESTROY(sc);
 2523 
 2524         return;
 2525 }
 2526 
 2527 static void
 2528 bge_reset(sc)
 2529         struct bge_softc *sc;
 2530 {
 2531         device_t dev;
 2532         u_int32_t cachesize, command, pcistate;
 2533         int i, val = 0;
 2534 
 2535         dev = sc->bge_dev;
 2536 
 2537         /* Save some important PCI state. */
 2538         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
 2539         command = pci_read_config(dev, BGE_PCI_CMD, 4);
 2540         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
 2541 
 2542         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2543             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2544             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2545 
 2546         /* Issue global reset */
 2547         bge_writereg_ind(sc, BGE_MISC_CFG,
 2548             BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
 2549 
 2550         DELAY(1000);
 2551 
 2552         /* Reset some of the PCI state that got zapped by reset */
 2553         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2554             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2555             BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2556         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
 2557         pci_write_config(dev, BGE_PCI_CMD, command, 4);
 2558         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2559 
 2560         /*
 2561          * Prevent PXE restart: write a magic number to the
 2562          * general communications memory at 0xB50.
 2563          */
 2564         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2565         /*
 2566          * Poll the value location we just wrote until
 2567          * we see the 1's complement of the magic number.
 2568          * This indicates that the firmware initialization
 2569          * is complete.
 2570          */
 2571         for (i = 0; i < BGE_TIMEOUT; i++) {
 2572                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2573                 if (val == ~BGE_MAGIC_NUMBER)
 2574                         break;
 2575                 DELAY(10);
 2576         }
 2577         
 2578         if (i == BGE_TIMEOUT) {
 2579                 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
 2580                 return;
 2581         }
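        /*
         * [Editor's note] The bootcode overwrites the magic number
         * with its one's complement when initialization finishes, so
         * seeing ~BGE_MAGIC_NUMBER is the all-clear. (The value is
         * believed to be 0x4B657654, ASCII "KevT" -- an assumption
         * about if_bgereg.h, not verified against this tree.)
         */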
 2582 
 2583         /*
 2584          * XXX Wait for the value of the PCISTATE register to
 2585          * return to its original pre-reset state. This is a
 2586          * fairly good indicator of reset completion. If we don't
 2587          * wait for the reset to fully complete, trying to read
 2588          * from the device's non-PCI registers may yield garbage
 2589          * results.
 2590          */
 2591         for (i = 0; i < BGE_TIMEOUT; i++) {
 2592                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
 2593                         break;
 2594                 DELAY(10);
 2595         }
 2596 
 2597         /* Enable memory arbiter. */
 2598         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 2599                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2600 
 2601         /* Fix up byte swapping */
 2602         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
 2603             BGE_MODECTL_BYTESWAP_DATA);
 2604 
 2605         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2606 
 2607         DELAY(10000);
 2608 
 2609         return;
 2610 }
 2611 
 2612 /*
 2613  * Frame reception handling. This is called if there's a frame
 2614  * on the receive return list.
 2615  *
 2616  * Note: we have to be able to handle two possibilities here:
 2617  * 1) the frame is from the jumbo receive ring
 2618  * 2) the frame is from the standard receive ring
 2619  */
 2620 
 2621 static void
 2622 bge_rxeof(sc)
 2623         struct bge_softc *sc;
 2624 {
 2625         struct ifnet *ifp;
 2626         int stdcnt = 0, jumbocnt = 0;
 2627 
 2628         BGE_LOCK_ASSERT(sc);
 2629 
 2630         ifp = &sc->arpcom.ac_if;
 2631 
 2632         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2633             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
 2634         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2635             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
 2636         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 2637                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2638                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2639                     BUS_DMASYNC_POSTREAD);
 2640         }
 2641 
 2642         while(sc->bge_rx_saved_considx !=
 2643             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
 2644                 struct bge_rx_bd        *cur_rx;
 2645                 u_int32_t               rxidx;
 2646                 struct ether_header     *eh;
 2647                 struct mbuf             *m = NULL;
 2648                 u_int16_t               vlan_tag = 0;
 2649                 int                     have_tag = 0;
 2650 
 2651                 cur_rx =
 2652             &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
 2653 
 2654                 rxidx = cur_rx->bge_idx;
 2655                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2656 
 2657                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2658                         have_tag = 1;
 2659                         vlan_tag = cur_rx->bge_vlan_tag;
 2660                 }
 2661 
 2662                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2663                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2664                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 2665                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
 2666                             BUS_DMASYNC_POSTREAD);
 2667                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 2668                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
 2669                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2670                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2671                         jumbocnt++;
 2672                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2673                                 ifp->if_ierrors++;
 2674                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2675                                 continue;
 2676                         }
 2677                         if (bge_newbuf_jumbo(sc,
 2678                             sc->bge_jumbo, NULL) == ENOBUFS) {
 2679                                 ifp->if_ierrors++;
 2680                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2681                                 continue;
 2682                         }
 2683                 } else {
 2684                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2685                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2686                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
 2687                             BUS_DMASYNC_POSTREAD);
 2688                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2689                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
 2690                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2691                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2692                         stdcnt++;
 2693                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2694                                 ifp->if_ierrors++;
 2695                                 bge_newbuf_std(sc, sc->bge_std, m);
 2696                                 continue;
 2697                         }
 2698                         if (bge_newbuf_std(sc, sc->bge_std,
 2699                             NULL) == ENOBUFS) {
 2700                                 ifp->if_ierrors++;
 2701                                 bge_newbuf_std(sc, sc->bge_std, m);
 2702                                 continue;
 2703                         }
 2704                 }
 2705 
 2706                 ifp->if_ipackets++;
 2707 #ifndef __i386__
 2708                 /*
 2709                  * The i386 allows unaligned accesses, but for other
 2710                  * platforms we must make sure the payload is aligned.
 2711                  */
 2712                 if (sc->bge_rx_alignment_bug) {
 2713                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 2714                             cur_rx->bge_len);
 2715                         m->m_data += ETHER_ALIGN;
 2716                 }
 2717 #endif
 2718                 eh = mtod(m, struct ether_header *);
 2719                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2720                 m->m_pkthdr.rcvif = ifp;
 2721 
 2722 #if 0 /* currently broken for some packets, possibly related to TCP options */
 2723                 if (ifp->if_hwassist) {
 2724                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2725                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
 2726                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2727                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
 2728                                 m->m_pkthdr.csum_data =
 2729                                     cur_rx->bge_tcp_udp_csum;
 2730                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 2731                         }
 2732                 }
 2733 #endif
 2734 
 2735                 /*
 2736                  * If we received a packet with a vlan tag,
 2737                  * attach that information to the packet.
 2738                  */
 2739                 if (have_tag)
 2740                         VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
 2741 
 2742                 BGE_UNLOCK(sc);
 2743                 (*ifp->if_input)(ifp, m);
 2744                 BGE_LOCK(sc);
 2745         }
 2746 
 2747         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2748             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
 2749         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2750             sc->bge_cdata.bge_rx_std_ring_map,
 2751             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
 2752         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 2753                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2754                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2755                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2756         }
 2757 
 2758         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2759         if (stdcnt)
 2760                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2761         if (jumbocnt)
 2762                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2763 
 2764         return;
 2765 }
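      /*
       * Editor's note: bge_rxeof() above follows the usual producer/consumer
       * ring discipline.  A minimal sketch of the pattern, with names
       * invented for illustration:
       *
       *    while (considx != hw_prodidx) {            drain completions
       *            process(return_ring[considx]);
       *            considx = (considx + 1) % RING_CNT;
       *    }
       *    CSR_WRITE_4(sc, CONS_MBOX, considx);       report consumer index
       *    CSR_WRITE_4(sc, PROD_MBOX, new_prodidx);   report refilled buffers
       */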
 2766 
 2767 static void
 2768 bge_txeof(sc)
 2769         struct bge_softc *sc;
 2770 {
 2771         struct bge_tx_bd *cur_tx = NULL;
 2772         struct ifnet *ifp;
 2773 
 2774         BGE_LOCK_ASSERT(sc);
 2775 
 2776         ifp = &sc->arpcom.ac_if;
 2777 
 2778         /*
 2779          * Go through our tx ring and free mbufs for those
 2780          * frames that have been sent.
 2781          */
 2782         while (sc->bge_tx_saved_considx !=
 2783             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
 2784                 u_int32_t               idx = 0;
 2785 
 2786                 idx = sc->bge_tx_saved_considx;
 2787                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 2788                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2789                         ifp->if_opackets++;
 2790                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 2791                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
 2792                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2793                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2794                             sc->bge_cdata.bge_tx_dmamap[idx]);
 2795                 }
 2796                 sc->bge_txcnt--;
 2797                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2798                 ifp->if_timer = 0;
 2799         }
 2800 
 2801         if (cur_tx != NULL)
 2802                 ifp->if_flags &= ~IFF_OACTIVE;
 2803 
 2804         return;
 2805 }
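      /*
       * Editor's note: zeroing ifp->if_timer on each reclaimed descriptor
       * above keeps the stack's once-per-second watchdog countdown from
       * reaching zero while the chip is still completing transmissions.
       */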
 2806 
 2807 static void
 2808 bge_intr(xsc)
 2809         void *xsc;
 2810 {
 2811         struct bge_softc *sc;
 2812         struct ifnet *ifp;
 2813         u_int32_t statusword;
 2814         u_int32_t status;
 2815 
 2816         sc = xsc;
 2817         ifp = &sc->arpcom.ac_if;
 2818 
 2819         BGE_LOCK(sc);
 2820 
 2821         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2822             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
 2823 
 2824         statusword =
 2825             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2826 
 2827 #ifdef notdef
 2828         /* Avoid this for now -- checking this register is expensive. */
 2829         /* Make sure this is really our interrupt. */
 2830         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2831                 return;
 2832 #endif
 2833         /* Ack interrupt and stop others from occurring. */
 2834         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2835 
 2836         /*
 2837          * Process link state changes.
 2838          * Grrr. The link status word in the status block does
 2839          * not work correctly on the BCM5700 rev AX and BX chips,
 2840          * according to all available information. Hence, we have
 2841          * to enable MII interrupts in order to properly obtain
 2842          * async link changes. Unfortunately, this also means that
 2843          * we have to read the MAC status register to detect link
 2844          * changes, thereby adding an additional register access to
 2845          * the interrupt handler.
 2846          */
 2847 
 2848         if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
 2849 
 2850                 status = CSR_READ_4(sc, BGE_MAC_STS);
 2851                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 2852                         sc->bge_link = 0;
 2853                         callout_stop(&sc->bge_stat_ch);
 2854                         bge_tick_locked(sc);
 2855                         /* Clear the interrupt */
 2856                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 2857                             BGE_EVTENB_MI_INTERRUPT);
 2858                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
 2859                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
 2860                             BRGPHY_INTRS);
 2861                 }
 2862         } else {
 2863                 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
 2864                         /*
 2865                          * Sometimes PCS encoding errors are detected in
 2866                          * TBI mode (on fiber NICs), and for some reason
 2867                          * the chip will signal them as link changes.
 2868                          * If we get a link change event, but the 'PCS
 2869                          * encoding error' bit in the MAC status register
 2870                          * is set, don't bother doing a link check.
 2871                          * This avoids spurious "gigabit link up" messages
 2872                          * that sometimes appear on fiber NICs during
 2873                          * periods of heavy traffic. (There should be no
 2874                          * effect on copper NICs.)
 2875                          */
 2876                         status = CSR_READ_4(sc, BGE_MAC_STS);
 2877                         if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
 2878                             BGE_MACSTAT_MI_COMPLETE))) {
 2879                                 sc->bge_link = 0;
 2880                                 callout_stop(&sc->bge_stat_ch);
 2881                                 bge_tick_locked(sc);
 2882                         }
 2883                         /* Clear the interrupt */
 2884                         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 2885                             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 2886                             BGE_MACSTAT_LINK_CHANGED);
 2887 
 2888                         /* Force flush the status block cached by PCI bridge */
 2889                         CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
 2890                 }
 2891         }
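              /*
               * Editor's note: the dummy CSR_READ_4() above relies on PCI
               * ordering -- a read cannot pass posted writes -- so it forces
               * any status-block update still sitting in a bridge's write
               * buffer out to host memory before the handler goes on.
               */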
 2892 
 2893         if (ifp->if_flags & IFF_RUNNING) {
 2894                 /* Check RX return ring producer/consumer */
 2895                 bge_rxeof(sc);
 2896 
 2897                 /* Check TX ring producer/consumer */
 2898                 bge_txeof(sc);
 2899         }
 2900 
 2901         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2902             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
 2903 
 2904         bge_handle_events(sc);
 2905 
 2906         /* Re-enable interrupts. */
 2907         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 2908 
 2909         if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
 2910                 bge_start_locked(ifp);
 2911 
 2912         BGE_UNLOCK(sc);
 2913 
 2914         return;
 2915 }
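      /*
       * Editor's note: the two writes to BGE_MBX_IRQ0_LO bracket the handler,
       * assuming the conventional Tigon-style mailbox semantics:
       *
       *    CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);   ack and mask interrupts
       *    ... service link state, RX and TX ...
       *    CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);   unmask; chip may fire again
       */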
 2916 
 2917 static void
 2918 bge_tick_locked(sc)
 2919         struct bge_softc *sc;
 2920 {
 2921         struct mii_data *mii = NULL;
 2922         struct ifmedia *ifm = NULL;
 2923         struct ifnet *ifp;
 2924 
 2925         ifp = &sc->arpcom.ac_if;
 2926 
 2927         BGE_LOCK_ASSERT(sc);
 2928 
 2929         if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
 2930                 bge_stats_update_regs(sc);
 2931         else
 2932                 bge_stats_update(sc);
 2933         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 2934         if (sc->bge_link)
 2935                 return;
 2936 
 2937         if (sc->bge_tbi) {
 2938                 ifm = &sc->bge_ifmedia;
 2939                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 2940                     BGE_MACSTAT_TBI_PCS_SYNCHED) {
 2941                         sc->bge_link++;
 2942                         CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 2943                         printf("bge%d: gigabit link up\n", sc->bge_unit);
 2944                         if (ifp->if_snd.ifq_head != NULL)
 2945                                 bge_start_locked(ifp);
 2946                 }
 2947                 return;
 2948         }
 2949 
 2950         mii = device_get_softc(sc->bge_miibus);
 2951         mii_tick(mii);
 2952  
 2953         if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
 2954             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 2955                 sc->bge_link++;
 2956                 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
 2957                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
 2958                         printf("bge%d: gigabit link up\n",
 2959                            sc->bge_unit);
 2960                 if (ifp->if_snd.ifq_head != NULL)
 2961                         bge_start_locked(ifp);
 2962         }
 2963 
 2964         return;
 2965 }
 2966 
 2967 static void
 2968 bge_tick(xsc)
 2969         void *xsc;
 2970 {
 2971         struct bge_softc *sc;
 2972 
 2973         sc = xsc;
 2974 
 2975         BGE_LOCK(sc);
 2976         bge_tick_locked(sc);
 2977         BGE_UNLOCK(sc);
 2978 }
 2979 
 2980 static void
 2981 bge_stats_update_regs(sc)
 2982         struct bge_softc *sc;
 2983 {
 2984         struct ifnet *ifp;
 2985         struct bge_mac_stats_regs stats;
 2986         u_int32_t *s;
 2987         int i;
 2988 
 2989         ifp = &sc->arpcom.ac_if;
 2990 
 2991         s = (u_int32_t *)&stats;
 2992         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
 2993                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
 2994                 s++;
 2995         }
 2996 
 2997         ifp->if_collisions +=
 2998            (stats.dot3StatsSingleCollisionFrames +
 2999            stats.dot3StatsMultipleCollisionFrames +
 3000            stats.dot3StatsExcessiveCollisions +
 3001            stats.dot3StatsLateCollisions) -
 3002            ifp->if_collisions;
 3003 
 3004         return;
 3005 }
 3006 
 3007 static void
 3008 bge_stats_update(sc)
 3009         struct bge_softc *sc;
 3010 {
 3011         struct ifnet *ifp;
 3012         struct bge_stats *stats;
 3013 
 3014         ifp = &sc->arpcom.ac_if;
 3015 
 3016         stats = (struct bge_stats *)(sc->bge_vhandle +
 3017             BGE_MEMWIN_START + BGE_STATS_BLOCK);
 3018 
 3019         ifp->if_collisions +=
 3020            (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
 3021            stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
 3022            stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
 3023            stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
 3024            ifp->if_collisions;
 3025 
 3026 #ifdef notdef
 3027         ifp->if_collisions +=
 3028            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
 3029            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
 3030            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
 3031            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
 3032            ifp->if_collisions;
 3033 #endif
 3034 
 3035         return;
 3036 }
 3037 
 3038 /*
 3039  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3040  * pointers to descriptors.
 3041  */
 3042 static int
 3043 bge_encap(sc, m_head, txidx)
 3044         struct bge_softc *sc;
 3045         struct mbuf *m_head;
 3046         u_int32_t *txidx;
 3047 {
 3048         struct bge_tx_bd        *f = NULL;
 3049         u_int16_t               csum_flags = 0;
 3050         struct m_tag            *mtag;
 3051         struct bge_dmamap_arg   ctx;
 3052         bus_dmamap_t            map;
 3053         int                     error;
 3054 
 3055 
 3056         if (m_head->m_pkthdr.csum_flags) {
 3057                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 3058                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3059                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
 3060                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3061                 if (m_head->m_flags & M_LASTFRAG)
 3062                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
 3063                 else if (m_head->m_flags & M_FRAG)
 3064                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 3065         }
 3066 
 3067         mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
 3068 
 3069         ctx.sc = sc;
 3070         ctx.bge_idx = *txidx;
 3071         ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
 3072         ctx.bge_flags = csum_flags;
 3073         /*
 3074          * Sanity check: avoid coming within 16 descriptors
 3075          * of the end of the ring.
 3076          */
 3077         ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
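              /*
               * Editor's note: the 16-descriptor margin presumably keeps the
               * producer index from ever catching up with the consumer
               * index, a state the hardware could not distinguish from an
               * empty ring.
               */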
 3078 
 3079         map = sc->bge_cdata.bge_tx_dmamap[*txidx];
 3080         error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
 3081             m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
 3082 
 3083         if (error || ctx.bge_maxsegs == 0 /*||
 3084             ctx.bge_idx == sc->bge_tx_saved_considx*/)
 3085                 return (ENOBUFS);
 3086 
 3087         /*
 3088          * Ensure that the map for this transmission
 3089          * is placed at the array index of the last descriptor
 3090          * in this chain.
 3091          */
 3092         sc->bge_cdata.bge_tx_dmamap[*txidx] =
 3093             sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
 3094         sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
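              /*
               * Editor's note: the swap above parks the DMA map used for
               * this load at the array slot of the chain's final descriptor,
               * which is exactly the index bge_txeof() will pass to
               * bus_dmamap_unload() when the chip retires that descriptor.
               */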
 3095         sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
 3096         sc->bge_txcnt += ctx.bge_maxsegs;
 3097         f = &sc->bge_ldata.bge_tx_ring[*txidx];
 3098         if (mtag != NULL) {
 3099                 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
 3100                 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
 3101         } else {
 3102                 f->bge_vlan_tag = 0;
 3103         }
 3104 
 3105         BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
 3106         *txidx = ctx.bge_idx;
 3107 
 3108         return(0);
 3109 }
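      /*
       * Editor's note: BGE_INC() is assumed to be the driver's modular
       * ring-index increment, i.e. something equivalent to
       *
       *    #define BGE_INC(x, y)   (x) = ((x) + 1) % (y)
       *
       * so the index handed back through *txidx always wraps within
       * BGE_TX_RING_CNT.
       */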
 3110 
 3111 /*
 3112  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3113  * to the mbuf data regions directly in the transmit descriptors.
 3114  */
 3115 static void
 3116 bge_start_locked(ifp)
 3117         struct ifnet *ifp;
 3118 {
 3119         struct bge_softc *sc;
 3120         struct mbuf *m_head = NULL;
 3121         u_int32_t prodidx = 0;
 3122 
 3123         sc = ifp->if_softc;
 3124 
 3125         if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
 3126                 return;
 3127 
 3128         prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
 3129 
 3130         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3131                 IF_DEQUEUE(&ifp->if_snd, m_head);
 3132                 if (m_head == NULL)
 3133                         break;
 3134 
 3135                 /*
 3136                  * XXX
 3137                  * safety overkill.  If this is a fragmented packet chain
 3138                  * with delayed TCP/UDP checksums, then only encapsulate
 3139                  * it if we have enough descriptors to handle the entire
 3140                  * chain at once.
 3141                  * (paranoia -- may not actually be needed)
 3142                  */
 3143                 if (m_head->m_flags & M_FIRSTFRAG &&
 3144                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3145                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3146                             m_head->m_pkthdr.csum_data + 16) {
 3147                                 IF_PREPEND(&ifp->if_snd, m_head);
 3148                                 ifp->if_flags |= IFF_OACTIVE;
 3149                                 break;
 3150                         }
 3151                 }
 3152 
 3153                 /*
 3154                  * Pack the data into the transmit ring. If we
 3155                  * don't have room, set the OACTIVE flag and wait
 3156                  * for the NIC to drain the ring.
 3157                  */
 3158                 if (bge_encap(sc, m_head, &prodidx)) {
 3159                         IF_PREPEND(&ifp->if_snd, m_head);
 3160                         ifp->if_flags |= IFF_OACTIVE;
 3161                         break;
 3162                 }
 3163 
 3164                 /*
 3165                  * If there's a BPF listener, bounce a copy of this frame
 3166                  * to him.
 3167                  */
 3168                 BPF_MTAP(ifp, m_head);
 3169         }
 3170 
 3171         /* Transmit */
 3172         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3173         /* 5700 b2 errata */
 3174         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 3175                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
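              /*
               * Editor's note: on the affected BX-level parts a single
               * producer-index write can reportedly be dropped by the chip,
               * so the mailbox is written twice as the errata workaround.
               */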
 3176 
 3177         /*
 3178          * Set a timeout in case the chip goes out to lunch.
 3179          */
 3180         ifp->if_timer = 5;
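              /*
               * Editor's note: if_timer is decremented once per second by
               * the stack; unless bge_txeof() zeroes it first, the watchdog
               * fires after roughly five seconds without a TX completion.
               */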
 3181 
 3182         return;
 3183 }
 3184 
 3185 /*
 3186  * Locking wrapper for the transmit routine: acquire the driver mutex,
 3187  * call bge_start_locked(), then drop the mutex.
 3188  */
 3189 static void
 3190 bge_start(ifp)
 3191         struct ifnet *ifp;
 3192 {
 3193         struct bge_softc *sc;
 3194 
 3195         sc = ifp->if_softc;
 3196         BGE_LOCK(sc);
 3197         bge_start_locked(ifp);
 3198         BGE_UNLOCK(sc);
 3199 }
 3200 
 3201 static void
 3202 bge_init_locked(sc)
 3203         struct bge_softc *sc;
 3204 {
 3205         struct ifnet *ifp;
 3206         u_int16_t *m;
 3207 
 3208         BGE_LOCK_ASSERT(sc);
 3209 
 3210         ifp = &sc->arpcom.ac_if;
 3211 
 3212         if (ifp->if_flags & IFF_RUNNING)
 3213                 return;
 3214 
 3215         /* Cancel pending I/O and flush buffers. */
 3216         bge_stop(sc);
 3217         bge_reset(sc);
 3218         bge_chipinit(sc);
 3219 
 3220         /*
 3221          * Init the various state machines, ring
 3222          * control blocks and firmware.
 3223          */
 3224         if (bge_blockinit(sc)) {
 3225                 printf("bge%d: initialization failure\n", sc->bge_unit);
 3226                 return;
 3227         }
 3228 
 3229         ifp = &sc->arpcom.ac_if;
 3230 
 3231         /* Specify MTU. */
 3232         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3233             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3234 
 3235         /* Load our MAC address. */
 3236         m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
 3237         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3238         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3239 
 3240         /* Enable or disable promiscuous mode as needed. */
 3241         if (ifp->if_flags & IFF_PROMISC) {
 3242                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3243         } else {
 3244                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3245         }
 3246 
 3247         /* Program multicast filter. */
 3248         bge_setmulti(sc);
 3249 
 3250         /* Init RX ring. */
 3251         bge_init_rx_ring_std(sc);
 3252 
 3253         /*
 3254          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 3255          * memory to ensure that the chip has in fact read the first
 3256          * entry of the ring.
 3257          */
 3258         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
 3259                 u_int32_t               v, i;
 3260                 for (i = 0; i < 10; i++) {
 3261                         DELAY(20);
 3262                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
 3263                         if (v == (MCLBYTES - ETHER_ALIGN))
 3264                                 break;
 3265                 }
 3266                 if (i == 10)
 3267                 printf("bge%d: 5705 A0 chip failed to load RX ring\n",
 3268                             sc->bge_unit);
 3269         }
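              /*
               * Editor's note: the loop above polls the chip's local copy of
               * the first standard-ring descriptor; once its length field
               * reads back as MCLBYTES - ETHER_ALIGN (presumably the value
               * the driver wrote when refilling the ring), the chip has
               * provably fetched that descriptor.
               */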
 3270 
 3271         /* Init jumbo RX ring. */
 3272         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3273                 bge_init_rx_ring_jumbo(sc);
 3274 
 3275         /* Init our RX return ring index */
 3276         sc->bge_rx_saved_considx = 0;
 3277 
 3278         /* Init TX ring. */
 3279         bge_init_tx_ring(sc);
 3280 
 3281         /* Turn on transmitter */
 3282         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3283 
 3284         /* Turn on receiver */
 3285         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3286 
 3287         /* Tell firmware we're alive. */
 3288         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3289 
 3290         /* Enable host interrupts. */
 3291         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
 3292         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3293         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3294 
 3295         bge_ifmedia_upd(ifp);
 3296 
 3297         ifp->if_flags |= IFF_RUNNING;
 3298         ifp->if_flags &= ~IFF_OACTIVE;
 3299 
 3300         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3301 
 3302         return;
 3303 }
 3304 
 3305 static void
 3306 bge_init(xsc)
 3307         void *xsc;
 3308 {
 3309         struct bge_softc *sc = xsc;
 3310 
 3311         BGE_LOCK(sc);
 3312         bge_init_locked(sc);
 3313         BGE_UNLOCK(sc);
 3314 
 3315         return;
 3316 }
 3317 
 3318 /*
 3319  * Set media options.
 3320  */
 3321 static int
 3322 bge_ifmedia_upd(ifp)
 3323         struct ifnet *ifp;
 3324 {
 3325         struct bge_softc *sc;
 3326         struct mii_data *mii;
 3327         struct ifmedia *ifm;
 3328 
 3329         sc = ifp->if_softc;
 3330         ifm = &sc->bge_ifmedia;
 3331 
 3332         /* If this is a 1000baseX NIC, enable the TBI port. */
 3333         if (sc->bge_tbi) {
 3334                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3335                         return(EINVAL);
 3336                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
 3337                 case IFM_AUTO:
 3338                         break;
 3339                 case IFM_1000_SX:
 3340                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3341                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3342                                     BGE_MACMODE_HALF_DUPLEX);
 3343                         } else {
 3344                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3345                                     BGE_MACMODE_HALF_DUPLEX);
 3346                         }
 3347                         break;
 3348                 default:
 3349                         return(EINVAL);
 3350                 }
 3351                 return(0);
 3352         }
 3353 
 3354         mii = device_get_softc(sc->bge_miibus);
 3355         sc->bge_link = 0;
 3356         if (mii->mii_instance) {
 3357                 struct mii_softc *miisc;
 3358                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 3359                     miisc = LIST_NEXT(miisc, mii_list))
 3360                         mii_phy_reset(miisc);
 3361         }
 3362         mii_mediachg(mii);
 3363 
 3364         return(0);
 3365 }
 3366 
 3367 /*
 3368  * Report current media status.
 3369  */
 3370 static void
 3371 bge_ifmedia_sts(ifp, ifmr)
 3372         struct ifnet *ifp;
 3373         struct ifmediareq *ifmr;
 3374 {
 3375         struct bge_softc *sc;
 3376         struct mii_data *mii;
 3377 
 3378         sc = ifp->if_softc;
 3379 
 3380         if (sc->bge_tbi) {
 3381                 ifmr->ifm_status = IFM_AVALID;
 3382                 ifmr->ifm_active = IFM_ETHER;
 3383                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3384                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3385                         ifmr->ifm_status |= IFM_ACTIVE;
 3386                 ifmr->ifm_active |= IFM_1000_SX;
 3387                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3388                         ifmr->ifm_active |= IFM_HDX;    
 3389                 else
 3390                         ifmr->ifm_active |= IFM_FDX;
 3391                 return;
 3392         }
 3393 
 3394         mii = device_get_softc(sc->bge_miibus);
 3395         mii_pollstat(mii);
 3396         ifmr->ifm_active = mii->mii_media_active;
 3397         ifmr->ifm_status = mii->mii_media_status;
 3398 
 3399         return;
 3400 }
 3401 
 3402 static int
 3403 bge_ioctl(ifp, command, data)
 3404         struct ifnet *ifp;
 3405         u_long command;
 3406         caddr_t data;
 3407 {
 3408         struct bge_softc *sc = ifp->if_softc;
 3409         struct ifreq *ifr = (struct ifreq *) data;
 3410         int mask, error = 0;
 3411         struct mii_data *mii;
 3412 
 3413         switch(command) {
 3414         case SIOCSIFMTU:
 3415                 /* Disallow jumbo frames on 5705. */
 3416                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
 3417                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
 3418                         error = EINVAL;
 3419                 else {
 3420                         ifp->if_mtu = ifr->ifr_mtu;
 3421                         ifp->if_flags &= ~IFF_RUNNING;
 3422                         bge_init(sc);
 3423                 }
 3424                 break;
 3425         case SIOCSIFFLAGS:
 3426                 BGE_LOCK(sc);
 3427                 if (ifp->if_flags & IFF_UP) {
 3428                         /*
 3429                          * If only the state of the PROMISC flag changed,
 3430                          * then just use the 'set promisc mode' command
 3431                          * instead of reinitializing the entire NIC. Doing
 3432                          * a full re-init means reloading the firmware and
 3433                          * waiting for it to start up, which may take a
 3434                          * second or two.
 3435                          */
 3436                         if (ifp->if_flags & IFF_RUNNING &&
 3437                             ifp->if_flags & IFF_PROMISC &&
 3438                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3439                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3440                                     BGE_RXMODE_RX_PROMISC);
 3441                         } else if (ifp->if_flags & IFF_RUNNING &&
 3442                             !(ifp->if_flags & IFF_PROMISC) &&
 3443                             sc->bge_if_flags & IFF_PROMISC) {
 3444                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3445                                     BGE_RXMODE_RX_PROMISC);
 3446                         } else
 3447                                 bge_init_locked(sc);
 3448                 } else {
 3449                         if (ifp->if_flags & IFF_RUNNING) {
 3450                                 bge_stop(sc);
 3451                         }
 3452                 }
 3453                 sc->bge_if_flags = ifp->if_flags;
 3454                 BGE_UNLOCK(sc);
 3455                 error = 0;
 3456                 break;
 3457         case SIOCADDMULTI:
 3458         case SIOCDELMULTI:
 3459                 if (ifp->if_flags & IFF_RUNNING) {
 3460                         BGE_LOCK(sc);
 3461                         bge_setmulti(sc);
 3462                         BGE_UNLOCK(sc);
 3463                         error = 0;
 3464                 }
 3465                 break;
 3466         case SIOCSIFMEDIA:
 3467         case SIOCGIFMEDIA:
 3468                 if (sc->bge_tbi) {
 3469                         error = ifmedia_ioctl(ifp, ifr,
 3470                             &sc->bge_ifmedia, command);
 3471                 } else {
 3472                         mii = device_get_softc(sc->bge_miibus);
 3473                         error = ifmedia_ioctl(ifp, ifr,
 3474                             &mii->mii_media, command);
 3475                 }
 3476                 break;
 3477         case SIOCSIFCAP:
 3478                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3479                 if (mask & IFCAP_HWCSUM) {
 3480                         if (IFCAP_HWCSUM & ifp->if_capenable)
 3481                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
 3482                         else
 3483                                 ifp->if_capenable |= IFCAP_HWCSUM;
 3484                 }
 3485                 error = 0;
 3486                 break;
 3487         default:
 3488                 error = ether_ioctl(ifp, command, data);
 3489                 break;
 3490         }
 3491 
 3492         return(error);
 3493 }
 3494 
 3495 static void
 3496 bge_watchdog(ifp)
 3497         struct ifnet *ifp;
 3498 {
 3499         struct bge_softc *sc;
 3500 
 3501         sc = ifp->if_softc;
 3502 
 3503         printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
 3504 
 3505         ifp->if_flags &= ~IFF_RUNNING;
 3506         bge_init(sc);
 3507 
 3508         ifp->if_oerrors++;
 3509 
 3510         return;
 3511 }
 3512 
 3513 /*
 3514  * Stop the adapter and free any mbufs allocated to the
 3515  * RX and TX lists.
 3516  */
 3517 static void
 3518 bge_stop(sc)
 3519         struct bge_softc *sc;
 3520 {
 3521         struct ifnet *ifp;
 3522         struct ifmedia_entry *ifm;
 3523         struct mii_data *mii = NULL;
 3524         int mtmp, itmp;
 3525 
 3526         BGE_LOCK_ASSERT(sc);
 3527 
 3528         ifp = &sc->arpcom.ac_if;
 3529 
 3530         if (!sc->bge_tbi)
 3531                 mii = device_get_softc(sc->bge_miibus);
 3532 
 3533         callout_stop(&sc->bge_stat_ch);
 3534 
 3535         /*
 3536          * Disable all of the receiver blocks
 3537          */
 3538         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3539         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3540         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3541         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 3542                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3543         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3544         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3545         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3546 
 3547         /*
 3548          * Disable all of the transmit blocks
 3549          */
 3550         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3551         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3552         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3553         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3554         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3555         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 3556                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3557         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3558 
 3559         /*
 3560          * Shut down all of the memory managers and related
 3561          * state machines.
 3562          */
 3563         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3564         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3565         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 3566                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3567         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3568         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3569         if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
 3570                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3571                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3572         }
 3573 
 3574         /* Disable host interrupts. */
 3575         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3576         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3577 
 3578         /*
 3579          * Tell firmware we're shutting down.
 3580          */
 3581         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3582 
 3583         /* Free the RX lists. */
 3584         bge_free_rx_ring_std(sc);
 3585 
 3586         /* Free jumbo RX list. */
 3587         if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
 3588                 bge_free_rx_ring_jumbo(sc);
 3589 
 3590         /* Free TX buffers. */
 3591         bge_free_tx_ring(sc);
 3592 
 3593         /*
 3594          * Isolate/power down the PHY, but leave the media selection
 3595          * unchanged so that things will be put back to normal when
 3596          * we bring the interface back up.
 3597          */
 3598         if (!sc->bge_tbi) {
 3599                 itmp = ifp->if_flags;
 3600                 ifp->if_flags |= IFF_UP;
 3601                 ifm = mii->mii_media.ifm_cur;
 3602                 mtmp = ifm->ifm_media;
 3603                 ifm->ifm_media = IFM_ETHER|IFM_NONE;
 3604                 mii_mediachg(mii);
 3605                 ifm->ifm_media = mtmp;
 3606                 ifp->if_flags = itmp;
 3607         }
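              /*
               * Editor's note: temporarily forcing IFM_ETHER|IFM_NONE makes
               * mii_mediachg() isolate the PHY, while restoring ifm_media
               * right afterwards preserves the user's media selection for
               * the next bge_init().
               */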
 3608 
 3609         sc->bge_link = 0;
 3610 
 3611         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3612 
 3613         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 3614 
 3615         return;
 3616 }
 3617 
 3618 /*
 3619  * Stop all chip I/O so that the kernel's probe routines don't
 3620  * get confused by errant DMAs when rebooting.
 3621  */
 3622 static void
 3623 bge_shutdown(dev)
 3624         device_t dev;
 3625 {
 3626         struct bge_softc *sc;
 3627 
 3628         sc = device_get_softc(dev);
 3629 
 3630         BGE_LOCK(sc);
 3631         bge_stop(sc); 
 3632         bge_reset(sc);
 3633         BGE_UNLOCK(sc);
 3634 
 3635         return;
 3636 }
