FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c


    1 /*-
    2  * Copyright (c) 2001 Wind River Systems
    3  * Copyright (c) 1997, 1998, 1999, 2001
    4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. All advertising materials mentioning features or use of this software
   15  *    must display the following acknowledgement:
   16  *      This product includes software developed by Bill Paul.
   17  * 4. Neither the name of the author nor the names of any co-contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   31  * THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: releng/6.1/sys/dev/bge/if_bge.c 156276 2006-03-04 09:34:48Z oleg $");
   36 
   37 /*
   38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
   39  *
   40  * The Broadcom BCM5700 is based on technology originally developed by
   41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
   42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
   43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
   44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
   45  * frames, highly configurable RX filtering, and 16 RX and TX queues
   46  * (which, along with RX filter rules, can be used for QOS applications).
   47  * Other features, such as TCP segmentation, may be available as part
   48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
   49  * firmware images can be stored in hardware and need not be compiled
   50  * into the driver.
   51  *
   52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
   53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
   54  *
   55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
   56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
   57  * does not support external SSRAM.
   58  *
   59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
   60  * brand name, which is functionally similar but lacks PCI-X support.
   61  *
   62  * Without external SSRAM, you can only have at most 4 TX rings,
   63  * and the use of the mini RX ring is disabled. This seems to imply
   64  * that these features are simply not available on the BCM5701. As a
   65  * result, this driver does not implement any support for the mini RX
   66  * ring.
   67  */
   68 
   69 #ifdef HAVE_KERNEL_OPTION_HEADERS
   70 #include "opt_device_polling.h"
   71 #endif
   72 
   73 #include <sys/param.h>
   74 #include <sys/endian.h>
   75 #include <sys/systm.h>
   76 #include <sys/sockio.h>
   77 #include <sys/mbuf.h>
   78 #include <sys/malloc.h>
   79 #include <sys/kernel.h>
   80 #include <sys/module.h>
   81 #include <sys/socket.h>
   82 
   83 #include <net/if.h>
   84 #include <net/if_arp.h>
   85 #include <net/ethernet.h>
   86 #include <net/if_dl.h>
   87 #include <net/if_media.h>
   88 
   89 #include <net/bpf.h>
   90 
   91 #include <net/if_types.h>
   92 #include <net/if_vlan_var.h>
   93 
   94 #include <netinet/in_systm.h>
   95 #include <netinet/in.h>
   96 #include <netinet/ip.h>
   97 
   98 #include <machine/clock.h>      /* for DELAY */
   99 #include <machine/bus.h>
  100 #include <machine/resource.h>
  101 #include <sys/bus.h>
  102 #include <sys/rman.h>
  103 
  104 #include <dev/mii/mii.h>
  105 #include <dev/mii/miivar.h>
  106 #include "miidevs.h"
  107 #include <dev/mii/brgphyreg.h>
  108 
  109 #include <dev/pci/pcireg.h>
  110 #include <dev/pci/pcivar.h>
  111 
  112 #include <dev/bge/if_bgereg.h>
  113 
  114 #include "opt_bge.h"
  115 
  116 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
  117 #define ETHER_MIN_NOPAD         (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
  118 
  119 MODULE_DEPEND(bge, pci, 1, 1, 1);
  120 MODULE_DEPEND(bge, ether, 1, 1, 1);
  121 MODULE_DEPEND(bge, miibus, 1, 1, 1);
  122 
  123 /* "device miibus" required.  See GENERIC if you get errors here. */
  124 #include "miibus_if.h"
  125 
  126 /*
  127  * Various supported device vendors/types and their names. Note: the
  128  * spec seems to indicate that the hardware still has Alteon's vendor
  129  * ID burned into it, though it will always be overridden by the vendor
  130  * ID in the EEPROM. Just to be safe, we cover all possibilities.
  131  */
  132 #define BGE_DEVDESC_MAX         64      /* Maximum device description length */
  133 
  134 static struct bge_type bge_devs[] = {
  135         { ALT_VENDORID, ALT_DEVICEID_BCM5700,
  136                 "Broadcom BCM5700 Gigabit Ethernet" },
  137         { ALT_VENDORID, ALT_DEVICEID_BCM5701,
  138                 "Broadcom BCM5701 Gigabit Ethernet" },
  139         { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
  140                 "Broadcom BCM5700 Gigabit Ethernet" },
  141         { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
  142                 "Broadcom BCM5701 Gigabit Ethernet" },
  143         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
  144                 "Broadcom BCM5702 Gigabit Ethernet" },
  145         { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
  146                 "Broadcom BCM5702X Gigabit Ethernet" },
  147         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
  148                 "Broadcom BCM5703 Gigabit Ethernet" },
  149         { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
  150                 "Broadcom BCM5703X Gigabit Ethernet" },
  151         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
  152                 "Broadcom BCM5704C Dual Gigabit Ethernet" },
  153         { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
  154                 "Broadcom BCM5704S Dual Gigabit Ethernet" },
  155         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
  156                 "Broadcom BCM5705 Gigabit Ethernet" },
  157         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
  158                 "Broadcom BCM5705K Gigabit Ethernet" },
  159         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
  160                 "Broadcom BCM5705M Gigabit Ethernet" },
  161         { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
  162                 "Broadcom BCM5705M Gigabit Ethernet" },
  163         { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
  164                 "Broadcom BCM5714C Gigabit Ethernet" },
  165         { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
  166                 "Broadcom BCM5721 Gigabit Ethernet" },
  167         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
  168                 "Broadcom BCM5750 Gigabit Ethernet" },
  169         { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
  170                 "Broadcom BCM5750M Gigabit Ethernet" },
  171         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
  172                 "Broadcom BCM5751 Gigabit Ethernet" },
  173         { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
  174                 "Broadcom BCM5751M Gigabit Ethernet" },
  175         { BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
  176                 "Broadcom BCM5752 Gigabit Ethernet" },
  177         { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
  178                 "Broadcom BCM5782 Gigabit Ethernet" },
  179         { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
  180                 "Broadcom BCM5788 Gigabit Ethernet" },
  181         { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
  182                 "Broadcom BCM5789 Gigabit Ethernet" },
  183         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
  184                 "Broadcom BCM5901 Fast Ethernet" },
  185         { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
  186                 "Broadcom BCM5901A2 Fast Ethernet" },
  187         { SK_VENDORID, SK_DEVICEID_ALTIMA,
  188                 "SysKonnect Gigabit Ethernet" },
  189         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
  190                 "Altima AC1000 Gigabit Ethernet" },
  191         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
  192                 "Altima AC1002 Gigabit Ethernet" },
  193         { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
  194                 "Altima AC9100 Gigabit Ethernet" },
  195         { 0, 0, NULL }
  196 };
  197 
  198 static int bge_probe            (device_t);
  199 static int bge_attach           (device_t);
  200 static int bge_detach           (device_t);
  201 static int bge_suspend          (device_t);
  202 static int bge_resume           (device_t);
  203 static void bge_release_resources
  204                                 (struct bge_softc *);
  205 static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
  206 static int bge_dma_alloc        (device_t);
  207 static void bge_dma_free        (struct bge_softc *);
  208 
  209 static void bge_txeof           (struct bge_softc *);
  210 static void bge_rxeof           (struct bge_softc *);
  211 
  212 static void bge_tick_locked     (struct bge_softc *);
  213 static void bge_tick            (void *);
  214 static void bge_stats_update    (struct bge_softc *);
  215 static void bge_stats_update_regs
  216                                 (struct bge_softc *);
  217 static int bge_encap            (struct bge_softc *, struct mbuf *,
  218                                         u_int32_t *);
  219 
  220 static void bge_intr            (void *);
  221 static void bge_start_locked    (struct ifnet *);
  222 static void bge_start           (struct ifnet *);
  223 static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
  224 static void bge_init_locked     (struct bge_softc *);
  225 static void bge_init            (void *);
  226 static void bge_stop            (struct bge_softc *);
  227 static void bge_watchdog                (struct ifnet *);
  228 static void bge_shutdown                (device_t);
  229 static int bge_ifmedia_upd      (struct ifnet *);
  230 static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
  231 
  232 static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
  233 static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);
  234 
  235 static void bge_setmulti        (struct bge_softc *);
  236 
  237 static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
  238 static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
  239 static int bge_init_rx_ring_std (struct bge_softc *);
  240 static void bge_free_rx_ring_std        (struct bge_softc *);
  241 static int bge_init_rx_ring_jumbo       (struct bge_softc *);
  242 static void bge_free_rx_ring_jumbo      (struct bge_softc *);
  243 static void bge_free_tx_ring    (struct bge_softc *);
  244 static int bge_init_tx_ring     (struct bge_softc *);
  245 
  246 static int bge_chipinit         (struct bge_softc *);
  247 static int bge_blockinit        (struct bge_softc *);
  248 
  249 #ifdef notdef
  250 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
  251 static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
  252 static void bge_vpd_read        (struct bge_softc *);
  253 #endif
  254 
  255 static u_int32_t bge_readmem_ind
  256                                 (struct bge_softc *, int);
  257 static void bge_writemem_ind    (struct bge_softc *, int, int);
  258 #ifdef notdef
  259 static u_int32_t bge_readreg_ind
  260                                 (struct bge_softc *, int);
  261 #endif
  262 static void bge_writereg_ind    (struct bge_softc *, int, int);
  263 
  264 static int bge_miibus_readreg   (device_t, int, int);
  265 static int bge_miibus_writereg  (device_t, int, int, int);
  266 static void bge_miibus_statchg  (device_t);
  267 #ifdef DEVICE_POLLING
  268 static void bge_poll            (struct ifnet *ifp, enum poll_cmd cmd,
  269                                     int count);
  270 static void bge_poll_locked     (struct ifnet *ifp, enum poll_cmd cmd,
  271                                     int count);
  272 #endif
  273 
  274 static void bge_reset           (struct bge_softc *);
  275 static void bge_link_upd        (struct bge_softc *);
  276 
  277 static device_method_t bge_methods[] = {
  278         /* Device interface */
  279         DEVMETHOD(device_probe,         bge_probe),
  280         DEVMETHOD(device_attach,        bge_attach),
  281         DEVMETHOD(device_detach,        bge_detach),
  282         DEVMETHOD(device_shutdown,      bge_shutdown),
  283         DEVMETHOD(device_suspend,       bge_suspend),
  284         DEVMETHOD(device_resume,        bge_resume),
  285 
  286         /* bus interface */
  287         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  288         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  289 
  290         /* MII interface */
  291         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
  292         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
  293         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
  294 
  295         { 0, 0 }
  296 };
  297 
  298 static driver_t bge_driver = {
  299         "bge",
  300         bge_methods,
  301         sizeof(struct bge_softc)
  302 };
  303 
  304 static devclass_t bge_devclass;
  305 
  306 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
  307 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
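
/*
 * Newbus glue: the DRIVER_MODULE() declarations above register bge as a
 * driver on the pci bus (and attach miibus as a child of bge), wiring the
 * DEVMETHOD table to the generic device_probe()/device_attach() entry
 * points.
 */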
  308 
  309 static u_int32_t
  310 bge_readmem_ind(sc, off)
  311         struct bge_softc *sc;
  312         int off;
  313 {
  314         device_t dev;
  315 
  316         dev = sc->bge_dev;
  317 
  318         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  319         return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
  320 }
  321 
  322 static void
  323 bge_writemem_ind(sc, off, val)
  324         struct bge_softc *sc;
  325         int off, val;
  326 {
  327         device_t dev;
  328 
  329         dev = sc->bge_dev;
  330 
  331         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
  332         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
  333 
  334         return;
  335 }
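
/*
 * Example (a sketch, not part of the driver proper): the two helpers above
 * implement the PCI memory-window indirect access protocol, so any word of
 * NIC-internal RAM can be reached from config space by loading
 * BGE_PCI_MEMWIN_BASEADDR and then touching BGE_PCI_MEMWIN_DATA. A
 * hypothetical read-modify-write of one word would look like this.
 */
#ifdef notdef
static void
bge_clearmem_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        /* Read the word back through the window; clear it if non-zero. */
        if (bge_readmem_ind(sc, off) != 0)
                bge_writemem_ind(sc, off, 0);
}
#endif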
  336 
  337 #ifdef notdef
  338 static u_int32_t
  339 bge_readreg_ind(sc, off)
  340         struct bge_softc *sc;
  341         int off;
  342 {
  343         device_t dev;
  344 
  345         dev = sc->bge_dev;
  346 
  347         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  348         return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
  349 }
  350 #endif
  351 
  352 static void
  353 bge_writereg_ind(sc, off, val)
  354         struct bge_softc *sc;
  355         int off, val;
  356 {
  357         device_t dev;
  358 
  359         dev = sc->bge_dev;
  360 
  361         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
  362         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
  363 
  364         return;
  365 }
  366 
  367 /*
  368  * Map a single buffer address (bus_dmamap_load() callback).
  369  */
  370 
  371 static void
  372 bge_dma_map_addr(arg, segs, nseg, error)
  373         void *arg;
  374         bus_dma_segment_t *segs;
  375         int nseg;
  376         int error;
  377 {
  378         struct bge_dmamap_arg *ctx;
  379 
  380         if (error)
  381                 return;
  382 
  383         ctx = arg;
  384 
  385         if (nseg > ctx->bge_maxsegs) {
  386                 ctx->bge_maxsegs = 0;
  387                 return;
  388         }
  389 
  390         ctx->bge_busaddr = segs->ds_addr;
  391 
  392         return;
  393 }
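
/*
 * Example (a sketch): the callback above is the glue handed to
 * bus_dmamap_load(), which invokes it once the mapping has been resolved.
 * A hypothetical wrapper that returns the mapped bus address directly
 * would look like this; bge_newbuf_std() below uses the same pattern.
 */
#ifdef notdef
static bus_addr_t
bge_dma_map_single(sc, map, buf, len)
        struct bge_softc *sc;
        bus_dmamap_t map;
        void *buf;
        bus_size_t len;
{
        struct bge_dmamap_arg ctx;

        ctx.sc = sc;
        ctx.bge_maxsegs = 1;
        ctx.bge_busaddr = 0;
        /* The callback fills in ctx before bus_dmamap_load() returns. */
        if (bus_dmamap_load(sc->bge_cdata.bge_mtag, map, buf, len,
            bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT) != 0 ||
            ctx.bge_maxsegs == 0)
                return (0);
        return (ctx.bge_busaddr);
}
#endif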
  394 
  395 #ifdef notdef
  396 static u_int8_t
  397 bge_vpd_readbyte(sc, addr)
  398         struct bge_softc *sc;
  399         int addr;
  400 {
  401         int i;
  402         device_t dev;
  403         u_int32_t val;
  404 
  405         dev = sc->bge_dev;
  406         pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
  407         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
  408                 DELAY(10);
  409                 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
  410                         break;
  411         }
  412 
  413         if (i == BGE_TIMEOUT * 10) {
  414                 device_printf(sc->bge_dev, "VPD read timed out\n");
  415                 return(0);
  416         }
  417 
  418         val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
  419 
  420         return((val >> ((addr % 4) * 8)) & 0xFF);
  421 }
  422 
  423 static void
  424 bge_vpd_read_res(sc, res, addr)
  425         struct bge_softc *sc;
  426         struct vpd_res *res;
  427         int addr;
  428 {
  429         int i;
  430         u_int8_t *ptr;
  431 
  432         ptr = (u_int8_t *)res;
  433         for (i = 0; i < sizeof(struct vpd_res); i++)
  434                 ptr[i] = bge_vpd_readbyte(sc, i + addr);
  435 
  436         return;
  437 }
  438 
  439 static void
  440 bge_vpd_read(sc)
  441         struct bge_softc *sc;
  442 {
  443         int pos = 0, i;
  444         struct vpd_res res;
  445 
  446         if (sc->bge_vpd_prodname != NULL)
  447                 free(sc->bge_vpd_prodname, M_DEVBUF);
  448         if (sc->bge_vpd_readonly != NULL)
  449                 free(sc->bge_vpd_readonly, M_DEVBUF);
  450         sc->bge_vpd_prodname = NULL;
  451         sc->bge_vpd_readonly = NULL;
  452 
  453         bge_vpd_read_res(sc, &res, pos);
  454 
  455         if (res.vr_id != VPD_RES_ID) {
  456                 device_printf(sc->bge_dev,
  457                     "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
  458                     res.vr_id);
  459                 return;
  460         }
  461 
  462         pos += sizeof(res);
  463         sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
  464         for (i = 0; i < res.vr_len; i++)
  465                 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
  466         sc->bge_vpd_prodname[i] = '\0';
  467         pos += i;
  468 
  469         bge_vpd_read_res(sc, &res, pos);
  470 
  471         if (res.vr_id != VPD_RES_READ) {
  472                 device_printf(sc->bge_dev,
  473                     "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
  474                     res.vr_id);
  475                 return;
  476         }
  477 
  478         pos += sizeof(res);
  479         sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
  480         for (i = 0; i < res.vr_len; i++)
  481                 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
  482 
  483         return;
  484 }
  485 #endif
  486 
  487 /*
  488  * Read a byte of data stored in the EEPROM at address 'addr.' The
  489  * BCM570x supports both the traditional bitbang interface and an
  490  * auto access interface for reading the EEPROM. We use the auto
  491  * access method.
  492  */
  493 static u_int8_t
  494 bge_eeprom_getbyte(sc, addr, dest)
  495         struct bge_softc *sc;
  496         int addr;
  497         u_int8_t *dest;
  498 {
  499         int i;
  500         u_int32_t byte = 0;
  501 
  502         /*
  503          * Enable use of auto EEPROM access so we can avoid
  504          * having to use the bitbang method.
  505          */
  506         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
  507 
  508         /* Reset the EEPROM, load the clock period. */
  509         CSR_WRITE_4(sc, BGE_EE_ADDR,
  510             BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
  511         DELAY(20);
  512 
  513         /* Issue the read EEPROM command. */
  514         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
  515 
  516         /* Wait for completion */
  517         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
  518                 DELAY(10);
  519                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
  520                         break;
  521         }
  522 
  523         if (i == BGE_TIMEOUT * 10) {
  524                 device_printf(sc->bge_dev, "EEPROM read timed out\n");
  525                 return(1);
  526         }
  527 
  528         /* Get result. */
  529         byte = CSR_READ_4(sc, BGE_EE_DATA);
  530 
  531         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
  532 
  533         return(0);
  534 }
  535 
  536 /*
  537  * Read a sequence of bytes from the EEPROM.
  538  */
  539 static int
  540 bge_read_eeprom(sc, dest, off, cnt)
  541         struct bge_softc *sc;
  542         caddr_t dest;
  543         int off;
  544         int cnt;
  545 {
  546         int err = 0, i;
  547         u_int8_t byte = 0;
  548 
  549         for (i = 0; i < cnt; i++) {
  550                 err = bge_eeprom_getbyte(sc, off + i, &byte);
  551                 if (err)
  552                         break;
  553                 *(dest + i) = byte;
  554         }
  555 
  556         return(err ? 1 : 0);
  557 }
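
/*
 * Example (a sketch): this is how bge_attach() fetches the station address
 * when it cannot be read out of NIC RAM, assuming the BGE_EE_MAC_OFFSET
 * constant from if_bgereg.h; the "+ 2" skips what appears to be a
 * two-byte header preceding the address itself.
 */
#ifdef notdef
static int
bge_get_eaddr_eeprom(sc, eaddr)
        struct bge_softc *sc;
        u_char eaddr[ETHER_ADDR_LEN];
{
        return (bge_read_eeprom(sc, (caddr_t)eaddr,
            BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif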
  558 
  559 static int
  560 bge_miibus_readreg(dev, phy, reg)
  561         device_t dev;
  562         int phy, reg;
  563 {
  564         struct bge_softc *sc;
  565         u_int32_t val, autopoll;
  566         int i;
  567 
  568         sc = device_get_softc(dev);
  569 
  570         /*
  571          * Broadcom's own driver always assumes the internal
  572          * PHY is at GMII address 1. On some chips, the PHY responds
  573          * to accesses at all addresses, which could cause us to
  574  * bogusly attach the PHY 32 times at probe time. Always
  575  * restricting the lookup to address 1 is simpler than
  576  * trying to figure out which chip revisions should be
  577          * special-cased.
  578          */
  579         if (phy != 1)
  580                 return(0);
  581 
  582         /* Reading with autopolling on may trigger PCI errors */
  583         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  584         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  585                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  586                 DELAY(40);
  587         }
  588 
  589         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
  590             BGE_MIPHY(phy)|BGE_MIREG(reg));
  591 
  592         for (i = 0; i < BGE_TIMEOUT; i++) {
  593                 val = CSR_READ_4(sc, BGE_MI_COMM);
  594                 if (!(val & BGE_MICOMM_BUSY))
  595                         break;
  596         }
  597 
  598         if (i == BGE_TIMEOUT) {
  599                 if_printf(sc->bge_ifp, "PHY read timed out\n");
  600                 val = 0;
  601                 goto done;
  602         }
  603 
  604         val = CSR_READ_4(sc, BGE_MI_COMM);
  605 
  606 done:
  607         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  608                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  609                 DELAY(40);
  610         }
  611 
  612         if (val & BGE_MICOMM_READFAIL)
  613                 return(0);
  614 
  615         return(val & 0xFFFF);
  616 }
  617 
  618 static int
  619 bge_miibus_writereg(dev, phy, reg, val)
  620         device_t dev;
  621         int phy, reg, val;
  622 {
  623         struct bge_softc *sc;
  624         u_int32_t autopoll;
  625         int i;
  626 
  627         sc = device_get_softc(dev);
  628 
  629         /* Writing with autopolling on may trigger PCI errors */
  630         autopoll = CSR_READ_4(sc, BGE_MI_MODE);
  631         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  632                 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  633                 DELAY(40);
  634         }
  635 
  636         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
  637             BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
  638 
  639         for (i = 0; i < BGE_TIMEOUT; i++) {
  640                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
  641                         break;
  642         }
  643 
  644         if (autopoll & BGE_MIMODE_AUTOPOLL) {
  645                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
  646                 DELAY(40);
  647         }
  648 
  649         if (i == BGE_TIMEOUT) {
  650                 if_printf(sc->bge_ifp, "PHY write timed out\n");
  651                 return(0);
  652         }
  653 
  654         return(0);
  655 }
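
/*
 * Example (a sketch): the accessors above are normally reached through the
 * miibus kobj interface rather than called directly; a raw read of the
 * PHY ID register (MII_PHYIDR1 from dev/mii/mii.h) would look like this.
 */
#ifdef notdef
static int
bge_read_phy_id(dev)
        device_t dev;
{
        /* The internal PHY always answers at GMII address 1. */
        return (bge_miibus_readreg(dev, 1, MII_PHYIDR1));
}
#endif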
  656 
  657 static void
  658 bge_miibus_statchg(dev)
  659         device_t dev;
  660 {
  661         struct bge_softc *sc;
  662         struct mii_data *mii;
  663 
  664         sc = device_get_softc(dev);
  665         mii = device_get_softc(sc->bge_miibus);
  666 
  667         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
  668         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
  669                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
  670         } else {
  671                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
  672         }
  673 
  674         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  675                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  676         } else {
  677                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
  678         }
  679 
  680         return;
  681 }
  682 
  683 /*
  684  * Initialize a standard receive ring descriptor.
  685  */
  686 static int
  687 bge_newbuf_std(sc, i, m)
  688         struct bge_softc        *sc;
  689         int                     i;
  690         struct mbuf             *m;
  691 {
  692         struct mbuf             *m_new = NULL;
  693         struct bge_rx_bd        *r;
  694         struct bge_dmamap_arg   ctx;
  695         int                     error;
  696 
  697         if (m == NULL) {
  698                 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
  699                 if (m_new == NULL)
  700                         return(ENOBUFS);
  701                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  702         } else {
  703                 m_new = m;
  704                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  705                 m_new->m_data = m_new->m_ext.ext_buf;
  706         }
  707 
  708         if (!sc->bge_rx_alignment_bug)
  709                 m_adj(m_new, ETHER_ALIGN);
  710         sc->bge_cdata.bge_rx_std_chain[i] = m_new;
  711         r = &sc->bge_ldata.bge_rx_std_ring[i];
  712         ctx.bge_maxsegs = 1;
  713         ctx.sc = sc;
  714         error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
  715             sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
  716             m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
  717         if (error || ctx.bge_maxsegs == 0) {
  718                 if (m == NULL) {
  719                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
  720                         m_freem(m_new);
  721                 }
  722                 return(ENOMEM);
  723         }
  724         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
  725         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
  726         r->bge_flags = BGE_RXBDFLAG_END;
  727         r->bge_len = m_new->m_len;
  728         r->bge_idx = i;
  729 
  730         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  731             sc->bge_cdata.bge_rx_std_dmamap[i],
  732             BUS_DMASYNC_PREREAD);
  733 
  734         return(0);
  735 }
  736 
  737 /*
  738  * Initialize a jumbo receive ring descriptor. This allocates
  739  * a jumbo buffer from the pool managed internally by the driver.
  740  */
  741 static int
  742 bge_newbuf_jumbo(sc, i, m)
  743         struct bge_softc *sc;
  744         int i;
  745         struct mbuf *m;
  746 {
  747         bus_dma_segment_t segs[BGE_NSEG_JUMBO];
  748         struct bge_extrx_bd *r;
  749         struct mbuf *m_new = NULL;
  750         int nsegs;
  751         int error;
  752 
  753         if (m == NULL) {
  754                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  755                 if (m_new == NULL)
  756                         return(ENOBUFS);
  757 
  758                 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
  759                 if (!(m_new->m_flags & M_EXT)) {
  760                         m_freem(m_new);
  761                         return(ENOBUFS);
  762                 }
  763                 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
  764         } else {
  765                 m_new = m;
  766                 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
  767                 m_new->m_data = m_new->m_ext.ext_buf;
  768         }
  769 
  770         if (!sc->bge_rx_alignment_bug)
  771                 m_adj(m_new, ETHER_ALIGN);
  772 
  773         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
  774             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
  775             m_new, segs, &nsegs, BUS_DMA_NOWAIT);
  776         if (error) {
  777                 if (m == NULL)
  778                         m_freem(m_new);
  779                 return(error);
  780         }
  781         sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
  782 
  783         /*
  784          * Fill in the extended RX buffer descriptor.
  785          */
  786         r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
  787         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
  788         r->bge_idx = i;
  789         r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
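        /*
         * Note: the switch cases below intentionally fall through, so a
         * mapping with N segments fills descriptor slots N-1 down to 0.
         */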
  790         switch (nsegs) {
  791         case 4:
  792                 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
  793                 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
  794                 r->bge_len3 = segs[3].ds_len;
  795         case 3:
  796                 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
  797                 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
  798                 r->bge_len2 = segs[2].ds_len;
  799         case 2:
  800                 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
  801                 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
  802                 r->bge_len1 = segs[1].ds_len;
  803         case 1:
  804                 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
  805                 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
  806                 r->bge_len0 = segs[0].ds_len;
  807                 break;
  808         default:
  809                 panic("%s: %d segments\n", __func__, nsegs);
  810         }
  811 
  812         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
  813             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
  814             BUS_DMASYNC_PREREAD);
  815 
  816         return (0);
  817 }
  818 
  819 /*
  820  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
  821  * that's 1MB of memory, which is a lot. For now, we fill only the first
  822  * 256 ring entries and hope that our CPU is fast enough to keep up with
  823  * the NIC.
  824  */
  825 static int
  826 bge_init_rx_ring_std(sc)
  827         struct bge_softc *sc;
  828 {
  829         int i;
  830 
  831         for (i = 0; i < BGE_SSLOTS; i++) {
  832                 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
  833                         return(ENOBUFS);
  834         }
  835 
  836         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
  837             sc->bge_cdata.bge_rx_std_ring_map,
  838             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  839 
  840         sc->bge_std = i - 1;
  841         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
  842 
  843         return(0);
  844 }
  845 
  846 static void
  847 bge_free_rx_ring_std(sc)
  848         struct bge_softc *sc;
  849 {
  850         int i;
  851 
  852         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
  853                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
  854                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  855                             sc->bge_cdata.bge_rx_std_dmamap[i],
  856                             BUS_DMASYNC_POSTREAD);
  857                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
  858                             sc->bge_cdata.bge_rx_std_dmamap[i]);
  859                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
  860                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
  861                 }
  862                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
  863                     sizeof(struct bge_rx_bd));
  864         }
  865 
  866         return;
  867 }
  868 
  869 static int
  870 bge_init_rx_ring_jumbo(sc)
  871         struct bge_softc *sc;
  872 {
  873         struct bge_rcb *rcb;
  874         int i;
  875 
  876         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
  877                 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
  878                         return(ENOBUFS);
  879         }
  880 
  881         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
  882             sc->bge_cdata.bge_rx_jumbo_ring_map,
  883             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  884 
  885         sc->bge_jumbo = i - 1;
  886 
  887         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
  888         rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
  889                                     BGE_RCB_FLAG_USE_EXT_RX_BD);
  890         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
  891 
  892         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
  893 
  894         return(0);
  895 }
  896 
  897 static void
  898 bge_free_rx_ring_jumbo(sc)
  899         struct bge_softc *sc;
  900 {
  901         int i;
  902 
  903         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
  904                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
  905                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
  906                             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
  907                             BUS_DMASYNC_POSTREAD);
  908                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
  909                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
  910                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
  911                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
  912                 }
  913                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
  914                     sizeof(struct bge_extrx_bd));
  915         }
  916 
  917         return;
  918 }
  919 
  920 static void
  921 bge_free_tx_ring(sc)
  922         struct bge_softc *sc;
  923 {
  924         int i;
  925 
  926         if (sc->bge_ldata.bge_tx_ring == NULL)
  927                 return;
  928 
  929         for (i = 0; i < BGE_TX_RING_CNT; i++) {
  930                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
  931                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
  932                             sc->bge_cdata.bge_tx_dmamap[i],
  933                             BUS_DMASYNC_POSTWRITE);
  934                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
  935                             sc->bge_cdata.bge_tx_dmamap[i]);
  936                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
  937                         sc->bge_cdata.bge_tx_chain[i] = NULL;
  938                 }
  939                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
  940                     sizeof(struct bge_tx_bd));
  941         }
  942 
  943         return;
  944 }
  945 
  946 static int
  947 bge_init_tx_ring(sc)
  948         struct bge_softc *sc;
  949 {
  950         sc->bge_txcnt = 0;
  951         sc->bge_tx_saved_considx = 0;
  952 
  953         /* Initialize transmit producer index for host-memory send ring. */
  954         sc->bge_tx_prodidx = 0;
  955         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
  956 
  957         /* 5700 b2 errata */
  958         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
  959                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
  960 
  961         /* NIC-memory send ring not used; initialize to zero. */
  962         CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
  963         /* 5700 b2 errata */
  964         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
  965                 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
  966 
  967         return(0);
  968 }
  969 
  970 static void
  971 bge_setmulti(sc)
  972         struct bge_softc *sc;
  973 {
  974         struct ifnet *ifp;
  975         struct ifmultiaddr *ifma;
  976         u_int32_t hashes[4] = { 0, 0, 0, 0 };
  977         int h, i;
  978 
  979         BGE_LOCK_ASSERT(sc);
  980 
  981         ifp = sc->bge_ifp;
  982 
  983         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
  984                 for (i = 0; i < 4; i++)
  985                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
  986                 return;
  987         }
  988 
  989         /* First, zot all the existing filters. */
  990         for (i = 0; i < 4; i++)
  991                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
  992 
  993         /* Now program new ones. */
  994         IF_ADDR_LOCK(ifp);
  995         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  996                 if (ifma->ifma_addr->sa_family != AF_LINK)
  997                         continue;
  998                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
  999                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
 1000                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 1001         }
 1002         IF_ADDR_UNLOCK(ifp);
 1003 
 1004         for (i = 0; i < 4; i++)
 1005                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
 1006 
 1007         return;
 1008 }
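
/*
 * Example (a sketch): for the all-hosts group 01:00:5e:00:00:01, the low
 * 7 bits of the little-endian CRC select one of 128 hash-table bits;
 * bits 6:5 pick the BGE_MAR register and bits 4:0 the bit within it,
 * exactly as computed in bge_setmulti() above.
 */
#ifdef notdef
static void
bge_hash_example(void)
{
        static const u_char dst[ETHER_ADDR_LEN] =
            { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        int h;

        h = ether_crc32_le(dst, ETHER_ADDR_LEN) & 0x7F;
        printf("MAR%d, bit %d\n", (h & 0x60) >> 5, h & 0x1F);
}
#endif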
 1009 
 1010 /*
 1011  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 1012  * self-test results.
 1013  */
 1014 static int
 1015 bge_chipinit(sc)
 1016         struct bge_softc *sc;
 1017 {
 1018         int                     i;
 1019         u_int32_t               dma_rw_ctl;
 1020 
 1021         /* Set endian type before we access any non-PCI registers. */
 1022         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
 1023 
 1024         /*
 1025          * Check the 'ROM failed' bit on the RX CPU to see if
 1026          * self-tests passed.
 1027          */
 1028         if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
 1029                 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
 1030                 return(ENODEV);
 1031         }
 1032 
 1033         /* Clear the MAC control register */
 1034         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 1035 
 1036         /*
 1037          * Clear the MAC statistics block in the NIC's
 1038          * internal memory.
 1039          */
 1040         for (i = BGE_STATS_BLOCK;
 1041             i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1042                 BGE_MEMWIN_WRITE(sc, i, 0);
 1043 
 1044         for (i = BGE_STATUS_BLOCK;
 1045             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
 1046                 BGE_MEMWIN_WRITE(sc, i, 0);
 1047 
 1048         /* Set up the PCI DMA control register. */
 1049         if (sc->bge_pcie) {
 1050                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1051                     (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1052                     (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1053         } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
 1054             BGE_PCISTATE_PCI_BUSMODE) {
 1055                 /* Conventional PCI bus */
 1056                 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1057                     (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1058                     (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1059                     (0x0F);
 1060         } else {
 1061                 /* PCI-X bus */
 1062                 /*
 1063                  * The 5704 uses a different encoding of read/write
 1064                  * watermarks.
 1065                  */
 1066                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1067                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1068                             (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1069                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
 1070                 else
 1071                         dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
 1072                             (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
 1073                             (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
 1074                             (0x0F);
 1075 
 1076                 /*
 1077                  * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
 1078                  * for hardware bugs.
 1079                  */
 1080                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1081                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 1082                         u_int32_t tmp;
 1083 
 1084                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
 1085                         if (tmp == 0x6 || tmp == 0x7)
 1086                                 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
 1087                 }
 1088         }
 1089 
 1090         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
 1091             sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
 1092             sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1093             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1094                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
 1095         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
 1096 
 1097         /*
 1098          * Set up general mode register.
 1099          */
 1100         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
 1101             BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
 1102             BGE_MODECTL_TX_NO_PHDR_CSUM);
 1103 
 1104         /*
 1105          * Disable memory write invalidate.  Apparently it is not supported
 1106          * properly by these devices.
 1107          */
 1108         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
 1109 
 1110 #ifdef __brokenalpha__
 1111         /*
 1112  * Must ensure that we do not cross an 8K (bytes) boundary
 1113          * for DMA reads.  Our highest limit is 1K bytes.  This is a
 1114          * restriction on some ALPHA platforms with early revision
 1115          * 21174 PCI chipsets, such as the AlphaPC 164lx
 1116          */
 1117         PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
 1118             BGE_PCI_READ_BNDRY_1024BYTES, 4);
 1119 #endif
 1120 
 1121         /* Set the timer prescaler (always 66MHz) */
 1122         CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
 1123 
 1124         return(0);
 1125 }
 1126 
 1127 static int
 1128 bge_blockinit(sc)
 1129         struct bge_softc *sc;
 1130 {
 1131         struct bge_rcb *rcb;
 1132         bus_size_t vrcb;
 1133         bge_hostaddr taddr;
 1134         int i;
 1135 
 1136         /*
 1137          * Initialize the memory window pointer register so that
 1138          * we can access the first 32K of internal NIC RAM. This will
 1139          * allow us to set up the TX send ring RCBs and the RX return
 1140          * ring RCBs, plus other things which live in NIC memory.
 1141          */
 1142         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
 1143 
 1144         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
 1145 
 1146         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1147             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1148                 /* Configure mbuf memory pool */
 1149                 if (sc->bge_extram) {
 1150                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1151                             BGE_EXT_SSRAM);
 1152                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1153                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1154                         else
 1155                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1156                 } else {
 1157                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
 1158                             BGE_BUFFPOOL_1);
 1159                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 1160                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
 1161                         else
 1162                                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
 1163                 }
 1164 
 1165                 /* Configure DMA resource pool */
 1166                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
 1167                     BGE_DMA_DESCRIPTORS);
 1168                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
 1169         }
 1170 
 1171         /* Configure mbuf pool watermarks */
 1172         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1173             sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 1174                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
 1175                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
 1176         } else {
 1177                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
 1178                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
 1179         }
 1180         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
 1181 
 1182         /* Configure DMA resource watermarks */
 1183         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
 1184         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
 1185 
 1186         /* Enable buffer manager */
 1187         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1188             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1189                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
 1190                     BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
 1191 
 1192                 /* Poll for buffer manager start indication */
 1193                 for (i = 0; i < BGE_TIMEOUT; i++) {
 1194                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
 1195                                 break;
 1196                         DELAY(10);
 1197                 }
 1198 
 1199                 if (i == BGE_TIMEOUT) {
 1200                         device_printf(sc->bge_dev,
 1201                             "buffer manager failed to start\n");
 1202                         return(ENXIO);
 1203                 }
 1204         }
 1205 
 1206         /* Enable flow-through queues */
 1207         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 1208         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 1209 
 1210         /* Wait until queue initialization is complete */
 1211         for (i = 0; i < BGE_TIMEOUT; i++) {
 1212                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
 1213                         break;
 1214                 DELAY(10);
 1215         }
 1216 
 1217         if (i == BGE_TIMEOUT) {
 1218                 device_printf(sc->bge_dev, "flow-through queue init failed\n");
 1219                 return(ENXIO);
 1220         }
 1221 
 1222         /* Initialize the standard RX ring control block */
 1223         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
 1224         rcb->bge_hostaddr.bge_addr_lo =
 1225             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
 1226         rcb->bge_hostaddr.bge_addr_hi =
 1227             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
 1228         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 1229             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
 1230         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 1231             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 1232                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
 1233         else
 1234                 rcb->bge_maxlen_flags =
 1235                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
 1236         if (sc->bge_extram)
 1237                 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
 1238         else
 1239                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
 1240         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
 1241         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
 1242 
 1243         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
 1244         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 1245 
 1246         /*
 1247          * Initialize the jumbo RX ring control block
 1248          * We set the 'ring disabled' bit in the flags
 1249          * field until we're actually ready to start
 1250          * using this ring (i.e. once we set the MTU
 1251          * high enough to require it).
 1252          */
 1253         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1254             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1255                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
 1256 
 1257                 rcb->bge_hostaddr.bge_addr_lo =
 1258                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1259                 rcb->bge_hostaddr.bge_addr_hi =
 1260                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
 1261                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1262                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1263                     BUS_DMASYNC_PREREAD);
 1264                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
 1265                     BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
 1266                 if (sc->bge_extram)
 1267                         rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
 1268                 else
 1269                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
 1270                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
 1271                     rcb->bge_hostaddr.bge_addr_hi);
 1272                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
 1273                     rcb->bge_hostaddr.bge_addr_lo);
 1274 
 1275                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
 1276                     rcb->bge_maxlen_flags);
 1277                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
 1278 
 1279                 /* Set up dummy disabled mini ring RCB */
 1280                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
 1281                 rcb->bge_maxlen_flags =
 1282                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
 1283                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
 1284                     rcb->bge_maxlen_flags);
 1285         }
 1286 
 1287         /*
 1288          * Set the BD ring replenish thresholds. The recommended
 1289          * values are 1/8th the number of descriptors allocated to
 1290          * each ring (e.g. 512/8 == 64 for the standard RX ring).
 1291          */
 1292         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
 1293         CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
 1294 
 1295         /*
 1296          * Disable all unused send rings by setting the 'ring disabled'
 1297          * bit in the flags field of all the TX send ring control blocks.
 1298          * These are located in NIC memory.
 1299          */
 1300         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
 1301         for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
 1302                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
 1303                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
 1304                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
 1305                 vrcb += sizeof(struct bge_rcb);
 1306         }
 1307 
 1308         /* Configure TX RCB 0 (we use only the first ring) */
 1309         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
 1310         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
 1311         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
 1312         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
 1313         RCB_WRITE_4(sc, vrcb, bge_nicaddr,
 1314             BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
 1315         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1316             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1317                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
 1318                     BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
 1319 
 1320         /* Disable all unused RX return rings */
 1321         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
 1322         for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
 1323                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
 1324                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
 1325                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
 1326                     BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
 1327                     BGE_RCB_FLAG_RING_DISABLED));
 1328                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
 1329                 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
 1330                     (i * (sizeof(u_int64_t))), 0);
 1331                 vrcb += sizeof(struct bge_rcb);
 1332         }
 1333 
 1334         /* Initialize RX ring indexes */
 1335         CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
 1336         CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
 1337         CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
 1338 
 1339         /*
 1340          * Set up RX return ring 0
 1341          * Note that the NIC address for RX return rings is 0x00000000.
 1342          * The return rings live entirely within the host, so the
 1343          * nicaddr field in the RCB isn't used.
 1344          */
 1345         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
 1346         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
 1347         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
 1348         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
 1349         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
 1350         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
 1351             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));  
 1352 
 1353         /* Set random backoff seed for TX */
 1354         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
 1355             IFP2ENADDR(sc->bge_ifp)[0] + IFP2ENADDR(sc->bge_ifp)[1] +
 1356             IFP2ENADDR(sc->bge_ifp)[2] + IFP2ENADDR(sc->bge_ifp)[3] +
 1357             IFP2ENADDR(sc->bge_ifp)[4] + IFP2ENADDR(sc->bge_ifp)[5] +
 1358             BGE_TX_BACKOFF_SEED_MASK);
 1359 
 1360         /* Set inter-packet gap */
 1361         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
 1362 
 1363         /*
 1364          * Specify which ring to use for packets that don't match
 1365          * any RX rules.
 1366          */
 1367         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
 1368 
 1369         /*
 1370          * Configure number of RX lists. One interrupt distribution
 1371          * list, sixteen active lists, one bad frames class.
 1372          */
 1373         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
 1374 
  1375         /* Initialize RX list placement stats mask. */
 1376         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
 1377         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
 1378 
 1379         /* Disable host coalescing until we get it set up */
 1380         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
 1381 
 1382         /* Poll to make sure it's shut down. */
 1383         for (i = 0; i < BGE_TIMEOUT; i++) {
 1384                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
 1385                         break;
 1386                 DELAY(10);
 1387         }
 1388 
 1389         if (i == BGE_TIMEOUT) {
 1390                 device_printf(sc->bge_dev,
 1391                     "host coalescing engine failed to idle\n");
 1392                 return(ENXIO);
 1393         }
 1394 
 1395         /* Set up host coalescing defaults */
 1396         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
 1397         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
 1398         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
 1399         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
 1400         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1401             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1402                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
 1403                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
 1404         }
 1405         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 1406         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
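               /*
                * As we understand the host coalescing engine, it updates the
                * status block and raises an interrupt once either the tick
                * timer or the max-BD count is reached, whichever comes first.
                * The *_INT variants (thresholds that apply while an interrupt
                * is already pending) are zeroed here, i.e. disabled.
                */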
 1407 
 1408         /* Set up address of statistics block */
 1409         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1410             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1411                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
 1412                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
 1413                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
 1414                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
 1415                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
 1416                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
 1417                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
 1418         }
 1419 
 1420         /* Set up address of status block */
 1421         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
 1422             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
 1423         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
 1424             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
 1425         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
 1426         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
 1427 
 1428         /* Turn on host coalescing state machine */
 1429         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 1430 
 1431         /* Turn on RX BD completion state machine and enable attentions */
 1432         CSR_WRITE_4(sc, BGE_RBDC_MODE,
 1433             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
 1434 
 1435         /* Turn on RX list placement state machine */
 1436         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 1437 
 1438         /* Turn on RX list selector state machine. */
 1439         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1440             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1441                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 1442 
 1443         /* Turn on DMA, clear stats */
 1444         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
 1445             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
 1446             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
 1447             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
 1448             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
 1449 
 1450         /* Set misc. local control, enable interrupts on attentions */
 1451         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
 1452 
 1453 #ifdef notdef
 1454         /* Assert GPIO pins for PHY reset */
 1455         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
 1456             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
 1457         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
 1458             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
 1459 #endif
 1460 
 1461         /* Turn on DMA completion state machine */
 1462         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1463             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1464                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 1465 
 1466         /* Turn on write DMA state machine */
 1467         CSR_WRITE_4(sc, BGE_WDMA_MODE,
 1468             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
 1469 
 1470         /* Turn on read DMA state machine */
 1471         CSR_WRITE_4(sc, BGE_RDMA_MODE,
 1472             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
 1473 
 1474         /* Turn on RX data completion state machine */
 1475         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 1476 
 1477         /* Turn on RX BD initiator state machine */
 1478         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 1479 
 1480         /* Turn on RX data and RX BD initiator state machine */
 1481         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
 1482 
 1483         /* Turn on Mbuf cluster free state machine */
 1484         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1485             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 1486                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 1487 
 1488         /* Turn on send BD completion state machine */
 1489         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 1490 
 1491         /* Turn on send data completion state machine */
 1492         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 1493 
 1494         /* Turn on send data initiator state machine */
 1495         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 1496 
 1497         /* Turn on send BD initiator state machine */
 1498         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 1499 
 1500         /* Turn on send BD selector state machine */
 1501         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 1502 
 1503         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
 1504         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
 1505             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
 1506 
 1507         /* ack/clear link change events */
 1508         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1509             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1510             BGE_MACSTAT_LINK_CHANGED);
 1511         CSR_WRITE_4(sc, BGE_MI_STS, 0);
 1512 
 1513         /* Enable PHY auto polling (for MII/GMII only) */
 1514         if (sc->bge_tbi) {
 1515                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
 1516         } else {
  1517                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|(10<<16));
 1518                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
 1519                     sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
 1520                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 1521                             BGE_EVTENB_MI_INTERRUPT);
 1522         }
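               /*
                * In autopoll mode the MAC's MI state machine polls the PHY
                * itself; the (10<<16) above is presumably the poll interval
                * field of BGE_MI_MODE. The MI interrupt enable applies only
                * to BCM5700 parts other than B1, which signal PHY events via
                * BGE_EVTENB_MI_INTERRUPT instead of link-changed attentions.
                */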
 1523 
 1524         /*
 1525          * Clear any pending link state attention.
 1526          * Otherwise some link state change events may be lost until attention
 1527          * is cleared by bge_intr() -> bge_link_upd() sequence.
 1528          * It's not necessary on newer BCM chips - perhaps enabling link
 1529          * state change attentions implies clearing pending attention.
 1530          */
 1531         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 1532             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 1533             BGE_MACSTAT_LINK_CHANGED);
 1534 
 1535         /* Enable link state change attentions. */
 1536         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
 1537 
 1538         return(0);
 1539 }
 1540 
  1541 /*
  1542  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
  1543  * against our list and return its name if we find a match. The
  1544  * controller does contain VPD support, so in principle the product
  1545  * name could be read from the chip itself, but that path is
  1546  * currently compiled out (#ifdef notdef below); instead we announce
  1547  * the compiled-in name plus the ASIC revision from PCI config space.
  1548  */
 1549 static int
 1550 bge_probe(dev)
 1551         device_t dev;
 1552 {
 1553         struct bge_type *t;
 1554         struct bge_softc *sc;
 1555         char *descbuf;
 1556 
 1557         t = bge_devs;
 1558 
 1559         sc = device_get_softc(dev);
 1560         bzero(sc, sizeof(struct bge_softc));
 1561         sc->bge_dev = dev;
 1562 
  1563         while (t->bge_name != NULL) {
 1564                 if ((pci_get_vendor(dev) == t->bge_vid) &&
 1565                     (pci_get_device(dev) == t->bge_did)) {
 1566 #ifdef notdef
 1567                         bge_vpd_read(sc);
 1568                         device_set_desc(dev, sc->bge_vpd_prodname);
 1569 #endif
 1570                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
 1571                         if (descbuf == NULL)
 1572                                 return(ENOMEM);
 1573                         snprintf(descbuf, BGE_DEVDESC_MAX,
 1574                             "%s, ASIC rev. %#04x", t->bge_name,
 1575                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
 1576                         device_set_desc_copy(dev, descbuf);
 1577                         if (pci_get_subvendor(dev) == DELL_VENDORID)
 1578                                 sc->bge_no_3_led = 1;
 1579                         free(descbuf, M_TEMP);
 1580                         return(0);
 1581                 }
 1582                 t++;
 1583         }
 1584 
 1585         return(ENXIO);
 1586 }
 1587 
 1588 static void
 1589 bge_dma_free(sc)
 1590         struct bge_softc *sc;
 1591 {
 1592         int i;
 1593 
 1594 
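               /*
                * Teardown mirrors bge_dma_alloc() in reverse: destroy the
                * per-buffer maps first, then for each ring unload the map,
                * free the DMA memory and destroy its tag, and finally drop
                * the parent tag all of the above were created from.
                */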
 1595         /* Destroy DMA maps for RX buffers */
 1596 
 1597         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1598                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
 1599                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1600                             sc->bge_cdata.bge_rx_std_dmamap[i]);
 1601         }
 1602 
 1603         /* Destroy DMA maps for jumbo RX buffers */
 1604 
 1605         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1606                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
 1607                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
 1608                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1609         }
 1610 
 1611         /* Destroy DMA maps for TX buffers */
 1612 
 1613         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1614                 if (sc->bge_cdata.bge_tx_dmamap[i])
 1615                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
 1616                             sc->bge_cdata.bge_tx_dmamap[i]);
 1617         }
 1618 
 1619         if (sc->bge_cdata.bge_mtag)
 1620                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
 1621 
 1622 
 1623         /* Destroy standard RX ring */
 1624 
 1625         if (sc->bge_cdata.bge_rx_std_ring_map)
 1626                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
 1627                     sc->bge_cdata.bge_rx_std_ring_map);
 1628         if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
 1629                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
 1630                     sc->bge_ldata.bge_rx_std_ring,
 1631                     sc->bge_cdata.bge_rx_std_ring_map);
 1632 
 1633         if (sc->bge_cdata.bge_rx_std_ring_tag)
 1634                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
 1635 
 1636         /* Destroy jumbo RX ring */
 1637 
 1638         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
 1639                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1640                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1641 
 1642         if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
 1643             sc->bge_ldata.bge_rx_jumbo_ring)
 1644                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1645                     sc->bge_ldata.bge_rx_jumbo_ring,
 1646                     sc->bge_cdata.bge_rx_jumbo_ring_map);
 1647 
 1648         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
 1649                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1650 
 1651         /* Destroy RX return ring */
 1652 
 1653         if (sc->bge_cdata.bge_rx_return_ring_map)
 1654                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
 1655                     sc->bge_cdata.bge_rx_return_ring_map);
 1656 
 1657         if (sc->bge_cdata.bge_rx_return_ring_map &&
 1658             sc->bge_ldata.bge_rx_return_ring)
 1659                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
 1660                     sc->bge_ldata.bge_rx_return_ring,
 1661                     sc->bge_cdata.bge_rx_return_ring_map);
 1662 
 1663         if (sc->bge_cdata.bge_rx_return_ring_tag)
 1664                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
 1665 
 1666         /* Destroy TX ring */
 1667 
 1668         if (sc->bge_cdata.bge_tx_ring_map)
 1669                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
 1670                     sc->bge_cdata.bge_tx_ring_map);
 1671 
 1672         if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
 1673                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
 1674                     sc->bge_ldata.bge_tx_ring,
 1675                     sc->bge_cdata.bge_tx_ring_map);
 1676 
 1677         if (sc->bge_cdata.bge_tx_ring_tag)
 1678                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
 1679 
 1680         /* Destroy status block */
 1681 
 1682         if (sc->bge_cdata.bge_status_map)
 1683                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
 1684                     sc->bge_cdata.bge_status_map);
 1685 
 1686         if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
 1687                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
 1688                     sc->bge_ldata.bge_status_block,
 1689                     sc->bge_cdata.bge_status_map);
 1690 
 1691         if (sc->bge_cdata.bge_status_tag)
 1692                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
 1693 
 1694         /* Destroy statistics block */
 1695 
 1696         if (sc->bge_cdata.bge_stats_map)
 1697                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
 1698                     sc->bge_cdata.bge_stats_map);
 1699 
 1700         if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
 1701                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
 1702                     sc->bge_ldata.bge_stats,
 1703                     sc->bge_cdata.bge_stats_map);
 1704 
 1705         if (sc->bge_cdata.bge_stats_tag)
 1706                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
 1707 
 1708         /* Destroy the parent tag */
 1709 
 1710         if (sc->bge_cdata.bge_parent_tag)
 1711                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
 1712 
 1713         return;
 1714 }
 1715 
 1716 static int
 1717 bge_dma_alloc(dev)
 1718         device_t dev;
 1719 {
 1720         struct bge_softc *sc;
 1721         int i, error;
 1722         struct bge_dmamap_arg ctx;
 1723 
 1724         sc = device_get_softc(dev);
 1725 
 1726         /*
 1727          * Allocate the parent bus DMA tag appropriate for PCI.
 1728          */
 1729         error = bus_dma_tag_create(NULL,        /* parent */
 1730                         PAGE_SIZE, 0,           /* alignment, boundary */
 1731                         BUS_SPACE_MAXADDR,      /* lowaddr */
 1732                         BUS_SPACE_MAXADDR,      /* highaddr */
 1733                         NULL, NULL,             /* filter, filterarg */
 1734                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
 1735                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1736                         0,                      /* flags */
 1737                         NULL, NULL,             /* lockfunc, lockarg */
 1738                         &sc->bge_cdata.bge_parent_tag);
 1739 
 1740         if (error != 0) {
 1741                 device_printf(sc->bge_dev,
 1742                     "could not allocate parent dma tag\n");
 1743                 return (ENOMEM);
 1744         }
 1745 
 1746         /*
 1747          * Create tag for RX mbufs.
 1748          */
 1749         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
 1750             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1751             NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
 1752             BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
 1753 
 1754         if (error) {
  1755                 device_printf(sc->bge_dev, "could not create RX mbuf DMA tag\n");
 1756                 return (ENOMEM);
 1757         }
 1758 
 1759         /* Create DMA maps for RX buffers */
 1760 
 1761         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
 1762                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1763                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
 1764                 if (error) {
 1765                         device_printf(sc->bge_dev,
 1766                             "can't create DMA map for RX\n");
 1767                         return(ENOMEM);
 1768                 }
 1769         }
 1770 
 1771         /* Create DMA maps for TX buffers */
 1772 
 1773         for (i = 0; i < BGE_TX_RING_CNT; i++) {
 1774                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
 1775                             &sc->bge_cdata.bge_tx_dmamap[i]);
 1776                 if (error) {
 1777                         device_printf(sc->bge_dev,
  1778                             "can't create DMA map for TX\n");
 1779                         return(ENOMEM);
 1780                 }
 1781         }
 1782 
 1783         /* Create tag for standard RX ring */
 1784 
 1785         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1786             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1787             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
 1788             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
 1789 
 1790         if (error) {
  1791                 device_printf(sc->bge_dev, "could not create std RX ring DMA tag\n");
 1792                 return (ENOMEM);
 1793         }
 1794 
 1795         /* Allocate DMA'able memory for standard RX ring */
 1796 
 1797         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
 1798             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
 1799             &sc->bge_cdata.bge_rx_std_ring_map);
 1800         if (error)
 1801                 return (ENOMEM);
 1802 
 1803         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
 1804 
 1805         /* Load the address of the standard RX ring */
 1806 
 1807         ctx.bge_maxsegs = 1;
 1808         ctx.sc = sc;
 1809 
 1810         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
 1811             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
 1812             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 1813 
 1814         if (error)
 1815                 return (ENOMEM);
 1816 
 1817         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
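               /*
                * bus_dmamap_load() reports the bus address through the
                * bge_dma_map_addr() callback, which stores the single
                * segment's ds_addr into ctx.bge_busaddr. With BUS_DMA_NOWAIT
                * the load either completes (and the callback has run) or
                * fails by the time the call returns, so reading
                * ctx.bge_busaddr here is safe. The same pattern repeats for
                * every ring and block below.
                */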
 1818 
 1819         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 1820             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 1821 
 1822                 /*
 1823                  * Create tag for jumbo mbufs.
 1824                  * This is really a bit of a kludge. We allocate a special
 1825                  * jumbo buffer pool which (thanks to the way our DMA
 1826                  * memory allocation works) will consist of contiguous
 1827                  * pages. This means that even though a jumbo buffer might
 1828                  * be larger than a page size, we don't really need to
 1829                  * map it into more than one DMA segment. However, the
 1830                  * default mbuf tag will result in multi-segment mappings,
 1831                  * so we have to create a special jumbo mbuf tag that
 1832                  * lets us get away with mapping the jumbo buffers as
 1833                  * a single segment. I think eventually the driver should
 1834                  * be changed so that it uses ordinary mbufs and cluster
 1835                  * buffers, i.e. jumbo frames can span multiple DMA
 1836                  * descriptors. But that's a project for another day.
 1837                  */
 1838 
 1839                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1840                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1841                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
 1842                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
 1843 
 1844                 if (error) {
 1845                         device_printf(sc->bge_dev,
  1846                             "could not create jumbo mbuf DMA tag\n");
 1847                         return (ENOMEM);
 1848                 }
 1849 
 1850                 /* Create tag for jumbo RX ring */
 1851                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1852                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1853                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
 1854                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
 1855 
 1856                 if (error) {
 1857                         device_printf(sc->bge_dev,
  1858                             "could not create jumbo RX ring DMA tag\n");
 1859                         return (ENOMEM);
 1860                 }
 1861 
 1862                 /* Allocate DMA'able memory for jumbo RX ring */
 1863                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1864                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
 1865                     BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1866                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
 1867                 if (error)
 1868                         return (ENOMEM);
 1869 
 1870                 /* Load the address of the jumbo RX ring */
 1871                 ctx.bge_maxsegs = 1;
 1872                 ctx.sc = sc;
 1873 
 1874                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 1875                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 1876                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
 1877                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 1878 
 1879                 if (error)
 1880                         return (ENOMEM);
 1881 
 1882                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
 1883 
 1884                 /* Create DMA maps for jumbo RX buffers */
 1885 
 1886                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
 1887                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
 1888                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
 1889                         if (error) {
 1890                                 device_printf(sc->bge_dev,
 1891                                     "can't create DMA map for RX\n");
 1892                                 return(ENOMEM);
 1893                         }
 1894                 }
 1895 
 1896         }
 1897 
 1898         /* Create tag for RX return ring */
 1899 
 1900         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1901             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1902             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
 1903             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
 1904 
 1905         if (error) {
  1906                 device_printf(sc->bge_dev, "could not create RX return ring DMA tag\n");
 1907                 return (ENOMEM);
 1908         }
 1909 
 1910         /* Allocate DMA'able memory for RX return ring */
 1911 
 1912         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
 1913             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
 1914             &sc->bge_cdata.bge_rx_return_ring_map);
 1915         if (error)
 1916                 return (ENOMEM);
 1917 
 1918         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
 1919             BGE_RX_RTN_RING_SZ(sc));
 1920 
 1921         /* Load the address of the RX return ring */
 1922 
 1923         ctx.bge_maxsegs = 1;
 1924         ctx.sc = sc;
 1925 
 1926         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
 1927             sc->bge_cdata.bge_rx_return_ring_map,
 1928             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
 1929             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 1930 
 1931         if (error)
 1932                 return (ENOMEM);
 1933 
 1934         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
 1935 
 1936         /* Create tag for TX ring */
 1937 
 1938         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1939             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1940             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
 1941             &sc->bge_cdata.bge_tx_ring_tag);
 1942 
 1943         if (error) {
  1944                 device_printf(sc->bge_dev, "could not create TX ring DMA tag\n");
 1945                 return (ENOMEM);
 1946         }
 1947 
 1948         /* Allocate DMA'able memory for TX ring */
 1949 
 1950         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
 1951             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
 1952             &sc->bge_cdata.bge_tx_ring_map);
 1953         if (error)
 1954                 return (ENOMEM);
 1955 
 1956         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
 1957 
 1958         /* Load the address of the TX ring */
 1959 
 1960         ctx.bge_maxsegs = 1;
 1961         ctx.sc = sc;
 1962 
 1963         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
 1964             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
 1965             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 1966 
 1967         if (error)
 1968                 return (ENOMEM);
 1969 
 1970         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
 1971 
 1972         /* Create tag for status block */
 1973 
 1974         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 1975             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1976             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
 1977             NULL, NULL, &sc->bge_cdata.bge_status_tag);
 1978 
 1979         if (error) {
  1980                 device_printf(sc->bge_dev, "could not create status block DMA tag\n");
 1981                 return (ENOMEM);
 1982         }
 1983 
 1984         /* Allocate DMA'able memory for status block */
 1985 
 1986         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
 1987             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
 1988             &sc->bge_cdata.bge_status_map);
 1989         if (error)
 1990                 return (ENOMEM);
 1991 
 1992         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
 1993 
 1994         /* Load the address of the status block */
 1995 
 1996         ctx.sc = sc;
 1997         ctx.bge_maxsegs = 1;
 1998 
 1999         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
 2000             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
 2001             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2002 
 2003         if (error)
 2004                 return (ENOMEM);
 2005 
 2006         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
 2007 
 2008         /* Create tag for statistics block */
 2009 
 2010         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
 2011             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 2012             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
 2013             &sc->bge_cdata.bge_stats_tag);
 2014 
 2015         if (error) {
  2016                 device_printf(sc->bge_dev, "could not create stats block DMA tag\n");
 2017                 return (ENOMEM);
 2018         }
 2019 
 2020         /* Allocate DMA'able memory for statistics block */
 2021 
 2022         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
 2023             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
 2024             &sc->bge_cdata.bge_stats_map);
 2025         if (error)
 2026                 return (ENOMEM);
 2027 
 2028         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
 2029 
  2030         /* Load the address of the statistics block */
 2031 
 2032         ctx.sc = sc;
 2033         ctx.bge_maxsegs = 1;
 2034 
 2035         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
 2036             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
 2037             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
 2038 
 2039         if (error)
 2040                 return (ENOMEM);
 2041 
 2042         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
 2043 
 2044         return(0);
 2045 }
 2046 
 2047 static int
 2048 bge_attach(dev)
 2049         device_t dev;
 2050 {
 2051         struct ifnet *ifp;
 2052         struct bge_softc *sc;
 2053         u_int32_t hwcfg = 0;
 2054         u_int32_t mac_tmp = 0;
 2055         u_char eaddr[6];
 2056         int error = 0, rid;
 2057 
 2058         sc = device_get_softc(dev);
 2059         sc->bge_dev = dev;
 2060 
 2061         /*
 2062          * Map control/status registers.
 2063          */
 2064         pci_enable_busmaster(dev);
 2065 
 2066         rid = BGE_PCI_BAR0;
 2067         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 2068             RF_ACTIVE|PCI_RF_DENSE);
 2069 
 2070         if (sc->bge_res == NULL) {
  2071                 device_printf(sc->bge_dev, "couldn't map memory\n");
 2072                 error = ENXIO;
 2073                 goto fail;
 2074         }
 2075 
 2076         sc->bge_btag = rman_get_bustag(sc->bge_res);
 2077         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
 2078 
 2079         /* Allocate interrupt */
 2080         rid = 0;
 2081 
 2082         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2083             RF_SHAREABLE | RF_ACTIVE);
 2084 
 2085         if (sc->bge_irq == NULL) {
 2086                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
 2087                 error = ENXIO;
 2088                 goto fail;
 2089         }
 2090 
 2091         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
 2092 
 2093         /* Save ASIC rev. */
 2094 
 2095         sc->bge_chipid =
 2096             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
 2097             BGE_PCIMISCCTL_ASICREV;
 2098         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
 2099         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
 2100 
 2101         /*
 2102          * Treat the 5714 and the 5752 like the 5750 until we have more info
 2103          * on this chip.
 2104          */
 2105         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 
 2106             sc->bge_asicrev == BGE_ASICREV_BCM5752)
 2107                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
 2108 
 2109         /*
  2110          * XXX: Broadcom Linux driver.  Not in specs or errata.
 2111          * PCI-Express?
 2112          */
 2113         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
 2114                 u_int32_t v;
 2115 
 2116                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
 2117                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
 2118                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
 2119                         if ((v & 0xff) == BGE_PCIE_CAPID)
 2120                                 sc->bge_pcie = 1;
 2121                 }
 2122         }
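               /*
                * This is a hand-rolled PCI capability check: bits 15:8 of the
                * MSI capability dword hold the next-capability pointer, and
                * if it points at a PCI Express capability header the device
                * is on PCIe rather than PCI/PCI-X. (FreeBSD's
                * pci_find_extcap(9) performs a similar walk.)
                */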
 2123 
 2124         /* Try to reset the chip. */
 2125         bge_reset(sc);
 2126 
 2127         if (bge_chipinit(sc)) {
 2128                 device_printf(sc->bge_dev, "chip initialization failed\n");
 2129                 bge_release_resources(sc);
 2130                 error = ENXIO;
 2131                 goto fail;
 2132         }
 2133 
  2134         /*
  2135          * Get the station address from NIC memory or the EEPROM.
  2136          */
 2137         mac_tmp = bge_readmem_ind(sc, 0x0c14);
 2138         if ((mac_tmp >> 16) == 0x484b) {
 2139                 eaddr[0] = (u_char)(mac_tmp >> 8);
 2140                 eaddr[1] = (u_char)mac_tmp;
 2141                 mac_tmp = bge_readmem_ind(sc, 0x0c18);
 2142                 eaddr[2] = (u_char)(mac_tmp >> 24);
 2143                 eaddr[3] = (u_char)(mac_tmp >> 16);
 2144                 eaddr[4] = (u_char)(mac_tmp >> 8);
 2145                 eaddr[5] = (u_char)mac_tmp;
 2146         } else if (bge_read_eeprom(sc, eaddr,
 2147             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2148                 device_printf(sc->bge_dev, "failed to read station address\n");
 2149                 bge_release_resources(sc);
 2150                 error = ENXIO;
 2151                 goto fail;
 2152         }
 2153 
 2154         /* 5705 limits RX return ring to 512 entries. */
 2155         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 2156             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 2157                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
 2158         else
 2159                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
 2160 
 2161         if (bge_dma_alloc(dev)) {
 2162                 device_printf(sc->bge_dev,
 2163                     "failed to allocate DMA resources\n");
 2164                 bge_release_resources(sc);
 2165                 error = ENXIO;
 2166                 goto fail;
 2167         }
 2168 
 2169         /* Set default tuneable values. */
 2170         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
 2171         sc->bge_rx_coal_ticks = 150;
 2172         sc->bge_tx_coal_ticks = 150;
 2173         sc->bge_rx_max_coal_bds = 64;
 2174         sc->bge_tx_max_coal_bds = 128;
 2175 
 2176         /* Set up ifnet structure */
 2177         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
 2178         if (ifp == NULL) {
 2179                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
 2180                 bge_release_resources(sc);
 2181                 error = ENXIO;
 2182                 goto fail;
 2183         }
 2184         ifp->if_softc = sc;
 2185         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2186         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2187         ifp->if_ioctl = bge_ioctl;
 2188         ifp->if_start = bge_start;
 2189         ifp->if_watchdog = bge_watchdog;
 2190         ifp->if_init = bge_init;
 2191         ifp->if_mtu = ETHERMTU;
 2192         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
 2193         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 2194         IFQ_SET_READY(&ifp->if_snd);
 2195         ifp->if_hwassist = BGE_CSUM_FEATURES;
 2196         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
 2197             IFCAP_VLAN_MTU;
 2198         ifp->if_capenable = ifp->if_capabilities;
 2199 #ifdef DEVICE_POLLING
 2200         ifp->if_capabilities |= IFCAP_POLLING;
 2201 #endif
 2202 
 2203         /*
 2204          * 5700 B0 chips do not support checksumming correctly due
 2205          * to hardware bugs.
 2206          */
 2207         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
 2208                 ifp->if_capabilities &= ~IFCAP_HWCSUM;
  2209                 ifp->if_capenable &= ~IFCAP_HWCSUM;
 2210                 ifp->if_hwassist = 0;
 2211         }
 2212 
 2213         /*
 2214          * Figure out what sort of media we have by checking the
 2215          * hardware config word in the first 32k of NIC internal memory,
 2216          * or fall back to examining the EEPROM if necessary.
 2217          * Note: on some BCM5700 cards, this value appears to be unset.
 2218          * If that's the case, we have to rely on identifying the NIC
 2219          * by its PCI subsystem ID, as we do below for the SysKonnect
 2220          * SK-9D41.
 2221          */
 2222         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
 2223                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
 2224         else {
 2225                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
 2226                     sizeof(hwcfg))) {
 2227                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
 2228                         bge_release_resources(sc);
 2229                         error = ENXIO;
 2230                         goto fail;
 2231                 }
 2232                 hwcfg = ntohl(hwcfg);
 2233         }
 2234 
 2235         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
 2236                 sc->bge_tbi = 1;
 2237 
 2238         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
 2239         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
 2240                 sc->bge_tbi = 1;
 2241 
 2242         if (sc->bge_tbi) {
 2243                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
 2244                     bge_ifmedia_upd, bge_ifmedia_sts);
 2245                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2246                 ifmedia_add(&sc->bge_ifmedia,
 2247                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2248                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2249                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
 2250                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
 2251         } else {
 2252                 /*
 2253                  * Do transceiver setup.
 2254                  */
 2255                 if (mii_phy_probe(dev, &sc->bge_miibus,
 2256                     bge_ifmedia_upd, bge_ifmedia_sts)) {
 2257                         device_printf(sc->bge_dev, "MII without any PHY!\n");
 2258                         bge_release_resources(sc);
 2259                         error = ENXIO;
 2260                         goto fail;
 2261                 }
 2262         }
 2263 
 2264         /*
 2265          * When using the BCM5701 in PCI-X mode, data corruption has
 2266          * been observed in the first few bytes of some received packets.
 2267          * Aligning the packet buffer in memory eliminates the corruption.
 2268          * Unfortunately, this misaligns the packet payloads.  On platforms
 2269          * which do not support unaligned accesses, we will realign the
 2270          * payloads by copying the received packets.
 2271          */
 2272         switch (sc->bge_chipid) {
 2273         case BGE_CHIPID_BCM5701_A0:
 2274         case BGE_CHIPID_BCM5701_B0:
 2275         case BGE_CHIPID_BCM5701_B2:
 2276         case BGE_CHIPID_BCM5701_B5:
 2277                 /* If in PCI-X mode, work around the alignment bug. */
 2278                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
 2279                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
 2280                     BGE_PCISTATE_PCI_BUSSPEED)
 2281                         sc->bge_rx_alignment_bug = 1;
 2282                 break;
 2283         }
 2284 
 2285         /*
 2286          * Call MI attach routine.
 2287          */
 2288         ether_ifattach(ifp, eaddr);
 2289         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
 2290 
 2291         /*
 2292          * Hookup IRQ last.
 2293          */
 2294         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
 2295            bge_intr, sc, &sc->bge_intrhand);
 2296 
 2297         if (error) {
 2298                 bge_detach(dev);
 2299                 device_printf(sc->bge_dev, "couldn't set up irq\n");
 2300         }
 2301 
 2302 fail:
 2303         return(error);
 2304 }
 2305 
 2306 static int
 2307 bge_detach(dev)
 2308         device_t dev;
 2309 {
 2310         struct bge_softc *sc;
 2311         struct ifnet *ifp;
 2312 
 2313         sc = device_get_softc(dev);
 2314         ifp = sc->bge_ifp;
 2315 
 2316 #ifdef DEVICE_POLLING
 2317         if (ifp->if_capenable & IFCAP_POLLING)
 2318                 ether_poll_deregister(ifp);
 2319 #endif
 2320 
 2321         BGE_LOCK(sc);
 2322         bge_stop(sc);
 2323         bge_reset(sc);
 2324         BGE_UNLOCK(sc);
 2325 
 2326         ether_ifdetach(ifp);
 2327 
 2328         if (sc->bge_tbi) {
 2329                 ifmedia_removeall(&sc->bge_ifmedia);
 2330         } else {
 2331                 bus_generic_detach(dev);
 2332                 device_delete_child(dev, sc->bge_miibus);
 2333         }
 2334 
 2335         bge_release_resources(sc);
 2336 
 2337         return(0);
 2338 }
 2339 
 2340 static void
 2341 bge_release_resources(sc)
 2342         struct bge_softc *sc;
 2343 {
 2344         device_t dev;
 2345 
 2346         dev = sc->bge_dev;
 2347 
 2348         if (sc->bge_vpd_prodname != NULL)
 2349                 free(sc->bge_vpd_prodname, M_DEVBUF);
 2350 
 2351         if (sc->bge_vpd_readonly != NULL)
 2352                 free(sc->bge_vpd_readonly, M_DEVBUF);
 2353 
 2354         if (sc->bge_intrhand != NULL)
 2355                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
 2356 
 2357         if (sc->bge_irq != NULL)
 2358                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
 2359 
 2360         if (sc->bge_res != NULL)
 2361                 bus_release_resource(dev, SYS_RES_MEMORY,
 2362                     BGE_PCI_BAR0, sc->bge_res);
 2363 
 2364         if (sc->bge_ifp != NULL)
 2365                 if_free(sc->bge_ifp);
 2366 
 2367         bge_dma_free(sc);
 2368 
 2369         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
 2370                 BGE_LOCK_DESTROY(sc);
 2371 
 2372         return;
 2373 }
 2374 
 2375 static void
 2376 bge_reset(sc)
 2377         struct bge_softc *sc;
 2378 {
 2379         device_t dev;
 2380         u_int32_t cachesize, command, pcistate, reset;
 2381         int i, val = 0;
 2382 
 2383         dev = sc->bge_dev;
 2384 
 2385         /* Save some important PCI state. */
 2386         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
 2387         command = pci_read_config(dev, BGE_PCI_CMD, 4);
 2388         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
 2389 
 2390         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2391             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
  2392             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2393 
 2394         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
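               /*
                * The (65<<1) component appears to be the 32-bit timer
                * prescaler field of BGE_MISC_CFG, set for a 66MHz core clock
                * (0x41 == 65); the same value is written back after the
                * reset completes below.
                */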
 2395 
 2396         /* XXX: Broadcom Linux driver. */
 2397         if (sc->bge_pcie) {
 2398                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
 2399                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
 2400                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2401                         /* Prevent PCIE link training during global reset */
 2402                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
 2403                         reset |= (1<<29);
 2404                 }
 2405         }
 2406 
 2407         /* Issue global reset */
 2408         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
 2409 
 2410         DELAY(1000);
 2411 
 2412         /* XXX: Broadcom Linux driver. */
 2413         if (sc->bge_pcie) {
 2414                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
 2415                         uint32_t v;
 2416 
 2417                         DELAY(500000); /* wait for link training to complete */
 2418                         v = pci_read_config(dev, 0xc4, 4);
 2419                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
 2420                 }
 2421                 /* Set PCIE max payload size and clear error status. */
 2422                 pci_write_config(dev, 0xd8, 0xf5000, 4);
 2423         }
 2424 
 2425         /* Reset some of the PCI state that got zapped by reset */
 2426         pci_write_config(dev, BGE_PCI_MISC_CTL,
 2427             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
 2428             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
 2429         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
 2430         pci_write_config(dev, BGE_PCI_CMD, command, 4);
 2431         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
 2432 
 2433         /* Enable memory arbiter. */
 2434         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2435             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 2436                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 2437 
 2438         /*
 2439          * Prevent PXE restart: write a magic number to the
 2440          * general communications memory at 0xB50.
 2441          */
 2442         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 2443         /*
 2444          * Poll the value location we just wrote until
 2445          * we see the 1's complement of the magic number.
 2446          * This indicates that the firmware initialization
 2447          * is complete.
 2448          */
 2449         for (i = 0; i < BGE_TIMEOUT; i++) {
 2450                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 2451                 if (val == ~BGE_MAGIC_NUMBER)
 2452                         break;
 2453                 DELAY(10);
 2454         }
 2455 
 2456         if (i == BGE_TIMEOUT) {
 2457                 device_printf(sc->bge_dev, "firmware handshake timed out\n");
 2458                 return;
 2459         }
 2460 
 2461         /*
 2462          * XXX Wait for the value of the PCISTATE register to
 2463          * return to its original pre-reset state. This is a
 2464          * fairly good indicator of reset completion. If we don't
 2465          * wait for the reset to fully complete, trying to read
 2466          * from the device's non-PCI registers may yield garbage
 2467          * results.
 2468          */
 2469         for (i = 0; i < BGE_TIMEOUT; i++) {
 2470                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
 2471                         break;
 2472                 DELAY(10);
 2473         }
 2474 
 2475         /* Fix up byte swapping */
 2476         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
 2477             BGE_MODECTL_BYTESWAP_DATA);
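               /*
                * BGE_DMA_SWAP_OPTIONS is presumably selected at compile time
                * for the host's endianness; together with BYTESWAP_DATA it
                * tells the chip how to lay out descriptors and frame data
                * in host memory.
                */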
 2478 
 2479         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
 2480 
 2481         /*
 2482          * The 5704 in TBI mode apparently needs some special
  2483          * adjustment to ensure the SERDES drive level is set
 2484          * to 1.2V.
 2485          */
 2486         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
 2487                 uint32_t serdescfg;
 2488                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
 2489                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
 2490                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
 2491         }
 2492 
 2493         /* XXX: Broadcom Linux driver. */
 2494         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
 2495                 uint32_t v;
 2496 
 2497                 v = CSR_READ_4(sc, 0x7c00);
 2498                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
 2499         }
 2500         DELAY(10000);
 2501 
 2502         return;
 2503 }
 2504 
 2505 /*
 2506  * Frame reception handling. This is called if there's a frame
 2507  * on the receive return list.
 2508  *
 2509  * Note: we have to be able to handle two possibilities here:
 2510  * 1) the frame is from the jumbo receive ring
 2511  * 2) the frame is from the standard receive ring
 2512  */
 2513 
 2514 static void
 2515 bge_rxeof(sc)
 2516         struct bge_softc *sc;
 2517 {
 2518         struct ifnet *ifp;
 2519         int stdcnt = 0, jumbocnt = 0;
 2520 
 2521         BGE_LOCK_ASSERT(sc);
 2522 
 2523         /* Nothing to do */
 2524         if (sc->bge_rx_saved_considx ==
 2525             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
 2526                 return;
 2527 
 2528         ifp = sc->bge_ifp;
 2529 
 2530         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
 2531             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
 2532         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2533             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
 2534         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2535             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2536                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2537                     sc->bge_cdata.bge_rx_jumbo_ring_map,
 2538                     BUS_DMASYNC_POSTREAD);
 2539         }
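               /*
                * The POSTREAD syncs above make the NIC's writes to the return
                * ring (and our view of the std/jumbo rings) visible to the
                * CPU before we walk the descriptors; on platforms using
                * bounce buffers this is where the copy-back happens.
                */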
 2540 
  2541         while (sc->bge_rx_saved_considx !=
 2542             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
 2543                 struct bge_rx_bd        *cur_rx;
 2544                 u_int32_t               rxidx;
 2545                 struct ether_header     *eh;
 2546                 struct mbuf             *m = NULL;
 2547                 u_int16_t               vlan_tag = 0;
 2548                 int                     have_tag = 0;
 2549 
 2550 #ifdef DEVICE_POLLING
 2551                 if (ifp->if_capenable & IFCAP_POLLING) {
 2552                         if (sc->rxcycles <= 0)
 2553                                 break;
 2554                         sc->rxcycles--;
 2555                 }
 2556 #endif
 2557 
  2558                 cur_rx =
  2559                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
 2560 
 2561                 rxidx = cur_rx->bge_idx;
 2562                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
 2563 
 2564                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 2565                         have_tag = 1;
 2566                         vlan_tag = cur_rx->bge_vlan_tag;
 2567                 }
 2568 
 2569                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
 2570                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
 2571                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
 2572                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
 2573                             BUS_DMASYNC_POSTREAD);
 2574                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
 2575                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
 2576                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
 2577                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
 2578                         jumbocnt++;
 2579                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2580                                 ifp->if_ierrors++;
 2581                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2582                                 continue;
 2583                         }
 2584                         if (bge_newbuf_jumbo(sc,
 2585                             sc->bge_jumbo, NULL) == ENOBUFS) {
 2586                                 ifp->if_ierrors++;
 2587                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
 2588                                 continue;
 2589                         }
 2590                 } else {
 2591                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 2592                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2593                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
 2594                             BUS_DMASYNC_POSTREAD);
 2595                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2596                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
 2597                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
 2598                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 2599                         stdcnt++;
 2600                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
 2601                                 ifp->if_ierrors++;
 2602                                 bge_newbuf_std(sc, sc->bge_std, m);
 2603                                 continue;
 2604                         }
 2605                         if (bge_newbuf_std(sc, sc->bge_std,
 2606                             NULL) == ENOBUFS) {
 2607                                 ifp->if_ierrors++;
 2608                                 bge_newbuf_std(sc, sc->bge_std, m);
 2609                                 continue;
 2610                         }
 2611                 }
 2612 
 2613                 ifp->if_ipackets++;
 2614 #ifndef __NO_STRICT_ALIGNMENT
 2615                 /*
 2616                  * For architectures with strict alignment we must make sure
 2617                  * the payload is aligned.
 2618                  */
 2619                 if (sc->bge_rx_alignment_bug) {
 2620                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 2621                             cur_rx->bge_len);
 2622                         m->m_data += ETHER_ALIGN;
 2623                 }
 2624 #endif
 2625                 eh = mtod(m, struct ether_header *);
 2626                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 2627                 m->m_pkthdr.rcvif = ifp;
 2628 
 2629                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2630                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
 2631                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2632                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
 2633                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2634                         }
 2635                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
 2636                             m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
 2637                                 m->m_pkthdr.csum_data =
 2638                                     cur_rx->bge_tcp_udp_csum;
 2639                                 m->m_pkthdr.csum_flags |=
 2640                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2641                         }
 2642                 }
 2643 
 2644                 /*
 2645                  * If we received a packet with a vlan tag,
 2646                  * attach that information to the packet.
 2647                  */
 2648                 if (have_tag) {
 2649                         VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
 2650                         if (m == NULL)
 2651                                 continue;
 2652                 }
 2653 
 2654                 BGE_UNLOCK(sc);
 2655                 (*ifp->if_input)(ifp, m);
 2656                 BGE_LOCK(sc);
 2657         }
 2658 
 2659         if (stdcnt > 0)
 2660                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 2661                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
 2662         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 2663             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 2664                 if (jumbocnt > 0)
 2665                         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 2666                             sc->bge_cdata.bge_rx_jumbo_ring_map,
 2667                             BUS_DMASYNC_PREWRITE);
 2668         }
 2669 
 2670         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
 2671         if (stdcnt)
 2672                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
 2673         if (jumbocnt)
 2674                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
 2675 }
 2676 
 2677 static void
 2678 bge_txeof(sc)
 2679         struct bge_softc *sc;
 2680 {
 2681         struct bge_tx_bd *cur_tx = NULL;
 2682         struct ifnet *ifp;
 2683 
 2684         BGE_LOCK_ASSERT(sc);
 2685 
 2686         /* Nothing to do */
 2687         if (sc->bge_tx_saved_considx ==
 2688             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
 2689                 return;
 2690 
 2691         ifp = sc->bge_ifp;
 2692 
 2693         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
 2694             sc->bge_cdata.bge_tx_ring_map,
 2695             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 2696         /*
 2697          * Go through our tx ring and free mbufs for those
 2698          * frames that have been sent.
 2699          */
 2700         while (sc->bge_tx_saved_considx !=
 2701             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
 2702                 u_int32_t               idx = 0;
 2703 
 2704                 idx = sc->bge_tx_saved_considx;
 2705                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 2706                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
 2707                         ifp->if_opackets++;
 2708                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 2709                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
 2710                             sc->bge_cdata.bge_tx_dmamap[idx],
 2711                             BUS_DMASYNC_POSTWRITE);
 2712                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
 2713                             sc->bge_cdata.bge_tx_dmamap[idx]);
 2714                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
 2715                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
 2716                 }
 2717                 sc->bge_txcnt--;
 2718                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 2719                 ifp->if_timer = 0;
 2720         }
 2721 
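              /*
               * We reclaimed at least one descriptor, so clear OACTIVE and
               * let bge_start_locked() queue more frames.
               */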
 2722         if (cur_tx != NULL)
 2723                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2724 }
 2725 
 2726 #ifdef DEVICE_POLLING
 2727 static void
 2728 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2729 {
 2730         struct bge_softc *sc = ifp->if_softc;
 2731         
 2732         BGE_LOCK(sc);
 2733         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2734                 bge_poll_locked(ifp, cmd, count);
 2735         BGE_UNLOCK(sc);
 2736 }
 2737 
 2738 static void
 2739 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2740 {
 2741         struct bge_softc *sc = ifp->if_softc;
 2742         uint32_t statusword;
 2743 
 2744         BGE_LOCK_ASSERT(sc);
 2745 
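              /*
               * Sync the status block before reading it, atomically clear the
               * status word so the next update is noticed, and sync again for
               * the chip's next DMA write.
               */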
 2746         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2747             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
 2748 
  2749         statusword =
  2750             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2750 
 2751         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2752             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
 2753 
  2754         /* Note the link event; POLL_AND_CHECK_STATUS below will handle it. */
 2755         if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
 2756                 sc->bge_link_evt++;
 2757 
 2758         if (cmd == POLL_AND_CHECK_STATUS)
 2759                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
 2760                     sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
 2761                     sc->bge_link_evt || sc->bge_tbi)
 2762                         bge_link_upd(sc);
 2763 
 2764         sc->rxcycles = count;
 2765         bge_rxeof(sc);
 2766         bge_txeof(sc);
 2767         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2768                 bge_start_locked(ifp);
 2769 }
 2770 #endif /* DEVICE_POLLING */
 2771 
 2772 static void
 2773 bge_intr(xsc)
 2774         void *xsc;
 2775 {
 2776         struct bge_softc *sc;
 2777         struct ifnet *ifp;
 2778         uint32_t statusword;
 2779 
 2780         sc = xsc;
 2781 
 2782         BGE_LOCK(sc);
 2783 
 2784         ifp = sc->bge_ifp;
 2785 
 2786 #ifdef DEVICE_POLLING
 2787         if (ifp->if_capenable & IFCAP_POLLING) {
 2788                 BGE_UNLOCK(sc);
 2789                 return;
 2790         }
 2791 #endif
 2792 
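              /*
               * Fetch and clear the status word from the DMA'd status block,
               * bracketed by syncs so our view of the block stays coherent.
               */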
 2793         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2794             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
 2795 
 2796         statusword =
 2797             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
 2798 
 2799         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
 2800             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
 2801 
 2802 #ifdef notdef
 2803         /* Avoid this for now -- checking this register is expensive. */
 2804         /* Make sure this is really our interrupt. */
 2805         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
 2806                 return;
 2807 #endif
  2808         /* Ack the interrupt and stop others from occurring. */
 2809         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 2810 
 2811         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
 2812             sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
 2813             statusword & BGE_STATFLAG_LINKSTATE_CHANGED || sc->bge_link_evt)
 2814                 bge_link_upd(sc);
 2815 
 2816         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2817                 /* Check RX return ring producer/consumer */
 2818                 bge_rxeof(sc);
 2819 
 2820                 /* Check TX ring producer/consumer */
 2821                 bge_txeof(sc);
 2822         }
 2823 
 2824         /* Re-enable interrupts. */
 2825         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 2826 
 2827         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 2828             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2829                 bge_start_locked(ifp);
 2830 
 2831         BGE_UNLOCK(sc);
 2832 
 2833         return;
 2834 }
 2835 
 2836 static void
 2837 bge_tick_locked(sc)
 2838         struct bge_softc *sc;
 2839 {
 2840         struct mii_data *mii = NULL;
 2841 
 2842         BGE_LOCK_ASSERT(sc);
 2843 
 2844         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 2845             sc->bge_asicrev == BGE_ASICREV_BCM5750)
 2846                 bge_stats_update_regs(sc);
 2847         else
 2848                 bge_stats_update(sc);
 2849 
 2850         if (!sc->bge_tbi) {
 2851                 mii = device_get_softc(sc->bge_miibus);
 2852                 mii_tick(mii);
 2853         } else {
  2854                 /*
  2855                  * Since auto-polling can't be used in TBI mode, we have
  2856                  * to poll link status manually.  Here we register a
  2857                  * pending link event and trigger an interrupt.
  2858                  */
 2859 #ifdef DEVICE_POLLING
 2860                 /* In polling mode we poll link state in bge_poll_locked() */
 2861                 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
 2862 #endif
  2863                 {
  2864                         sc->bge_link_evt++;
  2865                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
  2866                 }
 2867         }
 2868 
 2869         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 2870 }
 2871 
 2872 static void
 2873 bge_tick(xsc)
 2874         void *xsc;
 2875 {
 2876         struct bge_softc *sc;
 2877 
 2878         sc = xsc;
 2879 
 2880         BGE_LOCK(sc);
 2881         bge_tick_locked(sc);
 2882         BGE_UNLOCK(sc);
 2883 }
 2884 
 2885 static void
 2886 bge_stats_update_regs(sc)
 2887         struct bge_softc *sc;
 2888 {
 2889         struct ifnet *ifp;
 2890         struct bge_mac_stats_regs stats;
 2891         u_int32_t *s;
 2892         u_long cnt;                     /* current register value */
 2893         int i;
 2894 
 2895         ifp = sc->bge_ifp;
 2896 
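              /*
               * Copy the chip's register-based statistics block into a local
               * structure, one 32-bit register at a time.
               */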
 2897         s = (u_int32_t *)&stats;
 2898         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
 2899                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
 2900                 s++;
 2901         }
 2902 
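              /*
               * The hardware counters are cumulative, so fold only the delta
               * since the last read into if_collisions; if the counter went
               * backwards (e.g. after a chip reset), take the raw value.
               */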
 2903         cnt = stats.dot3StatsSingleCollisionFrames +
 2904             stats.dot3StatsMultipleCollisionFrames +
 2905             stats.dot3StatsExcessiveCollisions +
 2906             stats.dot3StatsLateCollisions;
 2907         ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
 2908             cnt - sc->bge_tx_collisions : cnt;
 2909         sc->bge_tx_collisions = cnt;
 2910 }
 2911 
 2912 static void
 2913 bge_stats_update(sc)
 2914         struct bge_softc *sc;
 2915 {
 2916         struct ifnet *ifp;
 2917         bus_size_t stats;
 2918         u_long cnt;                     /* current register value */
 2919 
 2920         ifp = sc->bge_ifp;
 2921 
 2922         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
 2923 
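      /*
       * Read the low word of a (presumably 64-bit) counter from the chip's
       * statistics block, which is mapped through the memory window.
       */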
 2924 #define READ_STAT(sc, stats, stat) \
 2925         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
 2926 
 2927         cnt = READ_STAT(sc, stats,
 2928             txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
 2929         cnt += READ_STAT(sc, stats,
 2930             txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
 2931         cnt += READ_STAT(sc, stats,
 2932             txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
 2933         cnt += READ_STAT(sc, stats,
  2934             txstats.dot3StatsLateCollisions.bge_addr_lo);
 2935         ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
 2936             cnt - sc->bge_tx_collisions : cnt;
 2937         sc->bge_tx_collisions = cnt;
 2938 
 2939         cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
 2940         ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
 2941             cnt - sc->bge_rx_discards : cnt;
 2942         sc->bge_rx_discards = cnt;
 2943 
 2944         cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
 2945         ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
 2946             cnt - sc->bge_tx_discards : cnt;
 2947         sc->bge_tx_discards = cnt;
 2948 
 2949 #undef READ_STAT
 2950 }
 2951 
 2952 /*
 2953  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 2954  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 2955  * but when such padded frames employ the bge IP/TCP checksum offload,
 2956  * the hardware checksum assist gives incorrect results (possibly
 2957  * from incorporating its own padding into the UDP/TCP checksum; who knows).
 2958  * If we pad such runts with zeros, the onboard checksum comes out correct.
 2959  */
 2960 static __inline int
 2961 bge_cksum_pad(struct mbuf *m)
 2962 {
 2963         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
 2964         struct mbuf *last;
 2965 
  2966         /* If there's only the packet-header mbuf and we can pad there, use it. */
 2967         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
 2968             M_TRAILINGSPACE(m) >= padlen) {
 2969                 last = m;
 2970         } else {
 2971                 /*
 2972                  * Walk packet chain to find last mbuf. We will either
 2973                  * pad there, or append a new mbuf and pad it.
 2974                  */
 2975                 for (last = m; last->m_next != NULL; last = last->m_next);
 2976                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
 2977                         /* Allocate new empty mbuf, pad it. Compact later. */
 2978                         struct mbuf *n;
 2979 
 2980                         MGET(n, M_DONTWAIT, MT_DATA);
 2981                         if (n == NULL)
 2982                                 return (ENOBUFS);
 2983                         n->m_len = 0;
 2984                         last->m_next = n;
 2985                         last = n;
 2986                 }
 2987         }
 2988         
 2989         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
 2990         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
 2991         last->m_len += padlen;
 2992         m->m_pkthdr.len += padlen;
 2993 
 2994         return (0);
 2995 }
 2996 
 2997 /*
  2998  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 2999  * pointers to descriptors.
 3000  */
 3001 static int
 3002 bge_encap(sc, m_head, txidx)
 3003         struct bge_softc *sc;
 3004         struct mbuf *m_head;
 3005         uint32_t *txidx;
 3006 {
 3007         bus_dma_segment_t       segs[BGE_NSEG_NEW];
 3008         bus_dmamap_t            map;
 3009         struct bge_tx_bd        *d = NULL;
 3010         struct m_tag            *mtag;
 3011         uint32_t                idx = *txidx;
 3012         uint16_t                csum_flags = 0;
 3013         int                     nsegs, i, error;
 3014 
 3015         if (m_head->m_pkthdr.csum_flags) {
 3016                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 3017                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
 3018                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
 3019                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
 3020                         if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
 3021                             bge_cksum_pad(m_head) != 0)
 3022                                 return (ENOBUFS);
 3023                 }
 3024                 if (m_head->m_flags & M_LASTFRAG)
 3025                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
 3026                 else if (m_head->m_flags & M_FRAG)
 3027                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
 3028         }
 3029 
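              /* See whether the stack attached a VLAN tag to this frame. */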
 3030         mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
 3031 
 3032         map = sc->bge_cdata.bge_tx_dmamap[idx];
 3033         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
 3034             m_head, segs, &nsegs, BUS_DMA_NOWAIT);
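              /*
               * EFBIG means the mbuf chain has more segments than the DMA
               * engine will accept; defragment it and retry the load once.
               */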
 3035         if (error) {
 3036                 if (error == EFBIG) {
 3037                         struct mbuf *m0;
 3038 
 3039                         m0 = m_defrag(m_head, M_DONTWAIT);
 3040                         if (m0 == NULL)
 3041                                 return (ENOBUFS);
 3042                         m_head = m0;
 3043                         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
 3044                             map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
 3045                 }
 3046                 if (error)
 3047                         return (error); 
 3048         }
 3049 
 3050         /*
 3051          * Sanity check: avoid coming within 16 descriptors
 3052          * of the end of the ring.
 3053          */
 3054         if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
 3055                 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
 3056                 return (ENOBUFS);
 3057         }
 3058 
 3059         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
 3060 
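              /*
               * Fill one ring descriptor per DMA segment; on exit, idx points
               * at the descriptor for the last segment.
               */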
 3061         for (i = 0; ; i++) {
 3062                 d = &sc->bge_ldata.bge_tx_ring[idx];
 3063                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
 3064                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
 3065                 d->bge_len = segs[i].ds_len;
 3066                 d->bge_flags = csum_flags;
 3067                 if (i == nsegs - 1)
 3068                         break;
 3069                 BGE_INC(idx, BGE_TX_RING_CNT);
 3070         }
 3071 
 3072         /* Mark the last segment as end of packet... */
 3073         d->bge_flags |= BGE_TXBDFLAG_END;
 3074         /* ... and put VLAN tag into first segment.  */
 3075         d = &sc->bge_ldata.bge_tx_ring[*txidx];
 3076         if (mtag != NULL) {
 3077                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
 3078                 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
 3079         } else
 3080                 d->bge_vlan_tag = 0;
 3081 
 3082         /*
  3083          * Ensure that the map for this transmission
 3084          * is placed at the array index of the last descriptor
 3085          * in this chain.
 3086          */
 3087         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
 3088         sc->bge_cdata.bge_tx_dmamap[idx] = map;
 3089         sc->bge_cdata.bge_tx_chain[idx] = m_head;
 3090         sc->bge_txcnt += nsegs;
 3091 
 3092         BGE_INC(idx, BGE_TX_RING_CNT);
 3093         *txidx = idx;
 3094 
 3095         return (0);
 3096 }
 3097 
 3098 /*
 3099  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3100  * to the mbuf data regions directly in the transmit descriptors.
 3101  */
 3102 static void
 3103 bge_start_locked(ifp)
 3104         struct ifnet *ifp;
 3105 {
 3106         struct bge_softc *sc;
 3107         struct mbuf *m_head = NULL;
 3108         uint32_t prodidx;
 3109         int count = 0;
 3110 
 3111         sc = ifp->if_softc;
 3112 
 3113         if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3114                 return;
 3115 
 3116         prodidx = sc->bge_tx_prodidx;
 3117 
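              /*
               * Dequeue and encapsulate frames while the descriptor slot at
               * prodidx is free; a non-NULL slot means the TX ring is full.
               */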
  3118         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
 3119                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 3120                 if (m_head == NULL)
 3121                         break;
 3122 
 3123                 /*
 3124                  * XXX
 3125                  * The code inside the if() block is never reached since we
 3126                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
 3127                  * requests to checksum TCP/UDP in a fragmented packet.
 3128                  *
 3129                  * XXX
 3130                  * safety overkill.  If this is a fragmented packet chain
 3131                  * with delayed TCP/UDP checksums, then only encapsulate
 3132                  * it if we have enough descriptors to handle the entire
 3133                  * chain at once.
 3134                  * (paranoia -- may not actually be needed)
 3135                  */
 3136                 if (m_head->m_flags & M_FIRSTFRAG &&
 3137                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
 3138                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
 3139                             m_head->m_pkthdr.csum_data + 16) {
 3140                                 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 3141                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 3142                                 break;
 3143                         }
 3144                 }
 3145 
 3146                 /*
 3147                  * Pack the data into the transmit ring. If we
 3148                  * don't have room, set the OACTIVE flag and wait
 3149                  * for the NIC to drain the ring.
 3150                  */
 3151                 if (bge_encap(sc, m_head, &prodidx)) {
 3152                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 3153                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 3154                         break;
 3155                 }
 3156                 ++count;
 3157 
 3158                 /*
 3159                  * If there's a BPF listener, bounce a copy of this frame
 3160                  * to him.
 3161                  */
 3162                 BPF_MTAP(ifp, m_head);
 3163         }
 3164 
 3165         if (count == 0) {
 3166                 /* no packets were dequeued */
 3167                 return;
 3168         }
 3169 
 3170         /* Transmit */
 3171         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3172         /* 5700 b2 errata */
 3173         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
 3174                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
 3175 
 3176         sc->bge_tx_prodidx = prodidx;
 3177 
 3178         /*
 3179          * Set a timeout in case the chip goes out to lunch.
 3180          */
 3181         ifp->if_timer = 5;
 3182 
 3183         return;
 3184 }
 3185 
 3186 /*
  3187  * Externally visible transmit entry point: acquire the driver lock,
  3188  * then hand the send queue to bge_start_locked().
 3189  */
 3190 static void
 3191 bge_start(ifp)
 3192         struct ifnet *ifp;
 3193 {
 3194         struct bge_softc *sc;
 3195 
 3196         sc = ifp->if_softc;
 3197         BGE_LOCK(sc);
 3198         bge_start_locked(ifp);
 3199         BGE_UNLOCK(sc);
 3200 }
 3201 
 3202 static void
 3203 bge_init_locked(sc)
 3204         struct bge_softc *sc;
 3205 {
 3206         struct ifnet *ifp;
 3207         u_int16_t *m;
 3208 
 3209         BGE_LOCK_ASSERT(sc);
 3210 
 3211         ifp = sc->bge_ifp;
 3212 
 3213         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3214                 return;
 3215 
 3216         /* Cancel pending I/O and flush buffers. */
 3217         bge_stop(sc);
 3218         bge_reset(sc);
 3219         bge_chipinit(sc);
 3220 
 3221         /*
 3222          * Init the various state machines, ring
 3223          * control blocks and firmware.
 3224          */
 3225         if (bge_blockinit(sc)) {
 3226                 device_printf(sc->bge_dev, "initialization failure\n");
 3227                 return;
 3228         }
 3229 
 3230         ifp = sc->bge_ifp;
 3231 
 3232         /* Specify MTU. */
 3233         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
 3234             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3235 
 3236         /* Load our MAC address. */
 3237         m = (u_int16_t *)&IFP2ENADDR(sc->bge_ifp)[0];
 3238         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
 3239         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
 3240 
 3241         /* Enable or disable promiscuous mode as needed. */
 3242         if (ifp->if_flags & IFF_PROMISC) {
 3243                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3244         } else {
 3245                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 3246         }
 3247 
 3248         /* Program multicast filter. */
 3249         bge_setmulti(sc);
 3250 
 3251         /* Init RX ring. */
 3252         bge_init_rx_ring_std(sc);
 3253 
 3254         /*
 3255          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
 3256          * memory to insure that the chip has in fact read the first
 3257          * entry of the ring.
 3258          */
 3259         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
 3260                 u_int32_t               v, i;
 3261                 for (i = 0; i < 10; i++) {
 3262                         DELAY(20);
 3263                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
 3264                         if (v == (MCLBYTES - ETHER_ALIGN))
 3265                                 break;
 3266                 }
 3267                 if (i == 10)
 3268                         device_printf (sc->bge_dev,
 3269                             "5705 A0 chip failed to load RX ring\n");
 3270         }
 3271 
 3272         /* Init jumbo RX ring. */
 3273         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
 3274                 bge_init_rx_ring_jumbo(sc);
 3275 
 3276         /* Init our RX return ring index */
 3277         sc->bge_rx_saved_considx = 0;
 3278 
 3279         /* Init TX ring. */
 3280         bge_init_tx_ring(sc);
 3281 
 3282         /* Turn on transmitter */
 3283         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
 3284 
 3285         /* Turn on receiver */
 3286         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3287 
 3288         /* Tell firmware we're alive. */
 3289         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3290 
 3291 #ifdef DEVICE_POLLING
 3292         /* Disable interrupts if we are polling. */
 3293         if (ifp->if_capenable & IFCAP_POLLING) {
 3294                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
 3295                     BGE_PCIMISCCTL_MASK_PCI_INTR);
 3296                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3297                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
 3298                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
 3299         } else
 3300 #endif
  3301         {
  3302                 /* Enable host interrupts. */
  3303                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
  3304                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
  3305                     BGE_PCIMISCCTL_MASK_PCI_INTR);
  3306                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
  3307         }
  3308 
 3309         bge_ifmedia_upd(ifp);
 3310 
 3311         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 3312         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3313 
 3314         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
 3315 }
 3316 
 3317 static void
 3318 bge_init(xsc)
 3319         void *xsc;
 3320 {
 3321         struct bge_softc *sc = xsc;
 3322 
 3323         BGE_LOCK(sc);
 3324         bge_init_locked(sc);
 3325         BGE_UNLOCK(sc);
 3326 
 3327         return;
 3328 }
 3329 
 3330 /*
 3331  * Set media options.
 3332  */
 3333 static int
 3334 bge_ifmedia_upd(ifp)
 3335         struct ifnet *ifp;
 3336 {
 3337         struct bge_softc *sc;
 3338         struct mii_data *mii;
 3339         struct ifmedia *ifm;
 3340 
 3341         sc = ifp->if_softc;
 3342         ifm = &sc->bge_ifmedia;
 3343 
 3344         /* If this is a 1000baseX NIC, enable the TBI port. */
 3345         if (sc->bge_tbi) {
 3346                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3347                         return(EINVAL);
 3348                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
 3349                 case IFM_AUTO:
 3350 #ifndef BGE_FAKE_AUTONEG
 3351                         /*
 3352                          * The BCM5704 ASIC appears to have a special
 3353                          * mechanism for programming the autoneg
 3354                          * advertisement registers in TBI mode.
 3355                          */
 3356                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
 3357                                 uint32_t sgdig;
 3358                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
 3359                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
 3360                                 sgdig |= BGE_SGDIGCFG_AUTO|
 3361                                     BGE_SGDIGCFG_PAUSE_CAP|
 3362                                     BGE_SGDIGCFG_ASYM_PAUSE;
 3363                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
 3364                                     sgdig|BGE_SGDIGCFG_SEND);
 3365                                 DELAY(5);
 3366                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
 3367                         }
 3368 #endif
 3369                         break;
 3370                 case IFM_1000_SX:
 3371                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3372                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
 3373                                     BGE_MACMODE_HALF_DUPLEX);
 3374                         } else {
 3375                                 BGE_SETBIT(sc, BGE_MAC_MODE,
 3376                                     BGE_MACMODE_HALF_DUPLEX);
 3377                         }
 3378                         break;
 3379                 default:
 3380                         return(EINVAL);
 3381                 }
 3382                 return(0);
 3383         }
 3384 
 3385         sc->bge_link_evt++;
 3386         mii = device_get_softc(sc->bge_miibus);
 3387         if (mii->mii_instance) {
 3388                 struct mii_softc *miisc;
 3389                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 3390                     miisc = LIST_NEXT(miisc, mii_list))
 3391                         mii_phy_reset(miisc);
 3392         }
 3393         mii_mediachg(mii);
 3394 
 3395         return(0);
 3396 }
 3397 
 3398 /*
 3399  * Report current media status.
 3400  */
 3401 static void
 3402 bge_ifmedia_sts(ifp, ifmr)
 3403         struct ifnet *ifp;
 3404         struct ifmediareq *ifmr;
 3405 {
 3406         struct bge_softc *sc;
 3407         struct mii_data *mii;
 3408 
 3409         sc = ifp->if_softc;
 3410 
 3411         if (sc->bge_tbi) {
 3412                 ifmr->ifm_status = IFM_AVALID;
 3413                 ifmr->ifm_active = IFM_ETHER;
 3414                 if (CSR_READ_4(sc, BGE_MAC_STS) &
 3415                     BGE_MACSTAT_TBI_PCS_SYNCHED)
 3416                         ifmr->ifm_status |= IFM_ACTIVE;
 3417                 ifmr->ifm_active |= IFM_1000_SX;
 3418                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
 3419                         ifmr->ifm_active |= IFM_HDX;
 3420                 else
 3421                         ifmr->ifm_active |= IFM_FDX;
 3422                 return;
 3423         }
 3424 
 3425         mii = device_get_softc(sc->bge_miibus);
 3426         mii_pollstat(mii);
 3427         ifmr->ifm_active = mii->mii_media_active;
 3428         ifmr->ifm_status = mii->mii_media_status;
 3429 
 3430         return;
 3431 }
 3432 
 3433 static int
 3434 bge_ioctl(ifp, command, data)
 3435         struct ifnet *ifp;
 3436         u_long command;
 3437         caddr_t data;
 3438 {
 3439         struct bge_softc *sc = ifp->if_softc;
 3440         struct ifreq *ifr = (struct ifreq *) data;
 3441         int mask, error = 0;
 3442         struct mii_data *mii;
 3443 
 3444         switch(command) {
 3445         case SIOCSIFMTU:
 3446                 /* Disallow jumbo frames on 5705. */
 3447                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
 3448                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
 3449                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
 3450                         error = EINVAL;
 3451                 else {
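                              /* A new MTU takes effect only after a full re-init. */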
 3452                         ifp->if_mtu = ifr->ifr_mtu;
 3453                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3454                         bge_init(sc);
 3455                 }
 3456                 break;
 3457         case SIOCSIFFLAGS:
 3458                 BGE_LOCK(sc);
 3459                 if (ifp->if_flags & IFF_UP) {
 3460                         /*
 3461                          * If only the state of the PROMISC flag changed,
 3462                          * then just use the 'set promisc mode' command
 3463                          * instead of reinitializing the entire NIC. Doing
 3464                          * a full re-init means reloading the firmware and
 3465                          * waiting for it to start up, which may take a
 3466                          * second or two.  Similarly for ALLMULTI.
 3467                          */
 3468                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3469                             ifp->if_flags & IFF_PROMISC &&
 3470                             !(sc->bge_if_flags & IFF_PROMISC)) {
 3471                                 BGE_SETBIT(sc, BGE_RX_MODE,
 3472                                     BGE_RXMODE_RX_PROMISC);
 3473                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3474                             !(ifp->if_flags & IFF_PROMISC) &&
 3475                             sc->bge_if_flags & IFF_PROMISC) {
 3476                                 BGE_CLRBIT(sc, BGE_RX_MODE,
 3477                                     BGE_RXMODE_RX_PROMISC);
 3478                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3479                             (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
 3480                                 bge_setmulti(sc);
 3481                         } else
 3482                                 bge_init_locked(sc);
 3483                 } else {
 3484                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3485                                 bge_stop(sc);
 3486                         }
 3487                 }
 3488                 sc->bge_if_flags = ifp->if_flags;
 3489                 BGE_UNLOCK(sc);
 3490                 error = 0;
 3491                 break;
 3492         case SIOCADDMULTI:
 3493         case SIOCDELMULTI:
 3494                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3495                         BGE_LOCK(sc);
 3496                         bge_setmulti(sc);
 3497                         BGE_UNLOCK(sc);
 3498                         error = 0;
 3499                 }
 3500                 break;
 3501         case SIOCSIFMEDIA:
 3502         case SIOCGIFMEDIA:
 3503                 if (sc->bge_tbi) {
 3504                         error = ifmedia_ioctl(ifp, ifr,
 3505                             &sc->bge_ifmedia, command);
 3506                 } else {
 3507                         mii = device_get_softc(sc->bge_miibus);
 3508                         error = ifmedia_ioctl(ifp, ifr,
 3509                             &mii->mii_media, command);
 3510                 }
 3511                 break;
 3512         case SIOCSIFCAP:
 3513                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3514 #ifdef DEVICE_POLLING
 3515                 if (mask & IFCAP_POLLING) {
 3516                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 3517                                 error = ether_poll_register(bge_poll, ifp);
 3518                                 if (error)
 3519                                         return(error);
 3520                                 BGE_LOCK(sc);
 3521                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
 3522                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
 3523                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3524                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
 3525                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
 3526                                 ifp->if_capenable |= IFCAP_POLLING;   
 3527                                 BGE_UNLOCK(sc);
 3528                         } else {
 3529                                 error = ether_poll_deregister(ifp);
 3530                                 /* Enable interrupt even in error case */
 3531                                 BGE_LOCK(sc);
 3532                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
 3533                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
 3534                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
 3535                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
 3536                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
 3537                                 ifp->if_capenable &= ~IFCAP_POLLING;
 3538                                 BGE_UNLOCK(sc);
 3539                         }
 3540                 }
 3541 #endif
 3542                 if (mask & IFCAP_HWCSUM) {
 3543                         ifp->if_capenable ^= IFCAP_HWCSUM;
 3544                         if (IFCAP_HWCSUM & ifp->if_capenable &&
 3545                             IFCAP_HWCSUM & ifp->if_capabilities)
 3546                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
 3547                         else
 3548                                 ifp->if_hwassist = 0;
 3549                 }
 3550                 break;
 3551         default:
 3552                 error = ether_ioctl(ifp, command, data);
 3553                 break;
 3554         }
 3555 
 3556         return(error);
 3557 }
 3558 
 3559 static void
 3560 bge_watchdog(ifp)
 3561         struct ifnet *ifp;
 3562 {
 3563         struct bge_softc *sc;
 3564 
 3565         sc = ifp->if_softc;
 3566 
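              /*
               * The timeout set by bge_start_locked() expired before the chip
               * reported TX completions; reset and reinitialize the chip.
               */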
 3567         if_printf(ifp, "watchdog timeout -- resetting\n");
 3568 
 3569         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3570         bge_init(sc);
 3571 
 3572         ifp->if_oerrors++;
 3573 
 3574         return;
 3575 }
 3576 
 3577 /*
 3578  * Stop the adapter and free any mbufs allocated to the
 3579  * RX and TX lists.
 3580  */
 3581 static void
 3582 bge_stop(sc)
 3583         struct bge_softc *sc;
 3584 {
 3585         struct ifnet *ifp;
 3586         struct ifmedia_entry *ifm;
 3587         struct mii_data *mii = NULL;
 3588         int mtmp, itmp;
 3589 
 3590         BGE_LOCK_ASSERT(sc);
 3591 
 3592         ifp = sc->bge_ifp;
 3593 
 3594         if (!sc->bge_tbi)
 3595                 mii = device_get_softc(sc->bge_miibus);
 3596 
 3597         callout_stop(&sc->bge_stat_ch);
 3598 
 3599         /*
 3600          * Disable all of the receiver blocks
 3601          */
 3602         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
 3603         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
 3604         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
 3605         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3606             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3607                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 3608         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
 3609         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
 3610         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
 3611 
 3612         /*
 3613          * Disable all of the transmit blocks
 3614          */
 3615         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
 3616         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
 3617         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
 3618         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
 3619         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
 3620         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3621             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3622                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
 3623         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
 3624 
 3625         /*
 3626          * Shut down all of the memory managers and related
 3627          * state machines.
 3628          */
 3629         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 3630         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
 3631         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3632             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3633                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
 3634         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
 3635         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
 3636         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3637             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
 3638                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
 3639                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
 3640         }
 3641 
 3642         /* Disable host interrupts. */
 3643         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
 3644         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
 3645 
 3646         /*
 3647          * Tell firmware we're shutting down.
 3648          */
 3649         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 3650 
 3651         /* Free the RX lists. */
 3652         bge_free_rx_ring_std(sc);
 3653 
 3654         /* Free jumbo RX list. */
 3655         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
 3656             sc->bge_asicrev != BGE_ASICREV_BCM5750)
 3657                 bge_free_rx_ring_jumbo(sc);
 3658 
 3659         /* Free TX buffers. */
 3660         bge_free_tx_ring(sc);
 3661 
 3662         /*
 3663          * Isolate/power down the PHY, but leave the media selection
 3664          * unchanged so that things will be put back to normal when
 3665          * we bring the interface back up.
 3666          */
 3667         if (!sc->bge_tbi) {
 3668                 itmp = ifp->if_flags;
 3669                 ifp->if_flags |= IFF_UP;
 3670                 /*
 3671                  * If we are called from bge_detach(), mii is already NULL.
 3672                  */
 3673                 if (mii != NULL) {
 3674                         ifm = mii->mii_media.ifm_cur;
 3675                         mtmp = ifm->ifm_media;
 3676                         ifm->ifm_media = IFM_ETHER|IFM_NONE;
 3677                         mii_mediachg(mii);
 3678                         ifm->ifm_media = mtmp;
 3679                 }
 3680                 ifp->if_flags = itmp;
 3681         }
 3682 
 3683         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
 3684 
  3685         /*
  3686          * We can't just call bge_link_upd() here: the chip is almost
  3687          * stopped, so the bge_link_upd() -> bge_tick_locked() ->
  3688          * bge_stats_update() sequence could deadlock the hardware.
  3689          * Instead, just clear the MAC's link state (PHY may still be UP).
  3690          */
 3691         if (bootverbose && sc->bge_link)
 3692                 if_printf(sc->bge_ifp, "link DOWN\n");
 3693         sc->bge_link = 0;
 3694 
 3695         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3696 }
 3697 
 3698 /*
 3699  * Stop all chip I/O so that the kernel's probe routines don't
 3700  * get confused by errant DMAs when rebooting.
 3701  */
 3702 static void
 3703 bge_shutdown(dev)
 3704         device_t dev;
 3705 {
 3706         struct bge_softc *sc;
 3707 
 3708         sc = device_get_softc(dev);
 3709 
 3710         BGE_LOCK(sc);
 3711         bge_stop(sc);
 3712         bge_reset(sc);
 3713         BGE_UNLOCK(sc);
 3714 
 3715         return;
 3716 }
 3717 
 3718 static int
 3719 bge_suspend(device_t dev)
 3720 {
 3721         struct bge_softc *sc;
 3722 
 3723         sc = device_get_softc(dev);
 3724         BGE_LOCK(sc);
 3725         bge_stop(sc);
 3726         BGE_UNLOCK(sc);
 3727 
 3728         return (0);
 3729 }
 3730 
 3731 static int
 3732 bge_resume(device_t dev)
 3733 {
 3734         struct bge_softc *sc;
 3735         struct ifnet *ifp;
 3736 
 3737         sc = device_get_softc(dev);
 3738         BGE_LOCK(sc);
 3739         ifp = sc->bge_ifp;
 3740         if (ifp->if_flags & IFF_UP) {
 3741                 bge_init_locked(sc);
 3742                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3743                         bge_start_locked(ifp);
 3744         }
 3745         BGE_UNLOCK(sc);
 3746 
 3747         return (0);
 3748 }
 3749 
 3750 static void
 3751 bge_link_upd(sc)
 3752         struct bge_softc *sc;
 3753 {
 3754         struct mii_data *mii;
 3755         uint32_t link, status;
 3756 
 3757         BGE_LOCK_ASSERT(sc);
 3758 
 3759         /* Clear 'pending link event' flag */
 3760         sc->bge_link_evt = 0;
 3761 
 3762         /*
 3763          * Process link state changes.
 3764          * Grrr. The link status word in the status block does
 3765          * not work correctly on the BCM5700 rev AX and BX chips,
 3766          * according to all available information. Hence, we have
 3767          * to enable MII interrupts in order to properly obtain
 3768          * async link changes. Unfortunately, this also means that
 3769          * we have to read the MAC status register to detect link
 3770          * changes, thereby adding an additional register access to
 3771          * the interrupt handler.
 3772          *
  3773          * XXX: perhaps the link state detection procedure used for
  3774          * BGE_CHIPID_BCM5700_B1 could be used for other BCM5700 revisions.
 3775          */
 3776 
 3777         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
 3778             sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
 3779                 status = CSR_READ_4(sc, BGE_MAC_STS);
 3780                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
 3781                         callout_stop(&sc->bge_stat_ch);
 3782                         bge_tick_locked(sc);
 3783 
 3784                         mii = device_get_softc(sc->bge_miibus);
 3785                         if (!sc->bge_link &&
 3786                             mii->mii_media_status & IFM_ACTIVE &&
 3787                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 3788                                 sc->bge_link++;
 3789                                 if (bootverbose)
 3790                                         if_printf(sc->bge_ifp, "link UP\n");
 3791                         } else if (sc->bge_link &&
 3792                             (!(mii->mii_media_status & IFM_ACTIVE) ||
 3793                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
 3794                                 sc->bge_link = 0;
 3795                                 if (bootverbose)
 3796                                         if_printf(sc->bge_ifp, "link DOWN\n");
 3797                         }
 3798 
 3799                         /* Clear the interrupt */
 3800                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
 3801                             BGE_EVTENB_MI_INTERRUPT);
 3802                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
 3803                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
 3804                             BRGPHY_INTRS);
 3805                 }
 3806                 return;
 3807         } 
 3808 
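              /*
               * TBI (1000baseX) cards: infer link state from the PCS sync bit
               * in the MAC status register.
               */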
 3809         if (sc->bge_tbi) {
 3810                 status = CSR_READ_4(sc, BGE_MAC_STS);
 3811                 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
 3812                         if (!sc->bge_link) {
 3813                                 sc->bge_link++;
 3814                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
 3815                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
 3816                                             BGE_MACMODE_TBI_SEND_CFGS);
 3817                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
 3818                                 if (bootverbose)
 3819                                         if_printf(sc->bge_ifp, "link UP\n");
 3820                                 if_link_state_change(sc->bge_ifp, LINK_STATE_UP);
 3821                         }
 3822                 } else if (sc->bge_link) {
 3823                         sc->bge_link = 0;
 3824                         if (bootverbose)
 3825                                 if_printf(sc->bge_ifp, "link DOWN\n");
 3826                         if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
 3827                 }
  3828         /* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
 3829         } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
 3830                 /* 
  3831                  * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
  3832                  * bit in the status word always set.  Work around this bug by
  3833                  * reading the PHY link status directly.
 3834                  */
 3835                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
 3836 
 3837                 if (link != sc->bge_link ||
 3838                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
 3839                         callout_stop(&sc->bge_stat_ch);
 3840                         bge_tick_locked(sc);
 3841 
 3842                         mii = device_get_softc(sc->bge_miibus);
 3843                         if (!sc->bge_link &&
 3844                             mii->mii_media_status & IFM_ACTIVE &&
 3845                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 3846                                 sc->bge_link++;
 3847                                 if (bootverbose)
 3848                                         if_printf(sc->bge_ifp, "link UP\n");
 3849                         } else if (sc->bge_link &&
 3850                             (!(mii->mii_media_status & IFM_ACTIVE) ||
 3851                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
 3852                                 sc->bge_link = 0;
 3853                                 if (bootverbose)
 3854                                         if_printf(sc->bge_ifp, "link DOWN\n");
 3855                         }
 3856                 }
 3857         }
 3858 
 3859         /* Clear the attention */
 3860         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
 3861             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
 3862             BGE_MACSTAT_LINK_CHANGED);
 3863 }
