FreeBSD/Linux Kernel Cross Reference
sys/dev/bfe/if_bfe.c


    1 /*-
    2  * Copyright (c) 2003 Stuart Walsh<stu@ipng.org.uk>
    3  * and Duncan Barclay<dmlb@dmlb.org>
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
    14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #include <sys/param.h>
   32 #include <sys/systm.h>
   33 #include <sys/sockio.h>
   34 #include <sys/mbuf.h>
   35 #include <sys/malloc.h>
   36 #include <sys/kernel.h>
   37 #include <sys/module.h>
   38 #include <sys/socket.h>
   39 #include <sys/queue.h>
   40 
   41 #include <net/if.h>
   42 #include <net/if_arp.h>
   43 #include <net/ethernet.h>
   44 #include <net/if_dl.h>
   45 #include <net/if_media.h>
   46 
   47 #include <net/bpf.h>
   48 
   49 #include <net/if_types.h>
   50 #include <net/if_vlan_var.h>
   51 
   52 #include <netinet/in_systm.h>
   53 #include <netinet/in.h>
   54 #include <netinet/ip.h>
   55 
   56 #include <machine/bus.h>
   57 #include <machine/resource.h>
   58 #include <sys/bus.h>
   59 #include <sys/rman.h>
   60 
   61 #include <dev/mii/mii.h>
   62 #include <dev/mii/miivar.h>
   63 #include "miidevs.h"
   64 
   65 #include <dev/pci/pcireg.h>
   66 #include <dev/pci/pcivar.h>
   67 
   68 #include <dev/bfe/if_bfereg.h>
   69 
   70 MODULE_DEPEND(bfe, pci, 1, 1, 1);
   71 MODULE_DEPEND(bfe, ether, 1, 1, 1);
   72 MODULE_DEPEND(bfe, miibus, 1, 1, 1);
   73 
   74 /* "device miibus" required.  See GENERIC if you get errors here. */
   75 #include "miibus_if.h"
   76 
   77 #define BFE_DEVDESC_MAX         64      /* Maximum device description length */
   78 
   79 static struct bfe_type bfe_devs[] = {
   80         { BCOM_VENDORID, BCOM_DEVICEID_BCM4401,
   81                 "Broadcom BCM4401 Fast Ethernet" },
   82         { BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0,
   83                 "Broadcom BCM4401-B0 Fast Ethernet" },
    84         { 0, 0, NULL }
   85 };
   86 
   87 static int  bfe_probe                           (device_t);
   88 static int  bfe_attach                          (device_t);
   89 static int  bfe_detach                          (device_t);
   90 static int  bfe_suspend                         (device_t);
   91 static int  bfe_resume                          (device_t);
   92 static void bfe_release_resources       (struct bfe_softc *);
   93 static void bfe_intr                            (void *);
   94 static void bfe_start                           (struct ifnet *);
   95 static void bfe_start_locked                    (struct ifnet *);
   96 static int  bfe_ioctl                           (struct ifnet *, u_long, caddr_t);
   97 static void bfe_init                            (void *);
   98 static void bfe_init_locked                     (void *);
   99 static void bfe_stop                            (struct bfe_softc *);
  100 static void bfe_watchdog                        (struct ifnet *);
  101 static void bfe_shutdown                        (device_t);
  102 static void bfe_tick                            (void *);
  103 static void bfe_txeof                           (struct bfe_softc *);
  104 static void bfe_rxeof                           (struct bfe_softc *);
  105 static void bfe_set_rx_mode                     (struct bfe_softc *);
  106 static int  bfe_list_rx_init            (struct bfe_softc *);
  107 static int  bfe_list_newbuf                     (struct bfe_softc *, int, struct mbuf*);
  108 static void bfe_rx_ring_free            (struct bfe_softc *);
  109 
  110 static void bfe_pci_setup                       (struct bfe_softc *, u_int32_t);
  111 static int  bfe_ifmedia_upd                     (struct ifnet *);
  112 static void bfe_ifmedia_sts                     (struct ifnet *, struct ifmediareq *);
  113 static int  bfe_miibus_readreg          (device_t, int, int);
  114 static int  bfe_miibus_writereg         (device_t, int, int, int);
  115 static void bfe_miibus_statchg          (device_t);
  116 static int  bfe_wait_bit                        (struct bfe_softc *, u_int32_t, u_int32_t,
  117                 u_long, const int);
  118 static void bfe_get_config                      (struct bfe_softc *sc);
  119 static void bfe_read_eeprom                     (struct bfe_softc *, u_int8_t *);
  120 static void bfe_stats_update            (struct bfe_softc *);
  121 static void bfe_clear_stats                     (struct bfe_softc *);
  122 static int  bfe_readphy                         (struct bfe_softc *, u_int32_t, u_int32_t*);
  123 static int  bfe_writephy                        (struct bfe_softc *, u_int32_t, u_int32_t);
  124 static int  bfe_resetphy                        (struct bfe_softc *);
  125 static int  bfe_setupphy                        (struct bfe_softc *);
  126 static void bfe_chip_reset                      (struct bfe_softc *);
  127 static void bfe_chip_halt                       (struct bfe_softc *);
  128 static void bfe_core_reset                      (struct bfe_softc *);
  129 static void bfe_core_disable            (struct bfe_softc *);
  130 static int  bfe_dma_alloc                       (device_t);
  131 static void bfe_dma_map_desc            (void *, bus_dma_segment_t *, int, int);
  132 static void bfe_dma_map                         (void *, bus_dma_segment_t *, int, int);
  133 static void bfe_cam_write                       (struct bfe_softc *, u_char *, int);
  134 
  135 static device_method_t bfe_methods[] = {
  136         /* Device interface */
  137         DEVMETHOD(device_probe,         bfe_probe),
  138         DEVMETHOD(device_attach,        bfe_attach),
  139         DEVMETHOD(device_detach,        bfe_detach),
  140         DEVMETHOD(device_shutdown,      bfe_shutdown),
  141         DEVMETHOD(device_suspend,       bfe_suspend),
  142         DEVMETHOD(device_resume,        bfe_resume),
  143 
  144         /* bus interface */
  145         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  146         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  147 
  148         /* MII interface */
  149         DEVMETHOD(miibus_readreg,       bfe_miibus_readreg),
  150         DEVMETHOD(miibus_writereg,      bfe_miibus_writereg),
  151         DEVMETHOD(miibus_statchg,       bfe_miibus_statchg),
  152 
  153         { 0, 0 }
  154 };
  155 
  156 static driver_t bfe_driver = {
  157         "bfe",
  158         bfe_methods,
  159         sizeof(struct bfe_softc)
  160 };
  161 
  162 static devclass_t bfe_devclass;
  163 
  164 DRIVER_MODULE(bfe, pci, bfe_driver, bfe_devclass, 0, 0);
  165 DRIVER_MODULE(miibus, bfe, miibus_driver, miibus_devclass, 0, 0);
  166 
  167 /*
  168  * Probe for a Broadcom 4401 chip.
  169  */
  170 static int
  171 bfe_probe(device_t dev)
  172 {
  173         struct bfe_type *t;
  174         struct bfe_softc *sc;
  175 
  176         t = bfe_devs;
  177 
  178         sc = device_get_softc(dev);
  179         bzero(sc, sizeof(struct bfe_softc));
  180         sc->bfe_unit = device_get_unit(dev);
  181         sc->bfe_dev = dev;
  182 
  183         while(t->bfe_name != NULL) {
  184                 if ((pci_get_vendor(dev) == t->bfe_vid) &&
  185                                 (pci_get_device(dev) == t->bfe_did)) {
  186                         device_set_desc_copy(dev, t->bfe_name);
  187                         return (BUS_PROBE_DEFAULT);
  188                 }
  189                 t++;
  190         }
  191 
  192         return (ENXIO);
  193 }
  194 
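       /*
        * Allocate all DMA resources for the adapter: a parent tag limited
        * to the chip's 1GB address range, tags for the TX and RX descriptor
        * lists and for mbufs, a DMA map for every descriptor, and the
        * descriptor lists themselves.
        */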
  195 static int
  196 bfe_dma_alloc(device_t dev)
  197 {
  198         struct bfe_softc *sc;
  199         int error, i;
  200 
  201         sc = device_get_softc(dev);
  202 
  203         /*
  204          * parent tag.  Apparently the chip cannot handle any DMA address
  205          * greater than 1GB.
  206          */
  207         error = bus_dma_tag_create(NULL,  /* parent */
  208                         4096, 0,                  /* alignment, boundary */
  209                         0x3FFFFFFF,               /* lowaddr */
  210                         BUS_SPACE_MAXADDR,        /* highaddr */
  211                         NULL, NULL,               /* filter, filterarg */
  212                         MAXBSIZE,                 /* maxsize */
  213                         BUS_SPACE_UNRESTRICTED,   /* num of segments */
  214                         BUS_SPACE_MAXSIZE_32BIT,  /* max segment size */
  215                         0,                        /* flags */
  216                         NULL, NULL,               /* lockfunc, lockarg */
  217                         &sc->bfe_parent_tag);
   218                         &sc->bfe_parent_tag);
   219 
               if (error) {
                       device_printf(dev, "could not allocate parent dma tag\n");
                       return (ENOMEM);
               }

  220         error = bus_dma_tag_create(sc->bfe_parent_tag,
  221                         4096, 0,
  222                         BUS_SPACE_MAXADDR,
  223                         BUS_SPACE_MAXADDR,
  224                         NULL, NULL,
  225                         BFE_TX_LIST_SIZE,
  226                         1,
  227                         BUS_SPACE_MAXSIZE_32BIT,
  228                         0,
  229                         NULL, NULL,
  230                         &sc->bfe_tx_tag);
  231 
  232         if (error) {
  233                 device_printf(dev, "could not allocate dma tag\n");
  234                 return (ENOMEM);
  235         }
  236 
  237         /* tag for RX ring */
  238         error = bus_dma_tag_create(sc->bfe_parent_tag,
  239                         4096, 0,
  240                         BUS_SPACE_MAXADDR,
  241                         BUS_SPACE_MAXADDR,
  242                         NULL, NULL,
  243                         BFE_RX_LIST_SIZE,
  244                         1,
  245                         BUS_SPACE_MAXSIZE_32BIT,
  246                         0,
  247                         NULL, NULL,
  248                         &sc->bfe_rx_tag);
  249 
  250         if (error) {
  251                 device_printf(dev, "could not allocate dma tag\n");
  252                 return (ENOMEM);
  253         }
  254 
  255         /* tag for mbufs */
  256         error = bus_dma_tag_create(sc->bfe_parent_tag,
  257                         ETHER_ALIGN, 0,
  258                         BUS_SPACE_MAXADDR,
  259                         BUS_SPACE_MAXADDR,
  260                         NULL, NULL,
  261                         MCLBYTES,
  262                         1,
  263                         BUS_SPACE_MAXSIZE_32BIT,
  264                         BUS_DMA_ALLOCNOW,
  265                         NULL, NULL,
  266                         &sc->bfe_tag);
  267 
  268         if (error) {
  269                 device_printf(dev, "could not allocate dma tag\n");
  270                 return (ENOMEM);
  271         }
  272 
  273         /* pre allocate dmamaps for RX list */
  274         for (i = 0; i < BFE_RX_LIST_CNT; i++) {
  275                 error = bus_dmamap_create(sc->bfe_tag, 0,
  276                     &sc->bfe_rx_ring[i].bfe_map);
  277                 if (error) {
  278                         device_printf(dev, "cannot create DMA map for RX\n");
  279                         return (ENOMEM);
  280                 }
  281         }
  282 
  283         /* pre allocate dmamaps for TX list */
  284         for (i = 0; i < BFE_TX_LIST_CNT; i++) {
  285                 error = bus_dmamap_create(sc->bfe_tag, 0,
  286                     &sc->bfe_tx_ring[i].bfe_map);
  287                 if (error) {
  288                         device_printf(dev, "cannot create DMA map for TX\n");
  289                         return (ENOMEM);
  290                 }
  291         }
  292 
  293         /* Alloc dma for rx ring */
  294         error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
  295                         BUS_DMA_NOWAIT, &sc->bfe_rx_map);
  296 
  297         if(error)
  298                 return (ENOMEM);
  299 
  300         bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
  301         error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
  302                         sc->bfe_rx_list, sizeof(struct bfe_desc),
  303                         bfe_dma_map, &sc->bfe_rx_dma, BUS_DMA_NOWAIT);
  304 
  305         if(error)
  306                 return (ENOMEM);
  307 
  308         bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
  309 
  310         error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
  311                         BUS_DMA_NOWAIT, &sc->bfe_tx_map);
  312         if (error)
  313                 return (ENOMEM);
  314 
  315 
  316         error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
  317                         sc->bfe_tx_list, sizeof(struct bfe_desc),
  318                         bfe_dma_map, &sc->bfe_tx_dma, BUS_DMA_NOWAIT);
  319         if(error)
  320                 return (ENOMEM);
  321 
  322         bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
  323         bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
  324 
  325         return (0);
  326 }
  327 
  328 static int
  329 bfe_attach(device_t dev)
  330 {
  331         struct ifnet *ifp = NULL;
  332         struct bfe_softc *sc;
  333         int unit, error = 0, rid;
  334 
  335         sc = device_get_softc(dev);
  336         mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  337                         MTX_DEF);
  338 
  339         unit = device_get_unit(dev);
  340         sc->bfe_dev = dev;
  341         sc->bfe_unit = unit;
  342 
  343         /*
  344          * Map control/status registers.
  345          */
  346         pci_enable_busmaster(dev);
  347 
  348         rid = BFE_PCI_MEMLO;
  349         sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  350                         RF_ACTIVE);
  351         if (sc->bfe_res == NULL) {
   352                 printf("bfe%d: couldn't map memory\n", unit);
  353                 error = ENXIO;
  354                 goto fail;
  355         }
  356 
  357         sc->bfe_btag = rman_get_bustag(sc->bfe_res);
  358         sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res);
  359         sc->bfe_vhandle = (vm_offset_t)rman_get_virtual(sc->bfe_res);
  360 
  361         /* Allocate interrupt */
  362         rid = 0;
  363 
  364         sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  365                         RF_SHAREABLE | RF_ACTIVE);
  366         if (sc->bfe_irq == NULL) {
  367                 printf("bfe%d: couldn't map interrupt\n", unit);
  368                 error = ENXIO;
  369                 goto fail;
  370         }
  371 
  372         if (bfe_dma_alloc(dev)) {
  373                 printf("bfe%d: failed to allocate DMA resources\n",
  374                     sc->bfe_unit);
  375                 error = ENXIO;
  376                 goto fail;
  377         }
  378 
  379         /* Set up ifnet structure */
  380         ifp = sc->bfe_ifp = if_alloc(IFT_ETHER);
  381         if (ifp == NULL) {
  382                 printf("bfe%d: failed to if_alloc()\n", sc->bfe_unit);
  383                 error = ENOSPC;
  384                 goto fail;
  385         }
  386         ifp->if_softc = sc;
  387         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  388         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  389         ifp->if_ioctl = bfe_ioctl;
  390         ifp->if_start = bfe_start;
  391         ifp->if_watchdog = bfe_watchdog;
  392         ifp->if_init = bfe_init;
  393         ifp->if_mtu = ETHERMTU;
  394         IFQ_SET_MAXLEN(&ifp->if_snd, BFE_TX_QLEN);
  395         ifp->if_snd.ifq_drv_maxlen = BFE_TX_QLEN;
  396         IFQ_SET_READY(&ifp->if_snd);
  397 
  398         bfe_get_config(sc);
  399 
  400         /* Reset the chip and turn on the PHY */
  401         BFE_LOCK(sc);
  402         bfe_chip_reset(sc);
  403         BFE_UNLOCK(sc);
  404 
  405         if (mii_phy_probe(dev, &sc->bfe_miibus,
  406                                 bfe_ifmedia_upd, bfe_ifmedia_sts)) {
  407                 printf("bfe%d: MII without any PHY!\n", sc->bfe_unit);
  408                 error = ENXIO;
  409                 goto fail;
  410         }
  411 
  412         ether_ifattach(ifp, sc->bfe_enaddr);
  413         callout_handle_init(&sc->bfe_stat_ch);
  414 
  415         /*
  416          * Tell the upper layer(s) we support long frames.
  417          */
  418         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  419         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  420         ifp->if_capenable |= IFCAP_VLAN_MTU;
  421 
  422         /*
  423          * Hook interrupt last to avoid having to lock softc
  424          */
  425         error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
  426                         NULL, bfe_intr, sc, &sc->bfe_intrhand);
  427 
  428         if (error) {
  429                 printf("bfe%d: couldn't set up irq\n", unit);
  430                 goto fail;
  431         }
  432 fail:
  433         if (error)
  434                 bfe_release_resources(sc);
  435         return (error);
  436 }
  437 
  438 static int
  439 bfe_detach(device_t dev)
  440 {
  441         struct bfe_softc *sc;
  442         struct ifnet *ifp;
  443 
  444         sc = device_get_softc(dev);
  445 
  446         KASSERT(mtx_initialized(&sc->bfe_mtx), ("bfe mutex not initialized"));
  447         BFE_LOCK(sc);
  448 
  449         ifp = sc->bfe_ifp;
  450 
  451         if (device_is_attached(dev)) {
  452                 bfe_stop(sc);
  453                 ether_ifdetach(ifp);
  454         }
  455 
  456         bfe_chip_reset(sc);
  457 
  458         bus_generic_detach(dev);
  459         if(sc->bfe_miibus != NULL)
  460                 device_delete_child(dev, sc->bfe_miibus);
  461 
  462         bfe_release_resources(sc);
  463         BFE_UNLOCK(sc);
  464         mtx_destroy(&sc->bfe_mtx);
  465 
  466         return (0);
  467 }
  468 
  469 /*
  470  * Stop all chip I/O so that the kernel's probe routines don't
  471  * get confused by errant DMAs when rebooting.
  472  */
  473 static void
  474 bfe_shutdown(device_t dev)
  475 {
  476         struct bfe_softc *sc;
  477 
  478         sc = device_get_softc(dev);
  479         BFE_LOCK(sc);
  480         bfe_stop(sc);
  481 
  482         BFE_UNLOCK(sc);
  483         return;
  484 }
  485 
  486 static int
  487 bfe_suspend(device_t dev)
  488 {
  489         struct bfe_softc *sc;
  490 
  491         sc = device_get_softc(dev);
  492         BFE_LOCK(sc);
  493         bfe_stop(sc);
  494         BFE_UNLOCK(sc);
  495 
  496         return (0);
  497 }
  498 
  499 static int
  500 bfe_resume(device_t dev)
  501 {
  502         struct bfe_softc *sc;
  503         struct ifnet *ifp;
  504 
  505         sc = device_get_softc(dev);
  506         ifp = sc->bfe_ifp;
  507         BFE_LOCK(sc);
  508         bfe_chip_reset(sc);
  509         if (ifp->if_flags & IFF_UP) {
  510                 bfe_init_locked(sc);
  511                 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
  512                     !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
  513                         bfe_start_locked(ifp);
  514         }
  515         BFE_UNLOCK(sc);
  516 
  517         return (0);
  518 }
  519 
  520 static int
  521 bfe_miibus_readreg(device_t dev, int phy, int reg)
  522 {
  523         struct bfe_softc *sc;
  524         u_int32_t ret;
  525 
  526         sc = device_get_softc(dev);
  527         if(phy != sc->bfe_phyaddr)
  528                 return (0);
  529         bfe_readphy(sc, reg, &ret);
  530 
  531         return (ret);
  532 }
  533 
  534 static int
  535 bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
  536 {
  537         struct bfe_softc *sc;
  538 
  539         sc = device_get_softc(dev);
  540         if(phy != sc->bfe_phyaddr)
  541                 return (0);
  542         bfe_writephy(sc, reg, val);
  543 
  544         return (0);
  545 }
  546 
  547 static void
  548 bfe_miibus_statchg(device_t dev)
  549 {
  550         return;
  551 }
  552 
  553 static void
  554 bfe_tx_ring_free(struct bfe_softc *sc)
  555 {
  556         int i;
  557 
  558         for(i = 0; i < BFE_TX_LIST_CNT; i++) {
  559                 if(sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
  560                         m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
  561                         sc->bfe_tx_ring[i].bfe_mbuf = NULL;
  562                         bus_dmamap_unload(sc->bfe_tag,
  563                                         sc->bfe_tx_ring[i].bfe_map);
  564                 }
  565         }
  566         bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
  567         bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
  568 }
  569 
  570 static void
  571 bfe_rx_ring_free(struct bfe_softc *sc)
  572 {
  573         int i;
  574 
  575         for (i = 0; i < BFE_RX_LIST_CNT; i++) {
  576                 if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
  577                         m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
  578                         sc->bfe_rx_ring[i].bfe_mbuf = NULL;
  579                         bus_dmamap_unload(sc->bfe_tag,
  580                                         sc->bfe_rx_ring[i].bfe_map);
  581                 }
  582         }
  583         bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
  584         bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
  585 }
  586 
  587 static int
  588 bfe_list_rx_init(struct bfe_softc *sc)
  589 {
  590         int i;
  591 
  592         for(i = 0; i < BFE_RX_LIST_CNT; i++) {
  593                 if(bfe_list_newbuf(sc, i, NULL) == ENOBUFS)
  594                         return (ENOBUFS);
  595         }
  596 
  597         bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
  598         CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));
  599 
  600         sc->bfe_rx_cons = 0;
  601 
  602         return (0);
  603 }
  604 
  605 static int
  606 bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m)
  607 {
  608         struct bfe_rxheader *rx_header;
  609         struct bfe_desc *d;
  610         struct bfe_data *r;
  611         u_int32_t ctrl;
  612         int error;
  613 
  614         if ((c < 0) || (c >= BFE_RX_LIST_CNT))
  615                 return (EINVAL);
  616 
  617         if(m == NULL) {
  618                 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
  619                 if(m == NULL)
  620                         return (ENOBUFS);
  621                 m->m_len = m->m_pkthdr.len = MCLBYTES;
  622         }
  623         else
  624                 m->m_data = m->m_ext.ext_buf;
  625 
  626         rx_header = mtod(m, struct bfe_rxheader *);
  627         rx_header->len = 0;
  628         rx_header->flags = 0;
  629 
  630         /* Map the mbuf into DMA */
  631         sc->bfe_rx_cnt = c;
  632         d = &sc->bfe_rx_list[c];
  633         r = &sc->bfe_rx_ring[c];
  634         error = bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *),
  635                         MCLBYTES, bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
  636         if (error)
  637                 printf("Serious error: bfe failed to map RX buffer\n");
  638         bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREWRITE);
  639 
  640         ctrl = ETHER_MAX_LEN + 32;
  641 
  642         if(c == BFE_RX_LIST_CNT - 1)
  643                 ctrl |= BFE_DESC_EOT;
  644 
  645         d->bfe_ctrl = ctrl;
  646         r->bfe_mbuf = m;
  647         bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
  648         return (0);
  649 }
  650 
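       /*
        * Read the on-board EEPROM and extract the station (MAC) address
        * and the PHY address, plus a few other configuration values, for
        * this unit.
        */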
  651 static void
  652 bfe_get_config(struct bfe_softc *sc)
  653 {
  654         u_int8_t eeprom[128];
  655 
  656         bfe_read_eeprom(sc, eeprom);
  657 
  658         sc->bfe_enaddr[0] = eeprom[79];
  659         sc->bfe_enaddr[1] = eeprom[78];
  660         sc->bfe_enaddr[2] = eeprom[81];
  661         sc->bfe_enaddr[3] = eeprom[80];
  662         sc->bfe_enaddr[4] = eeprom[83];
  663         sc->bfe_enaddr[5] = eeprom[82];
  664 
  665         sc->bfe_phyaddr = eeprom[90] & 0x1f;
  666         sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1;
  667 
  668         sc->bfe_core_unit = 0;
  669         sc->bfe_dma_offset = BFE_PCI_DMA;
  670 }
  671 
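       /*
        * Temporarily point the BAR0 window at the PCI core so the
        * interrupt vector for the given cores and the prefetch/burst bits
        * can be set, then restore the original window.
        */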
  672 static void
  673 bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores)
  674 {
  675         u_int32_t bar_orig, pci_rev, val;
  676 
  677         bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4);
  678         pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4);
  679         pci_rev = CSR_READ_4(sc, BFE_SBIDHIGH) & BFE_RC_MASK;
  680 
  681         val = CSR_READ_4(sc, BFE_SBINTVEC);
  682         val |= cores;
  683         CSR_WRITE_4(sc, BFE_SBINTVEC, val);
  684 
  685         val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2);
  686         val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
  687         CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val);
  688 
  689         pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4);
  690 }
  691 
  692 static void
  693 bfe_clear_stats(struct bfe_softc *sc)
  694 {
  695         u_long reg;
  696 
  697         BFE_LOCK_ASSERT(sc);
  698 
  699         CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
  700         for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
  701                 CSR_READ_4(sc, reg);
  702         for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
  703                 CSR_READ_4(sc, reg);
  704 }
  705 
  706 static int
  707 bfe_resetphy(struct bfe_softc *sc)
  708 {
  709         u_int32_t val;
  710 
  711         bfe_writephy(sc, 0, BMCR_RESET);
  712         DELAY(100);
  713         bfe_readphy(sc, 0, &val);
  714         if (val & BMCR_RESET) {
  715                 printf("bfe%d: PHY Reset would not complete.\n", sc->bfe_unit);
  716                 return (ENXIO);
  717         }
  718         return (0);
  719 }
  720 
  721 static void
  722 bfe_chip_halt(struct bfe_softc *sc)
  723 {
  724         BFE_LOCK_ASSERT(sc);
  725         /* disable interrupts - not that it actually does..*/
  726         CSR_WRITE_4(sc, BFE_IMASK, 0);
  727         CSR_READ_4(sc, BFE_IMASK);
  728 
  729         CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
  730         bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);
  731 
  732         CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
  733         CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
  734         DELAY(10);
  735 }
  736 
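       /*
        * Full reset of the ethernet core: quiesce any running DMA, reset
        * the core, clear the MIB counters, set up MDIO and PHY selection,
        * program the frame length limits and re-enable the DMA engines.
        */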
  737 static void
  738 bfe_chip_reset(struct bfe_softc *sc)
  739 {
  740         u_int32_t val;
  741 
  742         BFE_LOCK_ASSERT(sc);
  743 
  744         /* Set the interrupt vector for the enet core */
  745         bfe_pci_setup(sc, BFE_INTVEC_ENET0);
  746 
  747         /* is core up? */
  748         val = CSR_READ_4(sc, BFE_SBTMSLOW) &
  749             (BFE_RESET | BFE_REJECT | BFE_CLOCK);
  750         if (val == BFE_CLOCK) {
  751                 /* It is, so shut it down */
  752                 CSR_WRITE_4(sc, BFE_RCV_LAZY, 0);
  753                 CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
  754                 bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
  755                 CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
  756                 sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
  757                 if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
  758                         bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE,
  759                             100, 0);
  760                 CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
  761                 sc->bfe_rx_prod = sc->bfe_rx_cons = 0;
  762         }
  763 
  764         bfe_core_reset(sc);
  765         bfe_clear_stats(sc);
  766 
  767         /*
  768          * We want the phy registers to be accessible even when
  769          * the driver is "downed" so initialize MDC preamble, frequency,
  770          * and whether internal or external phy here.
  771          */
  772 
   773         /* 4402 has 62.5MHz SB clock and internal phy */
  774         CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d);
  775 
  776         /* Internal or external PHY? */
  777         val = CSR_READ_4(sc, BFE_DEVCTRL);
  778         if(!(val & BFE_IPP))
  779                 CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL);
  780         else if(CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) {
  781                 BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR);
  782                 DELAY(100);
  783         }
  784 
  785         /* Enable CRC32 generation and set proper LED modes */
  786         BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
  787 
  788         /* Reset or clear powerdown control bit  */
  789         BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
  790 
  791         CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
  792                                 BFE_LAZY_FC_MASK));
  793 
  794         /*
  795          * We don't want lazy interrupts, so just send them at
  796          * the end of a frame, please
  797          */
  798         BFE_OR(sc, BFE_RCV_LAZY, 0);
  799 
  800         /* Set max lengths, accounting for VLAN tags */
  801         CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32);
  802         CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32);
  803 
  804         /* Set watermark XXX - magic */
  805         CSR_WRITE_4(sc, BFE_TX_WMARK, 56);
  806 
  807         /*
  808          * Initialise DMA channels
  809          * - not forgetting dma addresses need to be added to BFE_PCI_DMA
  810          */
  811         CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
  812         CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA);
  813 
  814         CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) |
  815                         BFE_RX_CTRL_ENABLE);
  816         CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA);
  817 
  818         bfe_resetphy(sc);
  819         bfe_setupphy(sc);
  820 }
  821 
  822 static void
  823 bfe_core_disable(struct bfe_softc *sc)
  824 {
  825         if((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET)
  826                 return;
  827 
  828         /*
   829          * Set reject, wait for it to be set, then wait for the core to stop
  830          * being busy, then set reset and reject and enable the clocks.
  831          */
  832         CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
  833         bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0);
  834         bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1);
  835         CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT |
  836                                 BFE_RESET));
  837         CSR_READ_4(sc, BFE_SBTMSLOW);
  838         DELAY(10);
  839         /* Leave reset and reject set */
  840         CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
  841         DELAY(10);
  842 }
  843 
  844 static void
  845 bfe_core_reset(struct bfe_softc *sc)
  846 {
  847         u_int32_t val;
  848 
  849         /* Disable the core */
  850         bfe_core_disable(sc);
  851 
  852         /* and bring it back up */
  853         CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
  854         CSR_READ_4(sc, BFE_SBTMSLOW);
  855         DELAY(10);
  856 
  857         /* Chip bug, clear SERR, IB and TO if they are set. */
  858         if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR)
  859                 CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0);
  860         val = CSR_READ_4(sc, BFE_SBIMSTATE);
  861         if (val & (BFE_IBE | BFE_TO))
  862                 CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));
  863 
  864         /* Clear reset and allow it to move through the core */
  865         CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
  866         CSR_READ_4(sc, BFE_SBTMSLOW);
  867         DELAY(10);
  868 
  869         /* Leave the clock set */
  870         CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK);
  871         CSR_READ_4(sc, BFE_SBTMSLOW);
  872         DELAY(10);
  873 }
  874 
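       /*
        * Write one MAC address into the receive CAM at the given index and
        * wait for the hardware to finish the write.
        */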
  875 static void
  876 bfe_cam_write(struct bfe_softc *sc, u_char *data, int index)
  877 {
  878         u_int32_t val;
  879 
  880         val  = ((u_int32_t) data[2]) << 24;
  881         val |= ((u_int32_t) data[3]) << 16;
  882         val |= ((u_int32_t) data[4]) <<  8;
  883         val |= ((u_int32_t) data[5]);
  884         CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val);
  885         val = (BFE_CAM_HI_VALID |
  886                         (((u_int32_t) data[0]) << 8) |
  887                         (((u_int32_t) data[1])));
  888         CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val);
  889         CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE |
  890                                 ((u_int32_t) index << BFE_CAM_INDEX_SHIFT)));
  891         bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1);
  892 }
  893 
  894 static void
  895 bfe_set_rx_mode(struct bfe_softc *sc)
  896 {
  897         struct ifnet *ifp = sc->bfe_ifp;
  898         struct ifmultiaddr  *ifma;
  899         u_int32_t val;
  900         int i = 0;
  901 
  902         val = CSR_READ_4(sc, BFE_RXCONF);
  903 
  904         if (ifp->if_flags & IFF_PROMISC)
  905                 val |= BFE_RXCONF_PROMISC;
  906         else
  907                 val &= ~BFE_RXCONF_PROMISC;
  908 
  909         if (ifp->if_flags & IFF_BROADCAST)
  910                 val &= ~BFE_RXCONF_DBCAST;
  911         else
  912                 val |= BFE_RXCONF_DBCAST;
  913 
  914 
  915         CSR_WRITE_4(sc, BFE_CAM_CTRL, 0);
  916         bfe_cam_write(sc, IF_LLADDR(sc->bfe_ifp), i++);
  917 
  918         if (ifp->if_flags & IFF_ALLMULTI)
  919                 val |= BFE_RXCONF_ALLMULTI;
  920         else {
  921                 val &= ~BFE_RXCONF_ALLMULTI;
  922                 IF_ADDR_LOCK(ifp);
  923                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  924                         if (ifma->ifma_addr->sa_family != AF_LINK)
  925                                 continue;
  926                         bfe_cam_write(sc,
  927                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i++);
  928                 }
  929                 IF_ADDR_UNLOCK(ifp);
  930         }
  931 
  932         CSR_WRITE_4(sc, BFE_RXCONF, val);
  933         BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE);
  934 }
  935 
  936 static void
  937 bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  938 {
  939         u_int32_t *ptr;
  940 
  941         ptr = arg;
  942         *ptr = segs->ds_addr;
  943 }
  944 
  945 static void
  946 bfe_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  947 {
  948         struct bfe_desc *d;
  949 
  950         d = arg;
  951         /* The chip needs all addresses to be added to BFE_PCI_DMA */
  952         d->bfe_addr = segs->ds_addr + BFE_PCI_DMA;
  953 }
  954 
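       /*
        * Undo bfe_attach(): tear down the interrupt handler, release the
        * IRQ and memory resources, free the ifnet and destroy the DMA
        * maps and tags that were created.
        */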
  955 static void
  956 bfe_release_resources(struct bfe_softc *sc)
  957 {
  958         device_t dev;
  959         int i;
  960 
  961         dev = sc->bfe_dev;
  962 
  963         if (sc->bfe_vpd_prodname != NULL)
  964                 free(sc->bfe_vpd_prodname, M_DEVBUF);
  965 
  966         if (sc->bfe_vpd_readonly != NULL)
  967                 free(sc->bfe_vpd_readonly, M_DEVBUF);
  968 
  969         if (sc->bfe_intrhand != NULL)
  970                 bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand);
  971 
  972         if (sc->bfe_irq != NULL)
  973                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq);
  974 
  975         if (sc->bfe_res != NULL)
  976                 bus_release_resource(dev, SYS_RES_MEMORY, 0x10, sc->bfe_res);
  977 
  978         if (sc->bfe_ifp != NULL)
  979                 if_free(sc->bfe_ifp);
  980 
  981         if(sc->bfe_tx_tag != NULL) {
  982                 bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
  983                 bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
  984                     sc->bfe_tx_map);
  985                 bus_dma_tag_destroy(sc->bfe_tx_tag);
  986                 sc->bfe_tx_tag = NULL;
  987         }
  988 
  989         if(sc->bfe_rx_tag != NULL) {
  990                 bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
  991                 bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
  992                     sc->bfe_rx_map);
  993                 bus_dma_tag_destroy(sc->bfe_rx_tag);
  994                 sc->bfe_rx_tag = NULL;
  995         }
  996 
  997         if(sc->bfe_tag != NULL) {
  998                 for(i = 0; i < BFE_TX_LIST_CNT; i++) {
  999                         bus_dmamap_destroy(sc->bfe_tag,
 1000                             sc->bfe_tx_ring[i].bfe_map);
 1001                 }
 1002                 for(i = 0; i < BFE_RX_LIST_CNT; i++) {
 1003                         bus_dmamap_destroy(sc->bfe_tag,
 1004                             sc->bfe_rx_ring[i].bfe_map);
 1005                 }
 1006                 bus_dma_tag_destroy(sc->bfe_tag);
 1007                 sc->bfe_tag = NULL;
 1008         }
 1009 
 1010         if(sc->bfe_parent_tag != NULL)
 1011                 bus_dma_tag_destroy(sc->bfe_parent_tag);
 1012 
 1013         return;
 1014 }
 1015 
 1016 static void
 1017 bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data)
 1018 {
 1019         long i;
 1020         u_int16_t *ptr = (u_int16_t *)data;
 1021 
 1022         for(i = 0; i < 128; i += 2)
 1023                 ptr[i/2] = CSR_READ_4(sc, 4096 + i);
 1024 }
 1025 
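       /*
        * Poll a register until the given bit is set (clear == 0) or
        * cleared (clear == 1), waiting 10us between reads.  Returns -1
        * and logs a warning if the bit does not change within 'timeout'
        * iterations.
        */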
 1026 static int
 1027 bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit,
 1028                 u_long timeout, const int clear)
 1029 {
 1030         u_long i;
 1031 
 1032         for (i = 0; i < timeout; i++) {
 1033                 u_int32_t val = CSR_READ_4(sc, reg);
 1034 
 1035                 if (clear && !(val & bit))
 1036                         break;
 1037                 if (!clear && (val & bit))
 1038                         break;
 1039                 DELAY(10);
 1040         }
 1041         if (i == timeout) {
 1042                 printf("bfe%d: BUG!  Timeout waiting for bit %08x of register "
 1043                                 "%x to %s.\n", sc->bfe_unit, bit, reg,
 1044                                 (clear ? "clear" : "set"));
 1045                 return (-1);
 1046         }
 1047         return (0);
 1048 }
 1049 
 1050 static int
 1051 bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val)
 1052 {
 1053         int err;
 1054 
 1055         /* Clear MII ISR */
 1056         CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
 1057         CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
 1058                                 (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
 1059                                 (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
 1060                                 (reg << BFE_MDIO_RA_SHIFT) |
 1061                                 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
 1062         err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
 1063         *val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA;
 1064 
 1065         return (err);
 1066 }
 1067 
 1068 static int
 1069 bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val)
 1070 {
 1071         int status;
 1072 
 1073         CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
 1074         CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
 1075                                 (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
 1076                                 (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
 1077                                 (reg << BFE_MDIO_RA_SHIFT) |
 1078                                 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
 1079                                 (val & BFE_MDIO_DATA_DATA)));
 1080         status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
 1081 
 1082         return (status);
 1083 }
 1084 
 1085 /*
 1086  * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
 1087  * twice
 1088  */
 1089 static int
 1090 bfe_setupphy(struct bfe_softc *sc)
 1091 {
 1092         u_int32_t val;
 1093 
 1094         /* Enable activity LED */
 1095         bfe_readphy(sc, 26, &val);
 1096         bfe_writephy(sc, 26, val & 0x7fff);
 1097         bfe_readphy(sc, 26, &val);
 1098 
 1099         /* Enable traffic meter LED mode */
 1100         bfe_readphy(sc, 27, &val);
 1101         bfe_writephy(sc, 27, val | (1 << 6));
 1102 
 1103         return (0);
 1104 }
 1105 
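       /*
        * Fold the clear-on-read hardware MIB counters into the software
        * copies kept in bfe_hwstats.
        */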
 1106 static void
 1107 bfe_stats_update(struct bfe_softc *sc)
 1108 {
 1109         u_long reg;
 1110         u_int32_t *val;
 1111 
 1112         val = &sc->bfe_hwstats.tx_good_octets;
 1113         for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) {
 1114                 *val++ += CSR_READ_4(sc, reg);
 1115         }
 1116         val = &sc->bfe_hwstats.rx_good_octets;
 1117         for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) {
 1118                 *val++ += CSR_READ_4(sc, reg);
 1119         }
 1120 }
 1121 
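       /* Reclaim mbufs and descriptors for frames the chip has finished sending */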
 1122 static void
 1123 bfe_txeof(struct bfe_softc *sc)
 1124 {
 1125         struct ifnet *ifp;
 1126         int i, chipidx;
 1127 
 1128         BFE_LOCK_ASSERT(sc);
 1129 
 1130         ifp = sc->bfe_ifp;
 1131 
 1132         chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
 1133         chipidx /= sizeof(struct bfe_desc);
 1134 
 1135         i = sc->bfe_tx_cons;
 1136         /* Go through the mbufs and free those that have been transmitted */
 1137         while(i != chipidx) {
 1138                 struct bfe_data *r = &sc->bfe_tx_ring[i];
 1139                 if(r->bfe_mbuf != NULL) {
 1140                         ifp->if_opackets++;
 1141                         m_freem(r->bfe_mbuf);
 1142                         r->bfe_mbuf = NULL;
 1143                 }
 1144                 bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
 1145                 sc->bfe_tx_cnt--;
 1146                 BFE_INC(i, BFE_TX_LIST_CNT);
 1147         }
 1148 
 1149         if(i != sc->bfe_tx_cons) {
 1150                 /* we freed up some mbufs */
 1151                 sc->bfe_tx_cons = i;
 1152                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1153         }
 1154         if(sc->bfe_tx_cnt == 0)
 1155                 ifp->if_timer = 0;
 1156         else
 1157                 ifp->if_timer = 5;
 1158 }
 1159 
 1160 /* Pass a received packet up the stack */
 1161 static void
 1162 bfe_rxeof(struct bfe_softc *sc)
 1163 {
 1164         struct mbuf *m;
 1165         struct ifnet *ifp;
 1166         struct bfe_rxheader *rxheader;
 1167         struct bfe_data *r;
 1168         int cons;
 1169         u_int32_t status, current, len, flags;
 1170 
 1171         BFE_LOCK_ASSERT(sc);
 1172         cons = sc->bfe_rx_cons;
 1173         status = CSR_READ_4(sc, BFE_DMARX_STAT);
 1174         current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);
 1175 
 1176         ifp = sc->bfe_ifp;
 1177 
 1178         while(current != cons) {
 1179                 r = &sc->bfe_rx_ring[cons];
 1180                 m = r->bfe_mbuf;
 1181                 rxheader = mtod(m, struct bfe_rxheader*);
 1182                 bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_POSTREAD);
 1183                 len = rxheader->len;
 1184                 r->bfe_mbuf = NULL;
 1185 
 1186                 bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
 1187                 flags = rxheader->flags;
 1188 
 1189                 len -= ETHER_CRC_LEN;
 1190 
 1191                 /* flag an error and try again */
 1192                 if ((len > ETHER_MAX_LEN+32) || (flags & BFE_RX_FLAG_ERRORS)) {
 1193                         ifp->if_ierrors++;
 1194                         if (flags & BFE_RX_FLAG_SERR)
 1195                                 ifp->if_collisions++;
 1196                         bfe_list_newbuf(sc, cons, m);
 1197                         BFE_INC(cons, BFE_RX_LIST_CNT);
 1198                         continue;
 1199                 }
 1200 
 1201                 /* Go past the rx header */
 1202                 if (bfe_list_newbuf(sc, cons, NULL) == 0) {
 1203                         m_adj(m, BFE_RX_OFFSET);
 1204                         m->m_len = m->m_pkthdr.len = len;
 1205                 } else {
 1206                         bfe_list_newbuf(sc, cons, m);
 1207                         ifp->if_ierrors++;
 1208                         BFE_INC(cons, BFE_RX_LIST_CNT);
 1209                         continue;
 1210                 }
 1211 
 1212                 ifp->if_ipackets++;
 1213                 m->m_pkthdr.rcvif = ifp;
 1214                 BFE_UNLOCK(sc);
 1215                 (*ifp->if_input)(ifp, m);
 1216                 BFE_LOCK(sc);
 1217 
 1218                 BFE_INC(cons, BFE_RX_LIST_CNT);
 1219         }
 1220         sc->bfe_rx_cons = cons;
 1221 }
 1222 
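       /*
        * Interrupt handler: acknowledge the pending status bits, stop the
        * chip on fatal descriptor errors, count DMA errors and reinitialize,
        * then service the RX and TX rings and restart transmission if
        * frames are queued.
        */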
 1223 static void
 1224 bfe_intr(void *xsc)
 1225 {
 1226         struct bfe_softc *sc = xsc;
 1227         struct ifnet *ifp;
 1228         u_int32_t istat, imask, flag;
 1229 
 1230         ifp = sc->bfe_ifp;
 1231 
 1232         BFE_LOCK(sc);
 1233 
 1234         istat = CSR_READ_4(sc, BFE_ISTAT);
 1235         imask = CSR_READ_4(sc, BFE_IMASK);
 1236 
 1237         /*
 1238          * Defer unsolicited interrupts - This is necessary because setting the
  1239          * chip's interrupt mask register to 0 doesn't actually stop the
 1240          * interrupts
 1241          */
 1242         istat &= imask;
 1243         CSR_WRITE_4(sc, BFE_ISTAT, istat);
 1244         CSR_READ_4(sc, BFE_ISTAT);
 1245 
 1246         /* not expecting this interrupt, disregard it */
 1247         if(istat == 0) {
 1248                 BFE_UNLOCK(sc);
 1249                 return;
 1250         }
 1251 
 1252         if(istat & BFE_ISTAT_ERRORS) {
 1253 
 1254                 if (istat & BFE_ISTAT_DSCE) {
 1255                         printf("if_bfe Descriptor Error\n");
 1256                         bfe_stop(sc);
 1257                         BFE_UNLOCK(sc);
 1258                         return;
 1259                 }
 1260 
 1261                 if (istat & BFE_ISTAT_DPE) {
 1262                         printf("if_bfe Descriptor Protocol Error\n");
 1263                         bfe_stop(sc);
 1264                         BFE_UNLOCK(sc);
 1265                         return;
 1266                 }
 1267                 
 1268                 flag = CSR_READ_4(sc, BFE_DMATX_STAT);
 1269                 if(flag & BFE_STAT_EMASK)
 1270                         ifp->if_oerrors++;
 1271 
 1272                 flag = CSR_READ_4(sc, BFE_DMARX_STAT);
 1273                 if(flag & BFE_RX_FLAG_ERRORS)
 1274                         ifp->if_ierrors++;
 1275 
 1276                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1277                 bfe_init_locked(sc);
 1278         }
 1279 
 1280         /* A packet was received */
 1281         if(istat & BFE_ISTAT_RX)
 1282                 bfe_rxeof(sc);
 1283 
 1284         /* A packet was sent */
 1285         if(istat & BFE_ISTAT_TX)
 1286                 bfe_txeof(sc);
 1287 
 1288         /* We have packets pending, fire them out */
 1289         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 1290             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1291                 bfe_start_locked(ifp);
 1292 
 1293         BFE_UNLOCK(sc);
 1294 }
 1295 
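       /*
        * Map an mbuf chain into the TX ring starting at *txidx.  Long
        * chains are defragmented first so one packet cannot consume the
        * whole shared descriptor list.  On success *txidx is advanced
        * past the descriptors used and bfe_tx_cnt is updated.
        */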
 1296 static int
 1297 bfe_encap(struct bfe_softc *sc, struct mbuf **m_head, u_int32_t *txidx)
 1298 {
 1299         struct bfe_desc *d = NULL;
 1300         struct bfe_data *r = NULL;
 1301         struct mbuf     *m;
 1302         u_int32_t          frag, cur, cnt = 0;
 1303         int chainlen = 0;
 1304         int error;
 1305 
 1306         if(BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2)
 1307                 return (ENOBUFS);
 1308 
 1309         /*
 1310          * Count the number of frags in this chain to see if
 1311          * we need to m_defrag.  Since the descriptor list is shared
 1312          * by all packets, we'll m_defrag long chains so that they
 1313          * do not use up the entire list, even if they would fit.
 1314          */
 1315         for(m = *m_head; m != NULL; m = m->m_next)
 1316                 chainlen++;
 1317 
 1318 
 1319         if ((chainlen > BFE_TX_LIST_CNT / 4) ||
 1320                         ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) {
 1321                 m = m_defrag(*m_head, M_DONTWAIT);
 1322                 if (m == NULL)
 1323                         return (ENOBUFS);
 1324                 *m_head = m;
 1325         }
 1326 
 1327         /*
 1328          * Start packing the mbufs in this chain into
 1329          * the fragment pointers. Stop when we run out
 1330          * of fragments or hit the end of the mbuf chain.
 1331          */
 1332         cur = frag = *txidx;
 1333         cnt = 0;
 1334 
 1335         for(m = *m_head; m != NULL; m = m->m_next) {
 1336                 if(m->m_len != 0) {
 1337                         if((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2)
 1338                                 return (ENOBUFS);
 1339 
 1340                         d = &sc->bfe_tx_list[cur];
 1341                         r = &sc->bfe_tx_ring[cur];
 1342                         d->bfe_ctrl = BFE_DESC_LEN & m->m_len;
  1343                         /* always interrupt on completion */
 1344                         d->bfe_ctrl |= BFE_DESC_IOC;
 1345                         if(cnt == 0)
 1346                                 /* Set start of frame */
 1347                                 d->bfe_ctrl |= BFE_DESC_SOF;
 1348                         if(cur == BFE_TX_LIST_CNT - 1)
 1349                                 /*
 1350                                  * Tell the chip to wrap to the start of
 1351                                  * the descriptor list
 1352                                  */
 1353                                 d->bfe_ctrl |= BFE_DESC_EOT;
 1354 
 1355                         error = bus_dmamap_load(sc->bfe_tag,
 1356                             r->bfe_map, mtod(m, void*), m->m_len,
 1357                             bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
 1358                         if (error)
 1359                                 return (ENOBUFS);
 1360                         bus_dmamap_sync(sc->bfe_tag, r->bfe_map,
 1361                             BUS_DMASYNC_PREWRITE);
 1362 
 1363                         frag = cur;
 1364                         BFE_INC(cur, BFE_TX_LIST_CNT);
 1365                         cnt++;
 1366                 }
 1367         }
 1368 
 1369         if (m != NULL)
 1370                 return (ENOBUFS);
 1371 
 1372         sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF;
 1373         sc->bfe_tx_ring[frag].bfe_mbuf = *m_head;
 1374         bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
 1375 
 1376         *txidx = cur;
 1377         sc->bfe_tx_cnt += cnt;
 1378         return (0);
 1379 }
 1380 
 1381 /*
 1382  * Set up to transmit a packet.
 1383  */
 1384 static void
 1385 bfe_start(struct ifnet *ifp)
 1386 {
 1387         BFE_LOCK((struct bfe_softc *)ifp->if_softc);
 1388         bfe_start_locked(ifp);
 1389         BFE_UNLOCK((struct bfe_softc *)ifp->if_softc);
 1390 }
 1391 
 1392 /*
 1393  * Set up to transmit a packet. The softc is already locked.
 1394  */
 1395 static void
 1396 bfe_start_locked(struct ifnet *ifp)
 1397 {
 1398         struct bfe_softc *sc;
 1399         struct mbuf *m_head = NULL;
 1400         int idx, queued = 0;
 1401 
 1402         sc = ifp->if_softc;
 1403         idx = sc->bfe_tx_prod;
 1404 
 1405         BFE_LOCK_ASSERT(sc);
 1406 
 1407         /*
 1408          * Not much point trying to send if the link is down
 1409          * or we have nothing to send.
 1410          */
 1411         if (!sc->bfe_link && ifp->if_snd.ifq_len < 10)
 1412                 return;
 1413 
 1414         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
 1415                 return;
 1416 
 1417         while(sc->bfe_tx_ring[idx].bfe_mbuf == NULL) {
 1418                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1419                 if(m_head == NULL)
 1420                         break;
 1421 
 1422                 /*
  1423                  * Pack the data into the tx ring.  If we don't have
 1424                  * enough room, let the chip drain the ring.
 1425                  */
 1426                 if(bfe_encap(sc, &m_head, &idx)) {
 1427                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1428                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1429                         break;
 1430                 }
 1431 
 1432                 queued++;
 1433 
 1434                 /*
 1435                  * If there's a BPF listener, bounce a copy of this frame
 1436                  * to him.
 1437                  */
 1438                 BPF_MTAP(ifp, m_head);
 1439         }
 1440 
 1441         if (queued) {
 1442                 sc->bfe_tx_prod = idx;
 1443                 /* Transmit - twice due to apparent hardware bug */
 1444                 CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
 1445                 CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
 1446 
 1447                 /*
 1448                  * Set a timeout in case the chip goes out to lunch.
 1449                  */
 1450                 ifp->if_timer = 5;
 1451         }
 1452 }
 1453 
 1454 static void
 1455 bfe_init(void *xsc)
 1456 {
 1457         BFE_LOCK((struct bfe_softc *)xsc);
 1458         bfe_init_locked(xsc);
 1459         BFE_UNLOCK((struct bfe_softc *)xsc);
 1460 }
 1461 
 1462 static void
 1463 bfe_init_locked(void *xsc)
 1464 {
 1465         struct bfe_softc *sc = (struct bfe_softc*)xsc;
 1466         struct ifnet *ifp = sc->bfe_ifp;
 1467 
 1468         BFE_LOCK_ASSERT(sc);
 1469 
 1470         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1471                 return;
 1472 
 1473         bfe_stop(sc);
 1474         bfe_chip_reset(sc);
 1475 
 1476         if (bfe_list_rx_init(sc) == ENOBUFS) {
 1477                 printf("bfe%d: bfe_init: Not enough memory for list buffers\n",
 1478                     sc->bfe_unit);
 1479                 bfe_stop(sc);
 1480                 return;
 1481         }
 1482 
 1483         bfe_set_rx_mode(sc);
 1484 
 1485         /* Enable the chip and core */
 1486         BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
 1487         /* Enable interrupts */
 1488         CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);
 1489 
 1490         bfe_ifmedia_upd(ifp);
 1491         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1492         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1493 
 1494         sc->bfe_stat_ch = timeout(bfe_tick, sc, hz);
 1495 }
 1496 
 1497 /*
 1498  * Set media options.
 1499  */
 1500 static int
 1501 bfe_ifmedia_upd(struct ifnet *ifp)
 1502 {
 1503         struct bfe_softc *sc;
 1504         struct mii_data *mii;
 1505 
 1506         sc = ifp->if_softc;
 1507 
 1508         mii = device_get_softc(sc->bfe_miibus);
 1509         sc->bfe_link = 0;
 1510         if (mii->mii_instance) {
 1511                 struct mii_softc *miisc;
 1512                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
 1513                                 miisc = LIST_NEXT(miisc, mii_list))
 1514                         mii_phy_reset(miisc);
 1515         }
 1516         mii_mediachg(mii);
 1517 
 1518         return (0);
 1519 }
 1520 
 1521 /*
 1522  * Report current media status.
 1523  */
 1524 static void
 1525 bfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 1526 {
 1527         struct bfe_softc *sc = ifp->if_softc;
 1528         struct mii_data *mii;
 1529 
 1530         mii = device_get_softc(sc->bfe_miibus);
 1531         mii_pollstat(mii);
 1532         ifmr->ifm_active = mii->mii_media_active;
 1533         ifmr->ifm_status = mii->mii_media_status;
 1534 }
 1535 
 1536 static int
 1537 bfe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1538 {
 1539         struct bfe_softc *sc = ifp->if_softc;
 1540         struct ifreq *ifr = (struct ifreq *) data;
 1541         struct mii_data *mii;
 1542         int error = 0;
 1543 
 1544         switch(command) {
 1545                 case SIOCSIFFLAGS:
 1546                         BFE_LOCK(sc);
 1547                         if(ifp->if_flags & IFF_UP)
 1548                                 if(ifp->if_drv_flags & IFF_DRV_RUNNING)
 1549                                         bfe_set_rx_mode(sc);
 1550                                 else
 1551                                         bfe_init_locked(sc);
 1552                         else if(ifp->if_drv_flags & IFF_DRV_RUNNING)
 1553                                 bfe_stop(sc);
 1554                         BFE_UNLOCK(sc);
 1555                         break;
 1556                 case SIOCADDMULTI:
 1557                 case SIOCDELMULTI:
 1558                         BFE_LOCK(sc);
 1559                         if(ifp->if_drv_flags & IFF_DRV_RUNNING)
 1560                                 bfe_set_rx_mode(sc);
 1561                         BFE_UNLOCK(sc);
 1562                         break;
 1563                 case SIOCGIFMEDIA:
 1564                 case SIOCSIFMEDIA:
 1565                         mii = device_get_softc(sc->bfe_miibus);
 1566                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
 1567                             command);
 1568                         break;
 1569                 default:
 1570                         error = ether_ioctl(ifp, command, data);
 1571                         break;
 1572         }
 1573 
 1574         return (error);
 1575 }
 1576 
 1577 static void
 1578 bfe_watchdog(struct ifnet *ifp)
 1579 {
 1580         struct bfe_softc *sc;
 1581 
 1582         sc = ifp->if_softc;
 1583 
 1584         BFE_LOCK(sc);
 1585 
 1586         printf("bfe%d: watchdog timeout -- resetting\n", sc->bfe_unit);
 1587 
 1588         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1589         bfe_init_locked(sc);
 1590 
 1591         ifp->if_oerrors++;
 1592 
 1593         BFE_UNLOCK(sc);
 1594 }
 1595 
 1596 static void
 1597 bfe_tick(void *xsc)
 1598 {
 1599         struct bfe_softc *sc = xsc;
 1600         struct mii_data *mii;
 1601 
 1602         if (sc == NULL)
 1603                 return;
 1604 
 1605         BFE_LOCK(sc);
 1606 
 1607         mii = device_get_softc(sc->bfe_miibus);
 1608 
 1609         bfe_stats_update(sc);
 1610         sc->bfe_stat_ch = timeout(bfe_tick, sc, hz);
 1611 
 1612         if(sc->bfe_link) {
 1613                 BFE_UNLOCK(sc);
 1614                 return;
 1615         }
 1616 
 1617         mii_tick(mii);
 1618         if (!sc->bfe_link && mii->mii_media_status & IFM_ACTIVE &&
 1619                         IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
 1620                 sc->bfe_link++;
 1621 
 1622         BFE_UNLOCK(sc);
 1623 }
 1624 
 1625 /*
 1626  * Stop the adapter and free any mbufs allocated to the
 1627  * RX and TX lists.
 1628  */
 1629 static void
 1630 bfe_stop(struct bfe_softc *sc)
 1631 {
 1632         struct ifnet *ifp;
 1633 
 1634         BFE_LOCK_ASSERT(sc);
 1635 
 1636         untimeout(bfe_tick, sc, sc->bfe_stat_ch);
 1637 
 1638         ifp = sc->bfe_ifp;
 1639 
 1640         bfe_chip_halt(sc);
 1641         bfe_tx_ring_free(sc);
 1642         bfe_rx_ring_free(sc);
 1643 
 1644         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1645 }
