FreeBSD/Linux Kernel Cross Reference
sys/dev/fxp/if_fxp.c


    1 /*-
    2  * Copyright (c) 1995, David Greenman
    3  * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice unmodified, this list of conditions, and the following
   11  *    disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 /*
   34  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
   35  */
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/endian.h>
   40 #include <sys/mbuf.h>
   41                 /* #include <sys/mutex.h> */
   42 #include <sys/kernel.h>
   43 #include <sys/module.h>
   44 #include <sys/socket.h>
   45 #include <sys/sysctl.h>
   46 
   47 #include <net/if.h>
   48 #include <net/if_dl.h>
   49 #include <net/if_media.h>
   50 
   51 #include <net/bpf.h>
   52 #include <sys/sockio.h>
   53 #include <sys/bus.h>
   54 #include <machine/bus.h>
   55 #include <sys/rman.h>
   56 #include <machine/resource.h>
   57 
   58 #include <net/ethernet.h>
   59 #include <net/if_arp.h>
   60 
   61 #include <machine/clock.h>      /* for DELAY */
   62 
   63 #include <net/if_types.h>
   64 #include <net/if_vlan_var.h>
   65 
   66 #ifdef FXP_IP_CSUM_WAR
   67 #include <netinet/in.h>
   68 #include <netinet/in_systm.h>
   69 #include <netinet/ip.h>
   70 #include <machine/in_cksum.h>
   71 #endif
   72 
   73 #include <dev/pci/pcivar.h>
   74 #include <dev/pci/pcireg.h>             /* for PCIM_CMD_xxx */
   75 
   76 #include <dev/mii/mii.h>
   77 #include <dev/mii/miivar.h>
   78 
   79 #include <dev/fxp/if_fxpreg.h>
   80 #include <dev/fxp/if_fxpvar.h>
   81 #include <dev/fxp/rcvbundl.h>
   82 
   83 MODULE_DEPEND(fxp, pci, 1, 1, 1);
   84 MODULE_DEPEND(fxp, ether, 1, 1, 1);
   85 MODULE_DEPEND(fxp, miibus, 1, 1, 1);
   86 #include "miibus_if.h"
   87 
   88 /*
   89  * NOTE!  On the Alpha, we have an alignment constraint.  The
   90  * card DMAs the packet immediately following the RFA.  However,
   91  * the first thing in the packet is a 14-byte Ethernet header.
   92  * This means that the packet is misaligned.  To compensate,
   93  * we actually offset the RFA 2 bytes into the cluster.  This
    94  * aligns the packet after the Ethernet header at a 32-bit
   95  * boundary.  HOWEVER!  This means that the RFA is misaligned!
   96  */
   97 #define RFA_ALIGNMENT_FUDGE     2
   98 
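       /*
        * Illustrative cluster layout resulting from the fudge (a sketch,
        * not code from this driver):
        *
        *   bytes 0..1 : RFA_ALIGNMENT_FUDGE padding
        *   bytes 2..  : RFA (therefore misaligned by 2 bytes)
        *   then       : 14-byte Ethernet header, DMAed right after the RFA
        *   then       : IP header/payload, now on a 32-bit boundary
        */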
   99 /*
  100  * Set initial transmit threshold at 64 (512 bytes). This is
   101  * increased by 64 (512 bytes) at a time, to a maximum of 192
  102  * (1536 bytes), if an underrun occurs.
  103  */
  104 static int tx_threshold = 64;
  105 
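       /*
        * The bump itself is applied elsewhere in the driver when the chip
        * reports a transmit underrun; roughly (a sketch, not verbatim):
        *
        *      if (underruns_reported && tx_threshold < 192)
        *              tx_threshold += 64;
        *
        * where underruns_reported stands in for the driver's underrun
        * accounting from the chip's statistics dump.
        */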
  106 /*
  107  * The configuration byte map has several undefined fields which
  108  * must be one or must be zero.  Set up a template for these bits
   109  * only (assuming an 82557 chip), leaving the actual configuration
  110  * to fxp_init.
  111  *
  112  * See struct fxp_cb_config for the bit definitions.
  113  */
  114 static u_char fxp_cb_config_template[] = {
  115         0x0, 0x0,               /* cb_status */
  116         0x0, 0x0,               /* cb_command */
  117         0x0, 0x0, 0x0, 0x0,     /* link_addr */
  118         0x0,    /*  0 */
  119         0x0,    /*  1 */
  120         0x0,    /*  2 */
  121         0x0,    /*  3 */
  122         0x0,    /*  4 */
  123         0x0,    /*  5 */
  124         0x32,   /*  6 */
  125         0x0,    /*  7 */
  126         0x0,    /*  8 */
  127         0x0,    /*  9 */
  128         0x6,    /* 10 */
  129         0x0,    /* 11 */
  130         0x0,    /* 12 */
  131         0x0,    /* 13 */
  132         0xf2,   /* 14 */
  133         0x48,   /* 15 */
  134         0x0,    /* 16 */
  135         0x40,   /* 17 */
  136         0xf0,   /* 18 */
  137         0x0,    /* 19 */
  138         0x3f,   /* 20 */
  139         0x5     /* 21 */
  140 };
  141 
  142 struct fxp_ident {
  143         uint16_t        devid;
  144         int16_t         revid;          /* -1 matches anything */
  145         char            *name;
  146 };
  147 
  148 /*
  149  * Claim various Intel PCI device identifiers for this driver.  The
   150  * sub-vendor and sub-device fields are extensively used to identify
  151  * particular variants, but we don't currently differentiate between
  152  * them.
  153  */
  154 static struct fxp_ident fxp_ident_table[] = {
  155     { 0x1029,   -1,     "Intel 82559 PCI/CardBus Pro/100" },
  156     { 0x1030,   -1,     "Intel 82559 Pro/100 Ethernet" },
  157     { 0x1031,   -1,     "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  158     { 0x1032,   -1,     "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  159     { 0x1033,   -1,     "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  160     { 0x1034,   -1,     "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  161     { 0x1035,   -1,     "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  162     { 0x1036,   -1,     "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  163     { 0x1037,   -1,     "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  164     { 0x1038,   -1,     "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  165     { 0x1039,   -1,     "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  166     { 0x103A,   -1,     "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  167     { 0x103B,   -1,     "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  168     { 0x103C,   -1,     "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  169     { 0x103D,   -1,     "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  170     { 0x103E,   -1,     "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  171     { 0x1050,   -1,     "Intel 82801BA (D865) Pro/100 VE Ethernet" },
  172     { 0x1051,   -1,     "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
  173     { 0x1059,   -1,     "Intel 82551QM Pro/100 M Mobile Connection" },
  174     { 0x1064,   -1,     "Intel 82562EZ (ICH6)" },
  175     { 0x1068,   -1,     "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
  176     { 0x1069,   -1,     "Intel 82562EM/EX/GX Pro/100 Ethernet" },
  177     { 0x1209,   -1,     "Intel 82559ER Embedded 10/100 Ethernet" },
  178     { 0x1229,   0x01,   "Intel 82557 Pro/100 Ethernet" },
  179     { 0x1229,   0x02,   "Intel 82557 Pro/100 Ethernet" },
  180     { 0x1229,   0x03,   "Intel 82557 Pro/100 Ethernet" },
  181     { 0x1229,   0x04,   "Intel 82558 Pro/100 Ethernet" },
  182     { 0x1229,   0x05,   "Intel 82558 Pro/100 Ethernet" },
  183     { 0x1229,   0x06,   "Intel 82559 Pro/100 Ethernet" },
  184     { 0x1229,   0x07,   "Intel 82559 Pro/100 Ethernet" },
  185     { 0x1229,   0x08,   "Intel 82559 Pro/100 Ethernet" },
  186     { 0x1229,   0x09,   "Intel 82559ER Pro/100 Ethernet" },
  187     { 0x1229,   0x0c,   "Intel 82550 Pro/100 Ethernet" },
  188     { 0x1229,   0x0d,   "Intel 82550 Pro/100 Ethernet" },
  189     { 0x1229,   0x0e,   "Intel 82550 Pro/100 Ethernet" },
  190     { 0x1229,   0x0f,   "Intel 82551 Pro/100 Ethernet" },
  191     { 0x1229,   0x10,   "Intel 82551 Pro/100 Ethernet" },
  192     { 0x1229,   -1,     "Intel 82557/8/9 Pro/100 Ethernet" },
  193     { 0x2449,   -1,     "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
  194     { 0x27dc,   -1,     "Intel 82801GB (ICH7) 10/100 Ethernet" },
  195     { 0,        -1,     NULL },
  196 };
  197 
  198 #ifdef FXP_IP_CSUM_WAR
  199 #define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  200 #else
  201 #define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
  202 #endif
  203 
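       /*
        * FXP_CSUM_FEATURES is what fxp_attach() below loads into
        * ifp->if_hwassist on 82550-or-later parts (FXP_FLAG_EXT_RFA);
        * CSUM_IP is claimed only when the FXP_IP_CSUM_WAR workaround is
        * compiled in.
        */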
  204 static int              fxp_probe(device_t dev);
  205 static int              fxp_attach(device_t dev);
  206 static int              fxp_detach(device_t dev);
  207 static int              fxp_shutdown(device_t dev);
  208 static int              fxp_suspend(device_t dev);
  209 static int              fxp_resume(device_t dev);
  210 
  211 static void             fxp_intr(void *xsc);
  212 static void             fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
  213                             uint8_t statack, int count);
  214 static void             fxp_init(void *xsc);
  215 static void             fxp_init_body(struct fxp_softc *sc);
  216 static void             fxp_tick(void *xsc);
  217 static void             fxp_start(struct ifnet *ifp);
  218 static void             fxp_start_body(struct ifnet *ifp);
  219 static void             fxp_stop(struct fxp_softc *sc);
  220 static void             fxp_release(struct fxp_softc *sc);
  221 static int              fxp_ioctl(struct ifnet *ifp, u_long command,
  222                             caddr_t data);
  223 static void             fxp_watchdog(struct ifnet *ifp);
  224 static int              fxp_add_rfabuf(struct fxp_softc *sc,
  225                             struct fxp_rx *rxp);
  226 static int              fxp_mc_addrs(struct fxp_softc *sc);
  227 static void             fxp_mc_setup(struct fxp_softc *sc);
  228 static uint16_t         fxp_eeprom_getword(struct fxp_softc *sc, int offset,
  229                             int autosize);
  230 static void             fxp_eeprom_putword(struct fxp_softc *sc, int offset,
  231                             uint16_t data);
  232 static void             fxp_autosize_eeprom(struct fxp_softc *sc);
  233 static void             fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
  234                             int offset, int words);
  235 static void             fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
  236                             int offset, int words);
  237 static int              fxp_ifmedia_upd(struct ifnet *ifp);
  238 static void             fxp_ifmedia_sts(struct ifnet *ifp,
  239                             struct ifmediareq *ifmr);
  240 static int              fxp_serial_ifmedia_upd(struct ifnet *ifp);
  241 static void             fxp_serial_ifmedia_sts(struct ifnet *ifp,
  242                             struct ifmediareq *ifmr);
   243 static int              fxp_miibus_readreg(device_t dev, int phy, int reg);
  244 static void             fxp_miibus_writereg(device_t dev, int phy, int reg,
  245                             int value);
  246 static void             fxp_load_ucode(struct fxp_softc *sc);
  247 static int              sysctl_int_range(SYSCTL_HANDLER_ARGS,
  248                             int low, int high);
  249 static int              sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
  250 static int              sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
  251 static void             fxp_scb_wait(struct fxp_softc *sc);
  252 static void             fxp_scb_cmd(struct fxp_softc *sc, int cmd);
  253 static void             fxp_dma_wait(struct fxp_softc *sc,
  254                             volatile uint16_t *status, bus_dma_tag_t dmat,
  255                             bus_dmamap_t map);
  256 
  257 static device_method_t fxp_methods[] = {
  258         /* Device interface */
  259         DEVMETHOD(device_probe,         fxp_probe),
  260         DEVMETHOD(device_attach,        fxp_attach),
  261         DEVMETHOD(device_detach,        fxp_detach),
  262         DEVMETHOD(device_shutdown,      fxp_shutdown),
  263         DEVMETHOD(device_suspend,       fxp_suspend),
  264         DEVMETHOD(device_resume,        fxp_resume),
  265 
  266         /* MII interface */
  267         DEVMETHOD(miibus_readreg,       fxp_miibus_readreg),
  268         DEVMETHOD(miibus_writereg,      fxp_miibus_writereg),
  269 
  270         { 0, 0 }
  271 };
  272 
  273 static driver_t fxp_driver = {
  274         "fxp",
  275         fxp_methods,
  276         sizeof(struct fxp_softc),
  277 };
  278 
  279 static devclass_t fxp_devclass;
  280 
  281 DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
  282 DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
  283 DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
  284 
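       /*
        * The DRIVER_MODULE() entries register the same driver on both the
        * pci and cardbus buses, and hang the generic miibus driver off of
        * fxp, matching the MODULE_DEPEND() declarations above.
        */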
  285 /*
  286  * Wait for the previous command to be accepted (but not necessarily
  287  * completed).
  288  */
  289 static void
  290 fxp_scb_wait(struct fxp_softc *sc)
  291 {
  292         int i = 10000;
  293 
  294         while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
  295                 DELAY(2);
  296         if (i == 0)
  297                 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
  298                     CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
  299                     CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
  300                     CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
  301                     CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
  302 }
  303 
  304 static void
  305 fxp_scb_cmd(struct fxp_softc *sc, int cmd)
  306 {
  307 
  308         if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
  309                 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
  310                 fxp_scb_wait(sc);
  311         }
  312         CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
  313 }
  314 
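       /*
        * Typical use of the two helpers above elsewhere in this file
        * (a sketch; some_dma_address is a placeholder): wait for the
        * previous command to be accepted, load the SCB general pointer,
        * then issue the next command.
        *
        *      fxp_scb_wait(sc);
        *      CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, some_dma_address);
        *      fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
        */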
  315 static void
  316 fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
  317     bus_dma_tag_t dmat, bus_dmamap_t map)
  318 {
  319         int i = 10000;
  320 
  321         bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
  322         while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
  323                 DELAY(2);
  324                 bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
  325         }
  326         if (i == 0)
  327                 device_printf(sc->dev, "DMA timeout\n");
  328 }
  329 
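       /*
        * Note that fxp_dma_wait() repeats the BUS_DMASYNC_POSTREAD sync on
        * every poll so the CPU re-reads the completion bit the device DMAs
        * into *status rather than a stale cached copy.
        */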
  330 /*
  331  * Return identification string if this device is ours.
  332  */
  333 static int
  334 fxp_probe(device_t dev)
  335 {
  336         uint16_t devid;
  337         uint8_t revid;
  338         struct fxp_ident *ident;
  339 
  340         if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
  341                 devid = pci_get_device(dev);
  342                 revid = pci_get_revid(dev);
  343                 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
  344                         if (ident->devid == devid &&
  345                             (ident->revid == revid || ident->revid == -1)) {
  346                                 device_set_desc(dev, ident->name);
  347                                 return (BUS_PROBE_DEFAULT);
  348                         }
  349                 }
  350         }
  351         return (ENXIO);
  352 }
  353 
  354 static void
  355 fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  356 {
  357         uint32_t *addr;
  358 
  359         if (error)
  360                 return;
  361 
  362         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  363         addr = arg;
  364         *addr = segs->ds_addr;
  365 }
  366 
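       /*
        * fxp_dma_map_addr() is the callback handed to bus_dmamap_load()
        * for the single-segment control structures: it copies the lone
        * physical segment address into the uint32_t the caller passed as
        * the callback argument (see the stats, TxCB list and multicast
        * setup loads in fxp_attach() below).
        */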
  367 static int
  368 fxp_attach(device_t dev)
  369 {
  370         struct fxp_softc *sc;
  371         struct fxp_cb_tx *tcbp;
  372         struct fxp_tx *txp;
  373         struct fxp_rx *rxp;
  374         struct ifnet *ifp;
  375         uint32_t val;
  376         uint16_t data, myea[ETHER_ADDR_LEN / 2];
  377         int i, rid, m1, m2, prefer_iomap, maxtxseg;
  378         int error, s;
  379 
  380         error = 0;
  381         sc = device_get_softc(dev);
  382         sc->dev = dev;
  383         callout_init(&sc->stat_ch, CALLOUT_MPSAFE);
  384         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  385             MTX_DEF);
  386         ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
  387             fxp_serial_ifmedia_sts);
  388 
  389         s = splimp();
  390 
  391         /*
  392          * Enable bus mastering.
  393          */
  394         pci_enable_busmaster(dev);
  395         val = pci_read_config(dev, PCIR_COMMAND, 2);
  396 
  397         /*
  398          * Figure out which we should try first - memory mapping or i/o mapping?
  399          * We default to memory mapping. Then we accept an override from the
  400          * command line. Then we check to see which one is enabled.
  401          */
  402         m1 = PCIM_CMD_MEMEN;
  403         m2 = PCIM_CMD_PORTEN;
  404         prefer_iomap = 0;
  405         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
  406             "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
  407                 m1 = PCIM_CMD_PORTEN;
  408                 m2 = PCIM_CMD_MEMEN;
  409         }
  410 
  411         sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
  412         sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
  413         sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd, RF_ACTIVE);
  414         if (sc->mem == NULL) {
  415                 sc->rtp =
  416                     (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
  417                 sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
  418                 sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd,
  419                                             RF_ACTIVE);
  420         }
  421 
  422         if (!sc->mem) {
  423                 error = ENXIO;
  424                 goto fail;
  425         }
  426         if (bootverbose) {
  427                 device_printf(dev, "using %s space register mapping\n",
  428                    sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
  429         }
  430 
  431         sc->sc_st = rman_get_bustag(sc->mem);
  432         sc->sc_sh = rman_get_bushandle(sc->mem);
  433 
  434         /*
  435          * Allocate our interrupt.
  436          */
  437         rid = 0;
  438         sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  439                                  RF_SHAREABLE | RF_ACTIVE);
  440         if (sc->irq == NULL) {
  441                 device_printf(dev, "could not map interrupt\n");
  442                 error = ENXIO;
  443                 goto fail;
  444         }
  445 
  446         /*
  447          * Reset to a stable state.
  448          */
  449         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
  450         DELAY(10);
  451 
  452         /*
  453          * Find out how large of an SEEPROM we have.
  454          */
  455         fxp_autosize_eeprom(sc);
  456 
  457         /*
  458          * Find out the chip revision; lump all 82557 revs together.
  459          */
  460         fxp_read_eeprom(sc, &data, 5, 1);
  461         if ((data >> 8) == 1)
  462                 sc->revision = FXP_REV_82557;
  463         else
  464                 sc->revision = pci_get_revid(dev);
  465 
  466         /*
  467          * Determine whether we must use the 503 serial interface.
  468          */
  469         fxp_read_eeprom(sc, &data, 6, 1);
  470         if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
  471             && (data & FXP_PHY_SERIAL_ONLY))
  472                 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
  473 
  474         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  475             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  476             OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
  477             &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
  478             "FXP driver receive interrupt microcode bundling delay");
  479         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  480             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  481             OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
  482             &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
  483             "FXP driver receive interrupt microcode bundle size limit");
  484         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  485             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  486             OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
  487             "FXP RNR events");
  488         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  489             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  490             OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
  491             "FXP flow control disabled");
  492 
  493         /*
  494          * Pull in device tunables.
  495          */
  496         sc->tunable_int_delay = TUNABLE_INT_DELAY;
  497         sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
  498         sc->tunable_noflow = 1;
  499         (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
  500             "int_delay", &sc->tunable_int_delay);
  501         (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
  502             "bundle_max", &sc->tunable_bundle_max);
  503         (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
  504             "noflow", &sc->tunable_noflow);
  505         sc->rnr = 0;
  506 
  507         /*
  508          * Enable workarounds for certain chip revision deficiencies.
  509          *
  510          * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
   511  * some systems based on a normal 82559 design, have a defect where
  512          * the chip can cause a PCI protocol violation if it receives
  513          * a CU_RESUME command when it is entering the IDLE state.  The 
  514          * workaround is to disable Dynamic Standby Mode, so the chip never
  515          * deasserts CLKRUN#, and always remains in an active state.
  516          *
  517          * See Intel 82801BA/82801BAM Specification Update, Errata #30.
  518          */
  519         i = pci_get_device(dev);
  520         if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
  521             sc->revision >= FXP_REV_82559_A0) {
  522                 fxp_read_eeprom(sc, &data, 10, 1);
  523                 if (data & 0x02) {                      /* STB enable */
  524                         uint16_t cksum;
  525                         int i;
  526 
  527                         device_printf(dev,
  528                             "Disabling dynamic standby mode in EEPROM\n");
  529                         data &= ~0x02;
  530                         fxp_write_eeprom(sc, &data, 10, 1);
  531                         device_printf(dev, "New EEPROM ID: 0x%x\n", data);
  532                         cksum = 0;
  533                         for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
  534                                 fxp_read_eeprom(sc, &data, i, 1);
  535                                 cksum += data;
  536                         }
  537                         i = (1 << sc->eeprom_size) - 1;
  538                         cksum = 0xBABA - cksum;
  539                         fxp_read_eeprom(sc, &data, i, 1);
  540                         fxp_write_eeprom(sc, &cksum, i, 1);
  541                         device_printf(dev,
  542                             "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
  543                             i, data, cksum);
  544 #if 1
  545                         /*
  546                          * If the user elects to continue, try the software
  547                          * workaround, as it is better than nothing.
  548                          */
  549                         sc->flags |= FXP_FLAG_CU_RESUME_BUG;
  550 #endif
  551                 }
  552         }
  553 
  554         /*
  555          * If we are not a 82557 chip, we can enable extended features.
  556          */
  557         if (sc->revision != FXP_REV_82557) {
  558                 /*
  559                  * If MWI is enabled in the PCI configuration, and there
  560                  * is a valid cacheline size (8 or 16 dwords), then tell
  561                  * the board to turn on MWI.
  562                  */
  563                 if (val & PCIM_CMD_MWRICEN &&
  564                     pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
  565                         sc->flags |= FXP_FLAG_MWI_ENABLE;
  566 
  567                 /* turn on the extended TxCB feature */
  568                 sc->flags |= FXP_FLAG_EXT_TXCB;
  569 
  570                 /* enable reception of long frames for VLAN */
  571                 sc->flags |= FXP_FLAG_LONG_PKT_EN;
  572         } else {
  573                 /* a hack to get long VLAN frames on a 82557 */
  574                 sc->flags |= FXP_FLAG_SAVE_BAD;
  575         }
  576 
  577         /*
  578          * Enable use of extended RFDs and TCBs for 82550
  579          * and later chips. Note: we need extended TXCB support
  580          * too, but that's already enabled by the code above.
  581          * Be careful to do this only on the right devices.
  582          */
  583         if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
  584             sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
  585             || sc->revision == FXP_REV_82551_10) {
  586                 sc->rfa_size = sizeof (struct fxp_rfa);
  587                 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
  588                 sc->flags |= FXP_FLAG_EXT_RFA;
  589         } else {
  590                 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
  591                 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
  592         }
  593 
  594         /*
  595          * Allocate DMA tags and DMA safe memory.
  596          */
  597         maxtxseg = sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG;
  598         error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
  599             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * maxtxseg,
  600             maxtxseg, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->fxp_mtag);
  601         if (error) {
  602                 device_printf(dev, "could not allocate dma tag\n");
  603                 goto fail;
  604         }
  605 
  606         error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
  607             BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
  608             sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant,
  609             &sc->fxp_stag);
  610         if (error) {
  611                 device_printf(dev, "could not allocate dma tag\n");
  612                 goto fail;
  613         }
  614 
  615         error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
  616             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
  617         if (error)
  618                 goto fail;
  619         error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
  620             sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
  621         if (error) {
  622                 device_printf(dev, "could not map the stats buffer\n");
  623                 goto fail;
  624         }
  625 
  626         error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
  627             BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
  628             FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag);
  629         if (error) {
  630                 device_printf(dev, "could not allocate dma tag\n");
  631                 goto fail;
  632         }
  633 
  634         error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
  635             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
  636         if (error)
  637                 goto fail;
  638 
  639         error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
  640             sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
  641             &sc->fxp_desc.cbl_addr, 0);
  642         if (error) {
  643                 device_printf(dev, "could not map DMA memory\n");
  644                 goto fail;
  645         }
  646 
  647         error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
  648             BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
  649             sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant,
  650             &sc->mcs_tag);
  651         if (error) {
  652                 device_printf(dev, "could not allocate dma tag\n");
  653                 goto fail;
  654         }
  655 
  656         error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
  657             BUS_DMA_NOWAIT, &sc->mcs_map);
  658         if (error)
  659                 goto fail;
  660         error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
  661             sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
  662         if (error) {
  663                 device_printf(dev, "can't map the multicast setup command\n");
  664                 goto fail;
  665         }
  666 
  667         /*
  668          * Pre-allocate the TX DMA maps and setup the pointers to
  669          * the TX command blocks.
  670          */
  671         txp = sc->fxp_desc.tx_list;
  672         tcbp = sc->fxp_desc.cbl_list;
  673         for (i = 0; i < FXP_NTXCB; i++) {
  674                 txp[i].tx_cb = tcbp + i;
  675                 error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
  676                 if (error) {
  677                         device_printf(dev, "can't create DMA map for TX\n");
  678                         goto fail;
  679                 }
  680         }
  681         error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
  682         if (error) {
  683                 device_printf(dev, "can't create spare DMA map\n");
  684                 goto fail;
  685         }
  686 
  687         /*
  688          * Pre-allocate our receive buffers.
  689          */
  690         sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
  691         for (i = 0; i < FXP_NRFABUFS; i++) {
  692                 rxp = &sc->fxp_desc.rx_list[i];
  693                 error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
  694                 if (error) {
  695                         device_printf(dev, "can't create DMA map for RX\n");
  696                         goto fail;
  697                 }
  698                 if (fxp_add_rfabuf(sc, rxp) != 0) {
  699                         error = ENOMEM;
  700                         goto fail;
  701                 }
  702         }
  703 
  704         /*
  705          * Read MAC address.
  706          */
  707         fxp_read_eeprom(sc, myea, 0, 3);
  708         sc->arpcom.ac_enaddr[0] = myea[0] & 0xff;
  709         sc->arpcom.ac_enaddr[1] = myea[0] >> 8;
  710         sc->arpcom.ac_enaddr[2] = myea[1] & 0xff;
  711         sc->arpcom.ac_enaddr[3] = myea[1] >> 8;
  712         sc->arpcom.ac_enaddr[4] = myea[2] & 0xff;
  713         sc->arpcom.ac_enaddr[5] = myea[2] >> 8;
  714         if (bootverbose) {
  715                 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
  716                     pci_get_vendor(dev), pci_get_device(dev),
  717                     pci_get_subvendor(dev), pci_get_subdevice(dev),
  718                     pci_get_revid(dev));
  719                 fxp_read_eeprom(sc, &data, 10, 1);
  720                 device_printf(dev, "Dynamic Standby mode is %s\n",
  721                     data & 0x02 ? "enabled" : "disabled");
  722         }
  723 
  724         /*
  725          * If this is only a 10Mbps device, then there is no MII, and
  726          * the PHY will use a serial interface instead.
  727          *
  728          * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
  729          * doesn't have a programming interface of any sort.  The
  730          * media is sensed automatically based on how the link partner
  731          * is configured.  This is, in essence, manual configuration.
  732          */
  733         if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
  734                 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
  735                 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
  736         } else {
  737                 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
  738                     fxp_ifmedia_sts)) {
  739                         device_printf(dev, "MII without any PHY!\n");
  740                         error = ENXIO;
  741                         goto fail;
  742                 }
  743         }
  744 
  745         ifp = &sc->arpcom.ac_if;
  746         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  747         ifp->if_baudrate = 100000000;
  748         ifp->if_init = fxp_init;
  749         ifp->if_softc = sc;
  750         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  751         ifp->if_ioctl = fxp_ioctl;
  752         ifp->if_start = fxp_start;
  753         ifp->if_watchdog = fxp_watchdog;
  754 
  755         ifp->if_capabilities = ifp->if_capenable = 0;
  756 
  757         /* Enable checksum offload for 82550 or better chips */
  758         if (sc->flags & FXP_FLAG_EXT_RFA) {
  759                 ifp->if_hwassist = FXP_CSUM_FEATURES;
  760                 ifp->if_capabilities |= IFCAP_HWCSUM;
  761                 ifp->if_capenable |= IFCAP_HWCSUM;
  762         }
  763 
  764 #ifdef DEVICE_POLLING
  765         /* Inform the world we support polling. */
  766         ifp->if_capabilities |= IFCAP_POLLING;
  767         ifp->if_capenable |= IFCAP_POLLING;
  768 #endif
  769 
  770         /*
  771          * Attach the interface.
  772          */
  773         ether_ifattach(ifp, sc->arpcom.ac_enaddr);
  774 
  775         /*
  776          * Tell the upper layer(s) we support long frames.
  777          * Must appear after the call to ether_ifattach() because
  778          * ether_ifattach() sets ifi_hdrlen to the default value.
  779          */
  780         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  781         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  782         ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
  783 
  784         /*
  785          * Let the system queue as many packets as we have available
  786          * TX descriptors.
  787          */
  788         IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
  789         ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
  790         IFQ_SET_READY(&ifp->if_snd);
  791 
  792         /* 
  793          * Hook our interrupt after all initialization is complete.
  794          */
  795         error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
  796                                fxp_intr, sc, &sc->ih);
  797         if (error) {
  798                 device_printf(dev, "could not setup irq\n");
  799                 ether_ifdetach(&sc->arpcom.ac_if);
  800                 goto fail;
  801         }
  802 
  803 fail:
  804         splx(s);
  805         if (error)
  806                 fxp_release(sc);
  807         return (error);
  808 }
  809 
  810 /*
  811  * Release all resources.  The softc lock should not be held and the
  812  * interrupt should already be torn down.
  813  */
  814 static void
  815 fxp_release(struct fxp_softc *sc)
  816 {
  817         struct fxp_rx *rxp;
  818         struct fxp_tx *txp;
  819         int i;
  820 
  821         FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
  822         KASSERT(sc->ih == NULL,
  823             ("fxp_release() called with intr handle still active"));
  824         if (sc->miibus)
  825                 device_delete_child(sc->dev, sc->miibus);
  826         bus_generic_detach(sc->dev);
  827         ifmedia_removeall(&sc->sc_media);
  828         if (sc->fxp_desc.cbl_list) {
  829                 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
  830                 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
  831                     sc->cbl_map);
  832         }
  833         if (sc->fxp_stats) {
  834                 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
  835                 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
  836         }
  837         if (sc->mcsp) {
  838                 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
  839                 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
  840         }
  841         if (sc->irq)
  842                 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
  843         if (sc->mem)
  844                 bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem);
  845         if (sc->fxp_mtag) {
  846                 for (i = 0; i < FXP_NRFABUFS; i++) {
  847                         rxp = &sc->fxp_desc.rx_list[i];
  848                         if (rxp->rx_mbuf != NULL) {
  849                                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
  850                                     BUS_DMASYNC_POSTREAD);
  851                                 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
  852                                 m_freem(rxp->rx_mbuf);
  853                         }
  854                         bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
  855                 }
  856                 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
  857                 for (i = 0; i < FXP_NTXCB; i++) {
  858                         txp = &sc->fxp_desc.tx_list[i];
  859                         if (txp->tx_mbuf != NULL) {
  860                                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
  861                                     BUS_DMASYNC_POSTWRITE);
  862                                 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
  863                                 m_freem(txp->tx_mbuf);
  864                         }
  865                         bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
  866                 }
  867                 bus_dma_tag_destroy(sc->fxp_mtag);
  868         }
  869         if (sc->fxp_stag)
  870                 bus_dma_tag_destroy(sc->fxp_stag);
  871         if (sc->cbl_tag)
  872                 bus_dma_tag_destroy(sc->cbl_tag);
  873         if (sc->mcs_tag)
  874                 bus_dma_tag_destroy(sc->mcs_tag);
  875 
  876         mtx_destroy(&sc->sc_mtx);
  877 }
  878 
  879 /*
  880  * Detach interface.
  881  */
  882 static int
  883 fxp_detach(device_t dev)
  884 {
  885         struct fxp_softc *sc = device_get_softc(dev);
  886         int s;
  887 
  888         FXP_LOCK(sc);
  889         s = splimp();
  890 
  891         sc->suspended = 1;      /* Do same thing as we do for suspend */
  892         /*
  893          * Close down routes etc.
  894          */
  895         ether_ifdetach(&sc->arpcom.ac_if);
  896 
  897         /*
  898          * Stop DMA and drop transmit queue, but disable interrupts first.
  899          */
  900         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
  901         fxp_stop(sc);
  902         FXP_UNLOCK(sc);
  903 
  904         /*
  905          * Unhook interrupt before dropping lock. This is to prevent
  906          * races with fxp_intr().
  907          */
  908         bus_teardown_intr(sc->dev, sc->irq, sc->ih);
  909         sc->ih = NULL;
  910 
  911         splx(s);
  912 
  913         /* Release our allocated resources. */
  914         fxp_release(sc);
  915         return (0);
  916 }
  917 
  918 /*
  919  * Device shutdown routine. Called at system shutdown after sync. The
  920  * main purpose of this routine is to shut off receiver DMA so that
  921  * kernel memory doesn't get clobbered during warmboot.
  922  */
  923 static int
  924 fxp_shutdown(device_t dev)
  925 {
  926         /*
  927          * Make sure that DMA is disabled prior to reboot. Not doing
   928  * so could allow DMA to corrupt kernel memory during the
  929          * reboot before the driver initializes.
  930          */
  931         fxp_stop((struct fxp_softc *) device_get_softc(dev));
  932         return (0);
  933 }
  934 
  935 /*
  936  * Device suspend routine.  Stop the interface and save some PCI
  937  * settings in case the BIOS doesn't restore them properly on
  938  * resume.
  939  */
  940 static int
  941 fxp_suspend(device_t dev)
  942 {
  943         struct fxp_softc *sc = device_get_softc(dev);
  944         int i, s;
  945 
  946         FXP_LOCK(sc);
  947         s = splimp();
  948 
  949         fxp_stop(sc);
  950         
  951         for (i = 0; i < 5; i++)
  952                 sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4);
  953         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
  954         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
  955         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
  956         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
  957 
  958         sc->suspended = 1;
  959 
  960         FXP_UNLOCK(sc);
  961         splx(s);
  962         return (0);
  963 }
  964 
  965 /*
  966  * Device resume routine.  Restore some PCI settings in case the BIOS
  967  * doesn't, re-enable busmastering, and restart the interface if
  968  * appropriate.
  969  */
  970 static int
  971 fxp_resume(device_t dev)
  972 {
  973         struct fxp_softc *sc = device_get_softc(dev);
  974         struct ifnet *ifp = &sc->sc_if;
  975         uint16_t pci_command;
  976         int i, s;
  977 
  978         FXP_LOCK(sc);
  979         s = splimp();
  980 
  981         /* better way to do this? */
  982         for (i = 0; i < 5; i++)
  983                 pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4);
  984         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
  985         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
  986         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
  987         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
  988 
  989         /* reenable busmastering */
  990         pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
  991         pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
  992         pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
  993 
  994         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
  995         DELAY(10);
  996 
  997         /* reinitialize interface if necessary */
  998         if (ifp->if_flags & IFF_UP)
  999                 fxp_init_body(sc);
 1000 
 1001         sc->suspended = 0;
 1002 
 1003         FXP_UNLOCK(sc);
 1004         splx(s);
 1005         return (0);
 1006 }
 1007 
 1008 static void 
 1009 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
 1010 {
 1011         uint16_t reg;
 1012         int x;
 1013 
 1014         /*
 1015          * Shift in data.
 1016          */
 1017         for (x = 1 << (length - 1); x; x >>= 1) {
 1018                 if (data & x)
 1019                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1020                 else
 1021                         reg = FXP_EEPROM_EECS;
 1022                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1023                 DELAY(1);
 1024                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1025                 DELAY(1);
 1026                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1027                 DELAY(1);
 1028         }
 1029 }
 1030 
 1031 /*
 1032  * Read from the serial EEPROM. Basically, you manually shift in
 1033  * the read opcode (one bit at a time) and then shift in the address,
 1034  * and then you shift out the data (all of this one bit at a time).
 1035  * The word size is 16 bits, so you have to provide the address for
 1036  * every 16 bits of data.
 1037  */
 1038 static uint16_t
 1039 fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
 1040 {
 1041         uint16_t reg, data;
 1042         int x;
 1043 
 1044         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1045         /*
 1046          * Shift in read opcode.
 1047          */
 1048         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
 1049         /*
 1050          * Shift in address.
 1051          */
 1052         data = 0;
 1053         for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
 1054                 if (offset & x)
 1055                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1056                 else
 1057                         reg = FXP_EEPROM_EECS;
 1058                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1059                 DELAY(1);
 1060                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1061                 DELAY(1);
 1062                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1063                 DELAY(1);
 1064                 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
 1065                 data++;
 1066                 if (autosize && reg == 0) {
 1067                         sc->eeprom_size = data;
 1068                         break;
 1069                 }
 1070         }
 1071         /*
 1072          * Shift out data.
 1073          */
 1074         data = 0;
 1075         reg = FXP_EEPROM_EECS;
 1076         for (x = 1 << 15; x; x >>= 1) {
 1077                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1078                 DELAY(1);
 1079                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1080                         data |= x;
 1081                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1082                 DELAY(1);
 1083         }
 1084         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1085         DELAY(1);
 1086 
 1087         return (data);
 1088 }
 1089 
 1090 static void
 1091 fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
 1092 {
 1093         int i;
 1094 
 1095         /*
 1096          * Erase/write enable.
 1097          */
 1098         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1099         fxp_eeprom_shiftin(sc, 0x4, 3);
 1100         fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
 1101         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1102         DELAY(1);
 1103         /*
 1104          * Shift in write opcode, address, data.
 1105          */
 1106         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1107         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
 1108         fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
 1109         fxp_eeprom_shiftin(sc, data, 16);
 1110         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1111         DELAY(1);
 1112         /*
 1113          * Wait for EEPROM to finish up.
 1114          */
 1115         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1116         DELAY(1);
 1117         for (i = 0; i < 1000; i++) {
 1118                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1119                         break;
 1120                 DELAY(50);
 1121         }
 1122         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1123         DELAY(1);
 1124         /*
 1125          * Erase/write disable.
 1126          */
 1127         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1128         fxp_eeprom_shiftin(sc, 0x4, 3);
 1129         fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
 1130         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1131         DELAY(1);
 1132 }
 1133 
 1134 /*
 1135  * From NetBSD:
 1136  *
 1137  * Figure out EEPROM size.
 1138  *
 1139  * 559's can have either 64-word or 256-word EEPROMs, the 558
 1140  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
  1141  * talks about the existence of 16 to 256 word EEPROMs.
 1142  *
 1143  * The only known sizes are 64 and 256, where the 256 version is used
 1144  * by CardBus cards to store CIS information.
 1145  *
 1146  * The address is shifted in msb-to-lsb, and after the last
 1147  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 1148  * after which follows the actual data. We try to detect this zero, by
 1149  * probing the data-out bit in the EEPROM control register just after
 1150  * having shifted in a bit. If the bit is zero, we assume we've
  1151  * shifted enough address bits. The data-out should be tri-stated
  1152  * before this, which should translate to a logical one.
 1153  */
 1154 static void
 1155 fxp_autosize_eeprom(struct fxp_softc *sc)
 1156 {
 1157 
 1158         /* guess maximum size of 256 words */
 1159         sc->eeprom_size = 8;
 1160 
 1161         /* autosize */
 1162         (void) fxp_eeprom_getword(sc, 0, 1);
 1163 }
 1164 
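       /*
        * After autosizing, sc->eeprom_size is the number of address bits:
        * 6 for 64-word parts and 8 for 256-word parts, i.e. the device
        * holds (1 << sc->eeprom_size) 16-bit words.
        */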
 1165 static void
 1166 fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1167 {
 1168         int i;
 1169 
 1170         for (i = 0; i < words; i++)
 1171                 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
 1172 }
 1173 
 1174 static void
 1175 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1176 {
 1177         int i;
 1178 
 1179         for (i = 0; i < words; i++)
 1180                 fxp_eeprom_putword(sc, offset + i, data[i]);
 1181 }
 1182 
 1183 static void
 1184 fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
 1185     bus_size_t mapsize, int error)
 1186 {
 1187         struct fxp_softc *sc;
 1188         struct fxp_cb_tx *txp;
 1189         int i;
 1190 
 1191         if (error)
 1192                 return;
 1193 
 1194         KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));
 1195 
 1196         sc = arg;
 1197         txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
 1198         for (i = 0; i < nseg; i++) {
 1199                 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
 1200                 /*
 1201                  * If this is an 82550/82551, then we're using extended
 1202                  * TxCBs _and_ we're using checksum offload. This means
 1203                  * that the TxCB is really an IPCB. One major difference
 1204                  * between the two is that with plain extended TxCBs,
 1205                  * the bottom half of the TxCB contains two entries from
 1206                  * the TBD array, whereas IPCBs contain just one entry:
 1207                  * one entry (8 bytes) has been sacrificed for the TCP/IP
 1208                  * checksum offload control bits. So to make things work
 1209                  * right, we have to start filling in the TBD array
 1210                  * starting from a different place depending on whether
 1211                  * the chip is an 82550/82551 or not.
 1212                  */
 1213                 if (sc->flags & FXP_FLAG_EXT_RFA) {
 1214                         txp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
 1215                         txp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
 1216                 } else {
 1217                         txp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
 1218                         txp->tbd[i].tb_size = htole32(segs[i].ds_len);
 1219                 }
 1220         }
 1221         txp->tbd_number = nseg;
 1222 }
 1223 
 1224 /*
 1225  * Grab the softc lock and call the real fxp_start_body() routine
 1226  */
 1227 static void
 1228 fxp_start(struct ifnet *ifp)
 1229 {
 1230         struct fxp_softc *sc = ifp->if_softc;
 1231 
 1232         FXP_LOCK(sc);
 1233         fxp_start_body(ifp);
 1234         FXP_UNLOCK(sc);
 1235 }
 1236 
 1237 /*
 1238  * Start packet transmission on the interface.  
 1239  * This routine must be called with the softc lock held, and is an
 1240  * internal entry point only.
 1241  */
 1242 static void
 1243 fxp_start_body(struct ifnet *ifp)
 1244 {
 1245         struct fxp_softc *sc = ifp->if_softc;
 1246         struct fxp_tx *txp;
 1247         struct mbuf *mb_head;
 1248         int error;
 1249 
 1250         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1251         /*
 1252          * See if we need to suspend xmit until the multicast filter
 1253          * has been reprogrammed (which can only be done at the head
 1254          * of the command chain).
 1255          */
 1256         if (sc->need_mcsetup) {
 1257                 return;
 1258         }
 1259 
 1260         txp = NULL;
 1261 
 1262         /*
 1263          * We're finished if there is nothing more to add to the list or if
 1264          * we're all filled up with buffers to transmit.
 1265          * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
 1266          *       a NOP command when needed.
 1267          */
 1268         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1269             sc->tx_queued < FXP_NTXCB - 1) {
 1270 
 1271                 /*
 1272                  * Grab a packet to transmit.
 1273                  */
 1274                 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
 1275                 if (mb_head == NULL)
 1276                         break;
 1277 
 1278                 /*
 1279                  * Get pointer to next available tx desc.
 1280                  */
 1281                 txp = sc->fxp_desc.tx_last->tx_next;
 1282 
 1283                 /*
 1284                  * A note in Appendix B of the Intel 8255x 10/100 Mbps
 1285                  * Ethernet Controller Family Open Source Software
 1286                  * Developer Manual says:
 1287                  *   Using software parsing is only allowed with legal
 1288                  *   TCP/IP or UDP/IP packets.
 1289                  *   ...
 1290                  *   For all other datagrams, hardware parsing must
 1291                  *   be used.
 1292                  * Software parsing appears to truncate ICMP and
 1293                  * fragmented UDP packets that contain one to three
 1294                  * bytes in the second (and final) mbuf of the packet.
 1295                  */
 1296                 if (sc->flags & FXP_FLAG_EXT_RFA)
 1297                         txp->tx_cb->ipcb_ip_activation_high =
 1298                             FXP_IPCB_HARDWAREPARSING_ENABLE;
 1299 
 1300                 /*
 1301                  * Deal with TCP/IP checksum offload. Note that
 1302                  * in order for TCP checksum offload to work,
 1303                  * the pseudo header checksum must have already
 1304                  * been computed and stored in the checksum field
 1305                  * in the TCP header. The stack should have
 1306                  * already done this for us.
 1307                  */
 1308 
 1309                 if (mb_head->m_pkthdr.csum_flags) {
 1310                         if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
 1311                                 txp->tx_cb->ipcb_ip_schedule =
 1312                                     FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
 1313                                 if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
 1314                                         txp->tx_cb->ipcb_ip_schedule |=
 1315                                             FXP_IPCB_TCP_PACKET;
 1316                         }
 1317 #ifdef FXP_IP_CSUM_WAR
 1318                 /*
 1319                  * XXX The 82550 chip appears to have trouble
 1320                  * dealing with IP header checksums in very small
 1321                  * datagrams, namely fragments from 1 to 3 bytes
 1322                  * in size. For example, say you want to transmit
 1323                  * a UDP packet of 1473 bytes. The packet will be
 1324                  * fragmented over two IP datagrams, the latter
 1325                  * containing only one byte of data. The 82550 will
 1326                  * botch the header checksum on the 1-byte fragment.
 1327                  * As long as the datagram contains 4 or more bytes
 1328                  * of data, you're ok.
 1329                  *
 1330                  * The following code attempts to work around this
 1331                  * problem: if the datagram is less than 38 bytes
 1332                  * in size (14 bytes ether header, 20 bytes IP header,
 1333                  * plus 4 bytes of data), we punt and compute the IP
 1334                  * header checksum by hand. This workaround doesn't
 1335                  * work very well, however, since it can be fooled
 1336                  * by things like VLAN tags and IP options that make
 1337                  * the header sizes/offsets vary.
 1338                  */
 1339 
 1340                         if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
 1341                                 if (mb_head->m_pkthdr.len < 38) {
 1342                                         struct ip *ip;
 1343                                         mb_head->m_data += ETHER_HDR_LEN;
 1344                                         ip = mtod(mb_head, struct ip *);
 1345                                         ip->ip_sum = in_cksum(mb_head,
 1346                                             ip->ip_hl << 2);
 1347                                         mb_head->m_data -= ETHER_HDR_LEN;
 1348                                 } else {
 1349                                         txp->tx_cb->ipcb_ip_activation_high =
 1350                                             FXP_IPCB_HARDWAREPARSING_ENABLE;
 1351                                         txp->tx_cb->ipcb_ip_schedule |=
 1352                                             FXP_IPCB_IP_CHECKSUM_ENABLE;
 1353                                 }
 1354                         }
 1355 #endif
 1356                 }
 1357 
 1358                 /*
 1359                  * Go through each of the mbufs in the chain and initialize
 1360                  * the transmit buffer descriptors with the physical address
 1361                  * and size of the mbuf.
 1362                  */
 1363                 error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
 1364                     mb_head, fxp_dma_map_txbuf, sc, 0);
 1365 
 1366                 if (error && error != EFBIG) {
 1367                         device_printf(sc->dev, "can't map mbuf (error %d)\n",
 1368                             error);
 1369                         m_freem(mb_head);
 1370                         break;
 1371                 }
 1372 
 1373                 if (error) {
 1374                         struct mbuf *mn;
 1375 
 1376                         /*
 1377                          * We ran out of segments. We have to recopy this
 1378                          * mbuf chain first. Bail out if we can't get the
 1379                          * new buffers.
 1380                          */
 1381                         mn = m_defrag(mb_head, M_DONTWAIT);
 1382                         if (mn == NULL) {
 1383                                 m_freem(mb_head);
 1384                                 break;
 1385                         } else {
 1386                                 mb_head = mn;
 1387                         }
 1388                         error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
 1389                             mb_head, fxp_dma_map_txbuf, sc, 0);
 1390                         if (error) {
 1391                                 device_printf(sc->dev,
 1392                                     "can't map mbuf (error %d)\n", error);
 1393                                 m_freem(mb_head);
 1394                                 break;
 1395                         }
 1396                 }
 1397 
 1398                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
 1399                     BUS_DMASYNC_PREWRITE);
 1400 
 1401                 txp->tx_mbuf = mb_head;
 1402                 txp->tx_cb->cb_status = 0;
 1403                 txp->tx_cb->byte_count = 0;
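              /*
               * Descriptive note: the I (interrupt-on-completion) bit is set
               * below only when the queue depth reaches FXP_CXINT_THRESH - 1,
               * apparently so that transmit completion interrupts are
               * coalesced rather than taken for every packet; these are also
               * the descriptors that arm the 5 second watchdog.
               */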
 1404                 if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
 1405                         txp->tx_cb->cb_command =
 1406                             htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1407                             FXP_CB_COMMAND_S);
 1408                 } else {
 1409                         txp->tx_cb->cb_command =
 1410                             htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1411                             FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
 1412                         /*
 1413                          * Set a 5 second timer just in case we don't hear
 1414                          * from the card again.
 1415                          */
 1416                         ifp->if_timer = 5;
 1417                 }
 1418                 txp->tx_cb->tx_threshold = tx_threshold;
 1419 
 1420                 /*
 1421                  * Advance the end of list forward.
 1422                  */
 1423 
 1424 #ifdef __alpha__
 1425                 /*
 1426                  * On platforms which can't access memory in 16-bit
 1427                  * granularities, we must prevent the card from DMA'ing
 1428                  * up the status while we update the command field.
 1429                  * This could cause us to overwrite the completion status.
 1430                  * XXX This is probably bogus and we're _not_ looking
 1431                  * for atomicity here.
 1432                  */
 1433                 atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
 1434                     htole16(FXP_CB_COMMAND_S));
 1435 #else
 1436                 sc->fxp_desc.tx_last->tx_cb->cb_command &=
 1437                     htole16(~FXP_CB_COMMAND_S);
 1438 #endif /*__alpha__*/
 1439                 sc->fxp_desc.tx_last = txp;
 1440 
 1441                 /*
 1442                  * Advance the beginning of the list forward if there are
 1443                  * no other packets queued (when nothing is queued, tx_first
 1444                  * sits on the last TxCB that was sent out).
 1445                  */
 1446                 if (sc->tx_queued == 0)
 1447                         sc->fxp_desc.tx_first = txp;
 1448 
 1449                 sc->tx_queued++;
 1450 
 1451                 /*
 1452                  * Pass packet to bpf if there is a listener.
 1453                  */
 1454                 BPF_MTAP(ifp, mb_head);
 1455         }
 1456         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 1457 
 1458         /*
 1459          * We're finished. If we added to the list, issue a RESUME to get DMA
 1460          * going again if suspended.
 1461          */
 1462         if (txp != NULL) {
 1463                 fxp_scb_wait(sc);
 1464                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
 1465         }
 1466 }
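
      /*
       * A descriptive summary of the append protocol used in fxp_start_body()
       * above (no additional logic): each new TxCB is queued with the S
       * (suspend) bit set, the S bit is then cleared on the previous tail so
       * the CU can run past it, and a CU_RESUME is issued afterwards in case
       * the CU had already suspended on the old tail.
       */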
 1467 
 1468 #ifdef DEVICE_POLLING
 1469 static poll_handler_t fxp_poll;
 1470 
 1471 static void
 1472 fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1473 {
 1474         struct fxp_softc *sc = ifp->if_softc;
 1475         uint8_t statack;
 1476 
 1477         FXP_LOCK(sc);
 1478         if (!(ifp->if_capenable & IFCAP_POLLING)) {
 1479                 ether_poll_deregister(ifp);
 1480                 cmd = POLL_DEREGISTER;
 1481         }
 1482         if (cmd == POLL_DEREGISTER) {   /* final call, enable interrupts */
 1483                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 1484                 FXP_UNLOCK(sc);
 1485                 return;
 1486         }
 1487         statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
 1488             FXP_SCB_STATACK_FR;
 1489         if (cmd == POLL_AND_CHECK_STATUS) {
 1490                 uint8_t tmp;
 1491 
 1492                 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
 1493                 if (tmp == 0xff || tmp == 0) {
 1494                         FXP_UNLOCK(sc);
 1495                         return; /* nothing to do */
 1496                 }
 1497                 tmp &= ~statack;
 1498                 /* ack what we can */
 1499                 if (tmp != 0)
 1500                         CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
 1501                 statack |= tmp;
 1502         }
 1503         fxp_intr_body(sc, ifp, statack, count);
 1504         FXP_UNLOCK(sc);
 1505 }
 1506 #endif /* DEVICE_POLLING */
 1507 
 1508 /*
 1509  * Process interface interrupts.
 1510  */
 1511 static void
 1512 fxp_intr(void *xsc)
 1513 {
 1514         struct fxp_softc *sc = xsc;
 1515         struct ifnet *ifp = &sc->sc_if;
 1516         uint8_t statack;
 1517 
 1518         FXP_LOCK(sc);
 1519         if (sc->suspended) {
 1520                 FXP_UNLOCK(sc);
 1521                 return;
 1522         }
 1523 
 1524 #ifdef DEVICE_POLLING
 1525         if (ifp->if_flags & IFF_POLLING) {
 1526                 FXP_UNLOCK(sc);
 1527                 return;
 1528         }
 1529         if ((ifp->if_capenable & IFCAP_POLLING) &&
 1530             ether_poll_register(fxp_poll, ifp)) {
 1531                 /* disable interrupts */
 1532                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 1533                 FXP_UNLOCK(sc);
 1534                 fxp_poll(ifp, 0, 1);
 1535                 return;
 1536         }
 1537 #endif
 1538         while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
 1539                 /*
 1540                  * It should not be possible to have all bits set; the
 1541                  * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If 
 1542                  * all bits are set, this may indicate that the card has
 1543                  * been physically ejected, so ignore it.
 1544                  */  
 1545                 if (statack == 0xff) {
 1546                         FXP_UNLOCK(sc);
 1547                         return;
 1548                 }
 1549 
 1550                 /*
 1551                  * First ACK all the interrupts in this pass.
 1552                  */
 1553                 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
 1554                 fxp_intr_body(sc, ifp, statack, -1);
 1555         }
 1556         FXP_UNLOCK(sc);
 1557 }
 1558 
 1559 static void
 1560 fxp_txeof(struct fxp_softc *sc)
 1561 {
 1562         struct fxp_tx *txp;
 1563 
 1564         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
 1565         for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
 1566             (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
 1567             txp = txp->tx_next) {
 1568                 if (txp->tx_mbuf != NULL) {
 1569                         bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
 1570                             BUS_DMASYNC_POSTWRITE);
 1571                         bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
 1572                         m_freem(txp->tx_mbuf);
 1573                         txp->tx_mbuf = NULL;
 1574                         /* clear this to reset csum offload bits */
 1575                         txp->tx_cb->tbd[0].tb_addr = 0;
 1576                 }
 1577                 sc->tx_queued--;
 1578         }
 1579         sc->fxp_desc.tx_first = txp;
 1580         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 1581 }
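
      /*
       * Note on fxp_txeof() above (descriptive only): completed TxCBs are
       * reclaimed in ring order starting at tx_first for as long as the chip
       * has set the C (complete) bit in cb_status, and tx_first is advanced
       * past the descriptors that were reclaimed.
       */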
 1582 
 1583 static void
 1584 fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
 1585     int count)
 1586 {
 1587         struct mbuf *m;
 1588         struct fxp_rx *rxp;
 1589         struct fxp_rfa *rfa;
 1590         int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
 1591         int fxp_rc = 0;
 1592 
 1593         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1594         if (rnr)
 1595                 sc->rnr++;
 1596 #ifdef DEVICE_POLLING
 1597         /* Pick up a deferred RNR condition if `count' ran out last time. */
 1598         if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
 1599                 sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
 1600                 rnr = 1;
 1601         }
 1602 #endif
 1603 
 1604         /*
 1605          * Free any finished transmit mbuf chains.
 1606          *
 1607          * Handle the CNA event like a CXTNO event. It used to
 1608          * be that this event (control unit not ready) was not
 1609          * encountered, but it is now with the SMPng modifications.
 1610          * The exact sequence of events that occurs when the interface
 1611          * is brought up is different now, and if this event
 1612          * goes unhandled, the configuration/rxfilter setup sequence
 1613          * can stall for several seconds. The result is that no
 1614          * packets go out onto the wire for about 5 to 10 seconds
 1615          * after the interface is ifconfig'ed for the first time.
 1616          */
 1617         if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
 1618                 fxp_txeof(sc);
 1619 
 1620                 ifp->if_timer = 0;
 1621                 if (sc->tx_queued == 0) {
 1622                         if (sc->need_mcsetup)
 1623                                 fxp_mc_setup(sc);
 1624                 }
 1625                 /*
 1626                  * Try to start more packets transmitting.
 1627                  */
 1628                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1629                         fxp_start_body(ifp);
 1630         }
 1631 
 1632         /*
 1633          * Just return if nothing happened on the receive side.
 1634          */
 1635         if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
 1636                 return;
 1637 
 1638         /*
 1639          * Process receiver interrupts. If a no-resource (RNR)
 1640          * condition exists, get whatever packets we can and
 1641          * re-start the receiver.
 1642          *
 1643          * When using polling, we do not process the list to completion,
 1644          * so when we get an RNR interrupt we must defer the restart
 1645          * until we hit the last buffer with the C bit set.
 1646          * If we run out of cycles and the RFA at rx_head has the C bit set,
 1647          * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
 1648          * that the info will be used in the subsequent polling cycle.
 1649          */
 1650         for (;;) {
 1651                 rxp = sc->fxp_desc.rx_head;
 1652                 m = rxp->rx_mbuf;
 1653                 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
 1654                     RFA_ALIGNMENT_FUDGE);
 1655                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
 1656                     BUS_DMASYNC_POSTREAD);
 1657 
 1658 #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
 1659                 if (count >= 0 && count-- == 0) {
 1660                         if (rnr) {
 1661                                 /* Defer RNR processing until the next time. */
 1662                                 sc->flags |= FXP_FLAG_DEFERRED_RNR;
 1663                                 rnr = 0;
 1664                         }
 1665                         break;
 1666                 }
 1667 #endif /* DEVICE_POLLING */
 1668 
 1669                 if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
 1670                         break;
 1671 
 1672                 /*
 1673                  * Advance head forward.
 1674                  */
 1675                 sc->fxp_desc.rx_head = rxp->rx_next;
 1676 
 1677                 /*
 1678                  * Add a new buffer to the receive chain.
 1679                  * If this fails, the old buffer is recycled
 1680                  * instead.
 1681                  */
 1682                 fxp_rc = fxp_add_rfabuf(sc, rxp);
 1683                 if (fxp_rc == 0) {
 1684                         int total_len;
 1685 
 1686                         /*
 1687                          * Fetch packet length (the top 2 bits of
 1688                          * actual_size are flags set by the controller
 1689                          * upon completion), and drop the packet in case
 1690                          * of bogus length or CRC errors.
 1691                          */
 1692                         total_len = le16toh(rfa->actual_size) & 0x3fff;
 1693                         if (total_len < sizeof(struct ether_header) ||
 1694                             total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
 1695                                 sc->rfa_size ||
 1696                             le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
 1697                                 m_freem(m);
 1698                                 continue;
 1699                         }
 1700 
 1701                         /* Do IP checksum checking. */
 1702                         if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
 1703                                 if (rfa->rfax_csum_sts &
 1704                                     FXP_RFDX_CS_IP_CSUM_BIT_VALID)
 1705                                         m->m_pkthdr.csum_flags |=
 1706                                             CSUM_IP_CHECKED;
 1707                                 if (rfa->rfax_csum_sts &
 1708                                     FXP_RFDX_CS_IP_CSUM_VALID)
 1709                                         m->m_pkthdr.csum_flags |=
 1710                                             CSUM_IP_VALID;
 1711                                 if ((rfa->rfax_csum_sts &
 1712                                     FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
 1713                                     (rfa->rfax_csum_sts &
 1714                                     FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
 1715                                         m->m_pkthdr.csum_flags |=
 1716                                             CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 1717                                         m->m_pkthdr.csum_data = 0xffff;
 1718                                 }
 1719                         }
 1720 
 1721                         m->m_pkthdr.len = m->m_len = total_len;
 1722                         m->m_pkthdr.rcvif = ifp;
 1723 
 1724                         /*
 1725                          * Drop locks before calling if_input() since it
 1726                          * may re-enter fxp_start() in the netisr case.
 1727                          * This would result in a lock reversal.  Better
 1728                          * performance might be obtained by chaining all
 1729                          * packets received, dropping the lock, and then
 1730                          * calling if_input() on each one.
 1731                          */
 1732                         FXP_UNLOCK(sc);
 1733                         (*ifp->if_input)(ifp, m);
 1734                         FXP_LOCK(sc);
 1735                 } else if (fxp_rc == ENOBUFS) {
 1736                         rnr = 0;
 1737                         break;
 1738                 }
 1739         }
 1740         if (rnr) {
 1741                 fxp_scb_wait(sc);
 1742                 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
 1743                     sc->fxp_desc.rx_head->rx_addr);
 1744                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 1745         }
 1746 }
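
      /*
       * Note on the RNR handling above (descriptive only): when the receiver
       * runs out of resources, the RU is restarted at the RFA pointed to by
       * rx_head; under DEVICE_POLLING a pending RNR recorded in
       * FXP_FLAG_DEFERRED_RNR is handled the same way on a later pass.
       */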
 1747 
 1748 /*
 1749  * Update packet in/out/collision statistics. The i82557 doesn't
 1750  * allow you to access these counters without doing a fairly
 1751  * expensive DMA to get _all_ of the statistics it maintains, so
 1752  * we do this operation here only once per second. The statistics
 1753  * counters in the kernel are updated from the previous dump-stats
 1754  * DMA and then a new dump-stats DMA is started. The on-chip
 1755  * counters are zeroed when the DMA completes. If we can't start
 1756  * the DMA immediately, we don't wait - we just prepare to read
 1757  * them again next time.
 1758  */
 1759 static void
 1760 fxp_tick(void *xsc)
 1761 {
 1762         struct fxp_softc *sc = xsc;
 1763         struct ifnet *ifp = &sc->sc_if;
 1764         struct fxp_stats *sp = sc->fxp_stats;
 1765         int s;
 1766 
 1767         FXP_LOCK(sc);
 1768         s = splimp();
 1769         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
 1770         ifp->if_opackets += le32toh(sp->tx_good);
 1771         ifp->if_collisions += le32toh(sp->tx_total_collisions);
 1772         if (sp->rx_good) {
 1773                 ifp->if_ipackets += le32toh(sp->rx_good);
 1774                 sc->rx_idle_secs = 0;
 1775         } else {
 1776                 /*
 1777                  * Receiver's been idle for another second.
 1778                  */
 1779                 sc->rx_idle_secs++;
 1780         }
 1781         ifp->if_ierrors +=
 1782             le32toh(sp->rx_crc_errors) +
 1783             le32toh(sp->rx_alignment_errors) +
 1784             le32toh(sp->rx_rnr_errors) +
 1785             le32toh(sp->rx_overrun_errors);
 1786         /*
 1787          * If any transmit underruns occurred, bump up the transmit
 1788          * threshold by another 512 bytes (64 * 8).
 1789          */
 1790         if (sp->tx_underruns) {
 1791                 ifp->if_oerrors += le32toh(sp->tx_underruns);
 1792                 if (tx_threshold < 192)
 1793                         tx_threshold += 64;
 1794         }
 1795 
 1796         /*
 1797          * Release any xmit buffers that have completed DMA. This isn't
 1798          * strictly necessary to do here, but it's advantageous for mbufs
 1799          * with external storage to be released in a timely manner rather
 1800          * than being deferred for a potentially long time. This limits
 1801          * the delay to a maximum of one second.
 1802          */ 
 1803         fxp_txeof(sc);
 1804 
 1805         /*
 1806          * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
 1807          * then assume the receiver has locked up and attempt to clear
 1808          * the condition by reprogramming the multicast filter. This is
 1809          * a work-around for a bug in the 82557 where the receiver locks
 1810          * up if it gets certain types of garbage in the synchronization
 1811          * bits prior to the packet header. This bug is supposed to only
 1812          * occur in 10Mbps mode, but has been seen to occur in 100Mbps
 1813          * mode as well (perhaps due to a 10/100 speed transition).
 1814          */
 1815         if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
 1816                 sc->rx_idle_secs = 0;
 1817                 fxp_mc_setup(sc);
 1818         }
 1819         /*
 1820          * If there is no pending command, start another stats
 1821          * dump. Otherwise punt for now.
 1822          */
 1823         if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
 1824                 /*
 1825                  * Start another stats dump.
 1826                  */
 1827                 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 1828                     BUS_DMASYNC_PREREAD);
 1829                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
 1830         } else {
 1831                 /*
 1832                  * A previous command is still waiting to be accepted.
 1833                  * Just zero our copy of the stats and wait for the
 1834                  * next timer event to update them.
 1835                  */
 1836                 sp->tx_good = 0;
 1837                 sp->tx_underruns = 0;
 1838                 sp->tx_total_collisions = 0;
 1839 
 1840                 sp->rx_good = 0;
 1841                 sp->rx_crc_errors = 0;
 1842                 sp->rx_alignment_errors = 0;
 1843                 sp->rx_rnr_errors = 0;
 1844                 sp->rx_overrun_errors = 0;
 1845         }
 1846         if (sc->miibus != NULL)
 1847                 mii_tick(device_get_softc(sc->miibus));
 1848 
 1849         /*
 1850          * Schedule another timeout one second from now.
 1851          */
 1852         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 1853         FXP_UNLOCK(sc);
 1854         splx(s);
 1855 }
 1856 
 1857 /*
 1858  * Stop the interface. Cancels the statistics updater and resets
 1859  * the interface.
 1860  */
 1861 static void
 1862 fxp_stop(struct fxp_softc *sc)
 1863 {
 1864         struct ifnet *ifp = &sc->sc_if;
 1865         struct fxp_tx *txp;
 1866         int i;
 1867 
 1868         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 1869         ifp->if_timer = 0;
 1870 
 1871 #ifdef DEVICE_POLLING
 1872         ether_poll_deregister(ifp);
 1873 #endif
 1874         /*
 1875          * Cancel stats updater.
 1876          */
 1877         callout_stop(&sc->stat_ch);
 1878 
 1879         /*
 1880          * Issue software reset, which also unloads the microcode.
 1881          */
 1882         sc->flags &= ~FXP_FLAG_UCODE;
 1883         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
 1884         DELAY(50);
 1885 
 1886         /*
 1887          * Release any xmit buffers.
 1888          */
 1889         txp = sc->fxp_desc.tx_list;
 1890         if (txp != NULL) {
 1891                 for (i = 0; i < FXP_NTXCB; i++) {
 1892                         if (txp[i].tx_mbuf != NULL) {
 1893                                 bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
 1894                                     BUS_DMASYNC_POSTWRITE);
 1895                                 bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
 1896                                 m_freem(txp[i].tx_mbuf);
 1897                                 txp[i].tx_mbuf = NULL;
 1898                                 /* clear this to reset csum offload bits */
 1899                                 txp[i].tx_cb->tbd[0].tb_addr = 0;
 1900                         }
 1901                 }
 1902         }
 1903         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 1904         sc->tx_queued = 0;
 1905 }
 1906 
 1907 /*
 1908  * Watchdog/transmit timeout handler. Called when a
 1909  * transmission is started on the interface, but no interrupt is
 1910  * received before the timeout. This usually indicates that the
 1911  * card has wedged for some reason.
 1912  */
 1913 static void
 1914 fxp_watchdog(struct ifnet *ifp)
 1915 {
 1916         struct fxp_softc *sc = ifp->if_softc;
 1917 
 1918         FXP_LOCK(sc);
 1919         device_printf(sc->dev, "device timeout\n");
 1920         ifp->if_oerrors++;
 1921 
 1922         fxp_init_body(sc);
 1923         FXP_UNLOCK(sc);
 1924 }
 1925 
 1926 /*
 1927  * Acquire locks and then call the real initialization function.  This
 1928  * is necessary because ether_ioctl() calls if_init() and this would
 1929  * result in mutex recursion if the mutex was held.
 1930  */
 1931 static void
 1932 fxp_init(void *xsc)
 1933 {
 1934         struct fxp_softc *sc = xsc;
 1935 
 1936         FXP_LOCK(sc);
 1937         fxp_init_body(sc);
 1938         FXP_UNLOCK(sc);
 1939 }
 1940 
 1941 /*
 1942  * Perform device initialization. This routine must be called with the
 1943  * softc lock held.
 1944  */
 1945 static void
 1946 fxp_init_body(struct fxp_softc *sc)
 1947 {
 1948         struct ifnet *ifp = &sc->sc_if;
 1949         struct fxp_cb_config *cbp;
 1950         struct fxp_cb_ias *cb_ias;
 1951         struct fxp_cb_tx *tcbp;
 1952         struct fxp_tx *txp;
 1953         struct fxp_cb_mcs *mcsp;
 1954         int i, prm, s;
 1955 
 1956         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1957         s = splimp();
 1958         /*
 1959          * Cancel any pending I/O
 1960          */
 1961         fxp_stop(sc);
 1962 
 1963         prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
 1964 
 1965         /*
 1966          * Initialize base of CBL and RFA memory. Loading with zero
 1967          * sets it up for regular linear addressing.
 1968          */
 1969         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
 1970         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
 1971 
 1972         fxp_scb_wait(sc);
 1973         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
 1974 
 1975         /*
 1976          * Initialize base of dump-stats buffer.
 1977          */
 1978         fxp_scb_wait(sc);
 1979         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
 1980         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
 1981         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
 1982 
 1983         /*
 1984          * Attempt to load microcode if requested.
 1985          */
 1986         if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
 1987                 fxp_load_ucode(sc);
 1988 
 1989         /*
 1990          * Initialize the multicast address list.
 1991          */
 1992         if (fxp_mc_addrs(sc)) {
 1993                 mcsp = sc->mcsp;
 1994                 mcsp->cb_status = 0;
 1995                 mcsp->cb_command =
 1996                     htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
 1997                 mcsp->link_addr = 0xffffffff;
 1998                 /*
 1999                  * Start the multicast setup command.
 2000                  */
 2001                 fxp_scb_wait(sc);
 2002                 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
 2003                 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
 2004                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2005                 /* ...and wait for it to complete. */
 2006                 fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
 2007                 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
 2008                     BUS_DMASYNC_POSTWRITE);
 2009         }
 2010 
 2011         /*
 2012          * We temporarily use memory that contains the TxCB list to
 2013          * construct the config CB. The TxCB list memory is rebuilt
 2014          * later.
 2015          */
 2016         cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
 2017 
 2018         /*
 2019          * This bcopy is kind of disgusting, but there are a bunch of
 2020          * must-be-zero and must-be-one bits in this structure, and this is
 2021          * the easiest way to initialize them all to proper values.
 2022          */
 2023         bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
 2024 
 2025         cbp->cb_status =        0;
 2026         cbp->cb_command =       htole16(FXP_CB_COMMAND_CONFIG |
 2027             FXP_CB_COMMAND_EL);
 2028         cbp->link_addr =        0xffffffff;     /* (no) next command */
 2029         cbp->byte_count =       sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
 2030         cbp->rx_fifo_limit =    8;      /* rx fifo threshold (32 bytes) */
 2031         cbp->tx_fifo_limit =    0;      /* tx fifo threshold (0 bytes) */
 2032         cbp->adaptive_ifs =     0;      /* (no) adaptive interframe spacing */
 2033         cbp->mwi_enable =       sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
 2034         cbp->type_enable =      0;      /* actually reserved */
 2035         cbp->read_align_en =    sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
 2036         cbp->end_wr_on_cl =     sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
 2037         cbp->rx_dma_bytecount = 0;      /* (no) rx DMA max */
 2038         cbp->tx_dma_bytecount = 0;      /* (no) tx DMA max */
 2039         cbp->dma_mbce =         0;      /* (disable) dma max counters */
 2040         cbp->late_scb =         0;      /* (don't) defer SCB update */
 2041         cbp->direct_dma_dis =   1;      /* disable direct rcv dma mode */
 2042         cbp->tno_int_or_tco_en =0;      /* (disable) tx not okay interrupt */
 2043         cbp->ci_int =           1;      /* interrupt on CU idle */
 2044         cbp->ext_txcb_dis =     sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
 2045         cbp->ext_stats_dis =    1;      /* disable extended counters */
 2046         cbp->keep_overrun_rx =  0;      /* don't pass overrun frames to host */
 2047         cbp->save_bf =          sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
 2048         cbp->disc_short_rx =    !prm;   /* discard short packets */
 2049         cbp->underrun_retry =   1;      /* retry mode (once) on DMA underrun */
 2050         cbp->two_frames =       0;      /* do not limit FIFO to 2 frames */
 2051         cbp->dyn_tbd =          0;      /* (no) dynamic TBD mode */
 2052         cbp->ext_rfa =          sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2053         cbp->mediatype =        sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
 2054         cbp->csma_dis =         0;      /* (don't) disable link */
 2055         cbp->tcp_udp_cksum =    0;      /* (don't) enable checksum */
 2056         cbp->vlan_tco =         0;      /* (don't) enable vlan wakeup */
 2057         cbp->link_wake_en =     0;      /* (don't) assert PME# on link change */
 2058         cbp->arp_wake_en =      0;      /* (don't) assert PME# on arp */
 2059         cbp->mc_wake_en =       0;      /* (don't) enable PME# on mcmatch */
 2060         cbp->nsai =             1;      /* (don't) disable source addr insert */
 2061         cbp->preamble_length =  2;      /* (7 byte) preamble */
 2062         cbp->loopback =         0;      /* (don't) loopback */
 2063         cbp->linear_priority =  0;      /* (normal CSMA/CD operation) */
 2064         cbp->linear_pri_mode =  0;      /* (wait after xmit only) */
 2065         cbp->interfrm_spacing = 6;      /* (96 bits of) interframe spacing */
 2066         cbp->promiscuous =      prm;    /* promiscuous mode */
 2067         cbp->bcast_disable =    0;      /* (don't) disable broadcasts */
 2068         cbp->wait_after_win =   0;      /* (don't) enable modified backoff alg*/
 2069         cbp->ignore_ul =        0;      /* consider U/L bit in IA matching */
 2070         cbp->crc16_en =         0;      /* (don't) enable crc-16 algorithm */
 2071         cbp->crscdt =           sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
 2072 
 2073         cbp->stripping =        !prm;   /* truncate rx packet to byte count */
 2074         cbp->padding =          1;      /* (do) pad short tx packets */
 2075         cbp->rcv_crc_xfer =     0;      /* (don't) xfer CRC to host */
 2076         cbp->long_rx_en =       sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
 2077         cbp->ia_wake_en =       0;      /* (don't) wake up on address match */
 2078         cbp->magic_pkt_dis =    0;      /* (don't) disable magic packet */
 2079                                         /* must set wake_en in PMCSR also */
 2080         cbp->force_fdx =        0;      /* (don't) force full duplex */
 2081         cbp->fdx_pin_en =       1;      /* (enable) FDX# pin */
 2082         cbp->multi_ia =         0;      /* (don't) accept multiple IAs */
 2083         cbp->mc_all =           sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
 2084         cbp->gamla_rx =         sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2085 
 2086         if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
 2087                 /*
 2088                  * The 82557 has no hardware flow control, the values
 2089                  * below are the defaults for the chip.
 2090                  */
 2091                 cbp->fc_delay_lsb =     0;
 2092                 cbp->fc_delay_msb =     0x40;
 2093                 cbp->pri_fc_thresh =    3;
 2094                 cbp->tx_fc_dis =        0;
 2095                 cbp->rx_fc_restop =     0;
 2096                 cbp->rx_fc_restart =    0;
 2097                 cbp->fc_filter =        0;
 2098                 cbp->pri_fc_loc =       1;
 2099         } else {
 2100                 cbp->fc_delay_lsb =     0x1f;
 2101                 cbp->fc_delay_msb =     0x01;
 2102                 cbp->pri_fc_thresh =    3;
 2103                 cbp->tx_fc_dis =        0;      /* enable transmit FC */
 2104                 cbp->rx_fc_restop =     1;      /* enable FC restop frames */
 2105                 cbp->rx_fc_restart =    1;      /* enable FC restart frames */
 2106                 cbp->fc_filter =        !prm;   /* drop FC frames to host */
 2107                 cbp->pri_fc_loc =       1;      /* FC pri location (byte31) */
 2108         }
 2109 
 2110         /*
 2111          * Start the config command/DMA.
 2112          */
 2113         fxp_scb_wait(sc);
 2114         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 2115         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2116         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2117         /* ...and wait for it to complete. */
 2118         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 2119         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
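
              /*
               * Descriptive note: the sequence just above (wait for the SCB
               * command byte to clear, point the SCB general register at the
               * control block, issue CU_START, then poll the CB status with
               * fxp_dma_wait()) is the single-command handshake this routine
               * also uses for the multicast setup block earlier in this
               * function.
               */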
 2120 
 2121         /*
 2122          * Now initialize the station address. Temporarily use the TxCB
 2123          * memory area like we did above for the config CB.
 2124          */
 2125         cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
 2126         cb_ias->cb_status = 0;
 2127         cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
 2128         cb_ias->link_addr = 0xffffffff;
 2129         bcopy(sc->arpcom.ac_enaddr, cb_ias->macaddr,
 2130             sizeof(sc->arpcom.ac_enaddr));
 2131 
 2132         /*
 2133          * Start the IAS (Individual Address Setup) command/DMA.
 2134          */
 2135         fxp_scb_wait(sc);
 2136         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 2137         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2138         /* ...and wait for it to complete. */
 2139         fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
 2140         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
 2141 
 2142         /*
 2143          * Initialize transmit control block (TxCB) list.
 2144          */
 2145         txp = sc->fxp_desc.tx_list;
 2146         tcbp = sc->fxp_desc.cbl_list;
 2147         bzero(tcbp, FXP_TXCB_SZ);
 2148         for (i = 0; i < FXP_NTXCB; i++) {
 2149                 txp[i].tx_mbuf = NULL;
 2150                 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
 2151                 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
 2152                 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
 2153                     (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
 2154                 if (sc->flags & FXP_FLAG_EXT_TXCB)
 2155                         tcbp[i].tbd_array_addr =
 2156                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
 2157                 else
 2158                         tcbp[i].tbd_array_addr =
 2159                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
 2160                 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
 2161         }
 2162         /*
 2163          * Set the suspend flag on the first TxCB and start the control
 2164          * unit. It will execute the NOP and then suspend.
 2165          */
 2166         tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
 2167         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 2168         sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
 2169         sc->tx_queued = 1;
 2170 
 2171         fxp_scb_wait(sc);
 2172         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2173 
 2174         /*
 2175          * Initialize receiver buffer area - RFA.
 2176          */
 2177         fxp_scb_wait(sc);
 2178         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
 2179         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 2180 
 2181         /*
 2182          * Set current media.
 2183          */
 2184         if (sc->miibus != NULL)
 2185                 mii_mediachg(device_get_softc(sc->miibus));
 2186 
 2187         ifp->if_flags |= IFF_RUNNING;
 2188         ifp->if_flags &= ~IFF_OACTIVE;
 2189 
 2190         /*
 2191          * Enable interrupts.
 2192          */
 2193 #ifdef DEVICE_POLLING
 2194         /*
 2195          * ... but only do that if we are not polling. And because (presumably)
 2196          * the default is interrupts on, we need to disable them explicitly!
 2197          */
 2198         if ( ifp->if_flags & IFF_POLLING )
 2199                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 2200         else
 2201 #endif /* DEVICE_POLLING */
 2202         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 2203 
 2204         /*
 2205          * Start stats updater.
 2206          */
 2207         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 2208         splx(s);
 2209 }
 2210 
 2211 static int
 2212 fxp_serial_ifmedia_upd(struct ifnet *ifp)
 2213 {
 2214 
 2215         return (0);
 2216 }
 2217 
 2218 static void
 2219 fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2220 {
 2221 
 2222         ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
 2223 }
 2224 
 2225 /*
 2226  * Change media according to request.
 2227  */
 2228 static int
 2229 fxp_ifmedia_upd(struct ifnet *ifp)
 2230 {
 2231         struct fxp_softc *sc = ifp->if_softc;
 2232         struct mii_data *mii;
 2233 
 2234         mii = device_get_softc(sc->miibus);
 2235         mii_mediachg(mii);
 2236         return (0);
 2237 }
 2238 
 2239 /*
 2240  * Notify the world which media we're using.
 2241  */
 2242 static void
 2243 fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2244 {
 2245         struct fxp_softc *sc = ifp->if_softc;
 2246         struct mii_data *mii;
 2247 
 2248         mii = device_get_softc(sc->miibus);
 2249         mii_pollstat(mii);
 2250         ifmr->ifm_active = mii->mii_media_active;
 2251         ifmr->ifm_status = mii->mii_media_status;
 2252 
 2253         if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG)
 2254                 sc->cu_resume_bug = 1;
 2255         else
 2256                 sc->cu_resume_bug = 0;
 2257 }
 2258 
 2259 /*
 2260  * Add a buffer to the end of the RFA buffer list.
 2261  * Return 0 if successful, or an errno on failure. On failure the
 2262  * descriptor keeps its existing mbuf so that the caller can recycle
 2263  * it rather than deliver the packet.
 2264  * The RFA struct is stuck at the beginning of the mbuf cluster and the
 2265  * data pointer is fixed up to point just past it.
 2266  */
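      /*
       * Rough cluster layout once a buffer has been set up below, sketched
       * from the pointer arithmetic in this function (not a formal layout
       * definition):
       *
       *   ext_buf -> [RFA_ALIGNMENT_FUDGE][struct fxp_rfa, sc->rfa_size][data]
       *                                   ^rfa                           ^m_data
       *
       * The fudge keeps the packet data 32-bit aligned; the RFA itself ends
       * up misaligned, which is why le32enc() is used for its 32-bit fields.
       */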
 2267 static int
 2268 fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2269 {
 2270         struct mbuf *m;
 2271         struct fxp_rfa *rfa, *p_rfa;
 2272         struct fxp_rx *p_rx;
 2273         bus_dmamap_t tmp_map;
 2274         int error;
 2275 
 2276         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 2277         if (m == NULL)
 2278                 return (ENOBUFS);
 2279 
 2280         /*
 2281          * Move the data pointer up so that the incoming data packet
 2282          * will be 32-bit aligned.
 2283          */
 2284         m->m_data += RFA_ALIGNMENT_FUDGE;
 2285 
 2286         /*
 2287          * Get a pointer to the base of the mbuf cluster and move
 2288          * data start past it.
 2289          */
 2290         rfa = mtod(m, struct fxp_rfa *);
 2291         m->m_data += sc->rfa_size;
 2292         rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
 2293 
 2294         rfa->rfa_status = 0;
 2295         rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
 2296         rfa->actual_size = 0;
 2297 
 2298         /*
 2299          * Initialize the rest of the RFA.  Note that since the RFA
 2300          * is misaligned, we cannot store values directly.  We're thus
 2301          * using the le32enc() function which handles endianness and
 2302          * is also alignment-safe.
 2303          */
 2304         le32enc(&rfa->link_addr, 0xffffffff);
 2305         le32enc(&rfa->rbd_addr, 0xffffffff);
 2306 
 2307         /* Map the RFA into DMA memory. */
 2308         error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
 2309             MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
 2310             &rxp->rx_addr, 0);
 2311         if (error) {
 2312                 m_freem(m);
 2313                 return (error);
 2314         }
 2315 
 2316         bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
 2317         tmp_map = sc->spare_map;
 2318         sc->spare_map = rxp->rx_map;
 2319         rxp->rx_map = tmp_map;
 2320         rxp->rx_mbuf = m;
 2321 
 2322         bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
 2323             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2324 
 2325         /*
 2326          * If there are other buffers already on the list, attach this
 2327          * one to the end by fixing up the tail to point to this one.
 2328          */
 2329         if (sc->fxp_desc.rx_head != NULL) {
 2330                 p_rx = sc->fxp_desc.rx_tail;
 2331                 p_rfa = (struct fxp_rfa *)
 2332                     (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
 2333                 p_rx->rx_next = rxp;
 2334                 le32enc(&p_rfa->link_addr, rxp->rx_addr);
 2335                 p_rfa->rfa_control = 0;
 2336                 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
 2337                     BUS_DMASYNC_PREWRITE);
 2338         } else {
 2339                 rxp->rx_next = NULL;
 2340                 sc->fxp_desc.rx_head = rxp;
 2341         }
 2342         sc->fxp_desc.rx_tail = rxp;
 2343         return (0);
 2344 }
 2345 
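      /*
       * MDI control register layout as implied by the shifts used below (a
       * reading of this code, not a normative description of the 8255x):
       *   bits 15:0   data
       *   bits 20:16  PHY register number
       *   bits 25:21  PHY address
       *   bits 27:26  opcode (FXP_MDI_READ or FXP_MDI_WRITE)
       *   bit  28     ready flag (0x10000000), set when the cycle completes
       */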
 2346 static volatile int
 2347 fxp_miibus_readreg(device_t dev, int phy, int reg)
 2348 {
 2349         struct fxp_softc *sc = device_get_softc(dev);
 2350         int count = 10000;
 2351         int value;
 2352 
 2353         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2354             (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
 2355 
 2356         while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
 2357             && count--)
 2358                 DELAY(10);
 2359 
 2360         if (count <= 0)
 2361                 device_printf(dev, "fxp_miibus_readreg: timed out\n");
 2362 
 2363         return (value & 0xffff);
 2364 }
 2365 
 2366 static void
 2367 fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
 2368 {
 2369         struct fxp_softc *sc = device_get_softc(dev);
 2370         int count = 10000;
 2371 
 2372         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2373             (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
 2374             (value & 0xffff));
 2375 
 2376         while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
 2377             count--)
 2378                 DELAY(10);
 2379 
 2380         if (count <= 0)
 2381                 device_printf(dev, "fxp_miibus_writereg: timed out\n");
 2382 }
 2383 
 2384 static int
 2385 fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2386 {
 2387         struct fxp_softc *sc = ifp->if_softc;
 2388         struct ifreq *ifr = (struct ifreq *)data;
 2389         struct mii_data *mii;
 2390         int flag, mask, s, error = 0;
 2391 
 2392         /*
 2393          * Detaching causes us to call ioctl with the mutex owned.  Preclude
 2394          * that by saying we're busy if the lock is already held.
 2395          */
 2396         if (FXP_LOCKED(sc))
 2397                 return (EBUSY);
 2398 
 2399         FXP_LOCK(sc);
 2400         s = splimp();
 2401 
 2402         switch (command) {
 2403         case SIOCSIFFLAGS:
 2404                 if (ifp->if_flags & IFF_ALLMULTI)
 2405                         sc->flags |= FXP_FLAG_ALL_MCAST;
 2406                 else
 2407                         sc->flags &= ~FXP_FLAG_ALL_MCAST;
 2408 
 2409                 /*
 2410                  * If interface is marked up and not running, then start it.
 2411                  * If it is marked down and running, stop it.
 2412                  * XXX If it's up then re-initialize it. This is so flags
 2413                  * such as IFF_PROMISC are handled.
 2414                  */
 2415                 if (ifp->if_flags & IFF_UP) {
 2416                         fxp_init_body(sc);
 2417                 } else {
 2418                         if (ifp->if_flags & IFF_RUNNING)
 2419                                 fxp_stop(sc);
 2420                 }
 2421                 break;
 2422 
 2423         case SIOCADDMULTI:
 2424         case SIOCDELMULTI:
 2425                 if (ifp->if_flags & IFF_ALLMULTI)
 2426                         sc->flags |= FXP_FLAG_ALL_MCAST;
 2427                 else
 2428                         sc->flags &= ~FXP_FLAG_ALL_MCAST;
 2429                 /*
 2430                  * Multicast list has changed; set the hardware filter
 2431                  * accordingly.
 2432                  */
 2433                 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
 2434                         fxp_mc_setup(sc);
 2435                 /*
 2436                  * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
 2437                  * again rather than else {}.
 2438                  */
 2439                 if (sc->flags & FXP_FLAG_ALL_MCAST)
 2440                         fxp_init_body(sc);
 2441                 error = 0;
 2442                 break;
 2443 
 2444         case SIOCSIFMEDIA:
 2445         case SIOCGIFMEDIA:
 2446                 if (sc->miibus != NULL) {
 2447                         mii = device_get_softc(sc->miibus);
 2448                         error = ifmedia_ioctl(ifp, ifr,
 2449                             &mii->mii_media, command);
 2450                 } else {
 2451                         error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
 2452                 }
 2453                 break;
 2454 
 2455         case SIOCSIFCAP:
 2456                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 2457                 if (mask & IFCAP_POLLING)
 2458                         ifp->if_capenable ^= IFCAP_POLLING;
 2459                 if (mask & IFCAP_VLAN_MTU) {
 2460                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2461                         if (sc->revision != FXP_REV_82557)
 2462                                 flag = FXP_FLAG_LONG_PKT_EN;
 2463                         else /* a hack to get long frames on the old chip */
 2464                                 flag = FXP_FLAG_SAVE_BAD;
 2465                         sc->flags ^= flag;
 2466                         if (ifp->if_flags & IFF_UP)
 2467                                 fxp_init_body(sc);
 2468                 }
 2469                 break;
 2470 
 2471         default:
 2472                 /* 
 2473                  * ether_ioctl() will eventually call fxp_start() which
 2474                  * will result in mutex recursion so drop it first.
 2475                  */
 2476                 FXP_UNLOCK(sc);
 2477                 error = ether_ioctl(ifp, command, data);
 2478         }
 2479         if (FXP_LOCKED(sc))
 2480                 FXP_UNLOCK(sc);
 2481         splx(s);
 2482         return (error);
 2483 }
 2484 
 2485 /*
 2486  * Fill in the multicast address list and return number of entries.
 2487  */
 2488 static int
 2489 fxp_mc_addrs(struct fxp_softc *sc)
 2490 {
 2491         struct fxp_cb_mcs *mcsp = sc->mcsp;
 2492         struct ifnet *ifp = &sc->sc_if;
 2493         struct ifmultiaddr *ifma;
 2494         int nmcasts;
 2495 
 2496         nmcasts = 0;
 2497         if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
 2498                 IF_ADDR_LOCK(ifp);
 2499 #if __FreeBSD_version < 500000
 2500                 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2501 #else
 2502                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2503 #endif
 2504                         if (ifma->ifma_addr->sa_family != AF_LINK)
 2505                                 continue;
 2506                         if (nmcasts >= MAXMCADDR) {
 2507                                 sc->flags |= FXP_FLAG_ALL_MCAST;
 2508                                 nmcasts = 0;
 2509                                 break;
 2510                         }
 2511                         bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
 2512                             &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
 2513                         nmcasts++;
 2514                 }
 2515                 IF_ADDR_UNLOCK(ifp);
 2516         }
 2517         mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
 2518         return (nmcasts);
 2519 }
 2520 
 2521 /*
 2522  * Program the multicast filter.
 2523  *
 2524  * We have an artificial restriction that the multicast setup command
 2525  * must be the first command in the chain, so we take steps to ensure
 2526  * this. This restriction lets us keep up the performance of
 2527  * the pre-initialized command ring (esp. link pointers) by not actually
 2528  * inserting the mcsetup command in the ring - i.e. its link pointer
 2529  * points to the TxCB ring, but the mcsetup descriptor itself is not part
 2530  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 2531  * lead into the regular TxCB ring when it completes.
 2532  *
 2533  * This function must be called at splimp.
 2534  */
 2535 static void
 2536 fxp_mc_setup(struct fxp_softc *sc)
 2537 {
 2538         struct fxp_cb_mcs *mcsp = sc->mcsp;
 2539         struct ifnet *ifp = &sc->sc_if;
 2540         struct fxp_tx *txp;
 2541         int count;
 2542 
 2543         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2544         /*
 2545          * If there are queued commands, we must wait until they are all
 2546          * completed. If we are already waiting, then add a NOP command
 2547          * with interrupt option so that we're notified when all commands
 2548          * have been completed - fxp_start() ensures that no additional
 2549          * TX commands will be added when need_mcsetup is true.
 2550          */
 2551         if (sc->tx_queued) {
 2552                 /*
 2553                  * need_mcsetup will be true if we are already waiting for the
 2554                  * NOP command to be completed (see below). In this case, bail.
 2555                  */
 2556                 if (sc->need_mcsetup)
 2557                         return;
 2558                 sc->need_mcsetup = 1;
 2559 
 2560                 /*
 2561                  * Add a NOP command with interrupt so that we are notified
 2562                  * when all TX commands have been processed.
 2563                  */
 2564                 txp = sc->fxp_desc.tx_last->tx_next;
 2565                 txp->tx_mbuf = NULL;
 2566                 txp->tx_cb->cb_status = 0;
 2567                 txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
 2568                     FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
 2569                 /*
 2570                  * Advance the end of list forward.
 2571                  */
 2572                 sc->fxp_desc.tx_last->tx_cb->cb_command &=
 2573                     htole16(~FXP_CB_COMMAND_S);
 2574                 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 2575                 sc->fxp_desc.tx_last = txp;
 2576                 sc->tx_queued++;
 2577                 /*
 2578                  * Issue a resume in case the CU has just suspended.
 2579                  */
 2580                 fxp_scb_wait(sc);
 2581                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
 2582                 /*
 2583                  * Set a 5 second timer just in case we don't hear from the
 2584                  * card again.
 2585                  */
 2586                 ifp->if_timer = 5;
 2587 
 2588                 return;
 2589         }
 2590         sc->need_mcsetup = 0;
 2591 
 2592         /*
 2593          * Initialize multicast setup descriptor.
 2594          */
 2595         mcsp->cb_status = 0;
 2596         mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
 2597             FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
 2598         mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
 2599         txp = &sc->fxp_desc.mcs_tx;
 2600         txp->tx_mbuf = NULL;
 2601         txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
 2602         txp->tx_next = sc->fxp_desc.tx_list;
 2603         (void) fxp_mc_addrs(sc);
 2604         sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
 2605         sc->tx_queued = 1;
 2606 
 2607         /*
 2608          * Wait until command unit is not active. This should never
 2609          * be the case when nothing is queued, but make sure anyway.
 2610          */
 2611         count = 100;
 2612         while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
 2613             FXP_SCB_CUS_ACTIVE && --count)
 2614                 DELAY(10);
 2615         if (count == 0) {
 2616                 device_printf(sc->dev, "command queue timeout\n");
 2617                 return;
 2618         }
 2619 
 2620         /*
 2621          * Start the multicast setup command.
 2622          */
 2623         fxp_scb_wait(sc);
 2624         bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
 2625         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
 2626         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2627 
 2628         ifp->if_timer = 2;
 2629         return;
 2630 }
 2631 
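For illustration, a minimal stand-alone sketch of the byte-order detail in the list-extension step above: the cb_command words are kept in the chip's little-endian order, so the suspend (S) bit on the old tail is cleared with a byte-swapped mask, htole16(~FXP_CB_COMMAND_S), rather than a plain one. The constants below are stand-ins for the driver's definitions in if_fxpreg.h, and the program is a userland sketch, not driver code.

/*
 * Illustrative sketch, not part of if_fxp.c.  Mirrors the "advance the
 * end of list" step in fxp_mc_setup(): the command word lives in chip
 * (little-endian) byte order, so the mask used to clear the suspend bit
 * must be byte-swapped as well.  CB_COMMAND_* are local stand-ins for
 * the FXP_CB_COMMAND_* values defined in if_fxpreg.h.
 */
#include <sys/endian.h>         /* htole16()/le16toh() on FreeBSD */
#include <stdint.h>
#include <stdio.h>

#define CB_COMMAND_NOP  0x0000
#define CB_COMMAND_I    0x2000  /* interrupt when the command completes */
#define CB_COMMAND_S    0x4000  /* suspend the CU after this command */

int
main(void)
{
        /* New tail of the command list: NOP, suspend, interrupt. */
        uint16_t cb_command = htole16(CB_COMMAND_NOP | CB_COMMAND_I |
            CB_COMMAND_S);

        /*
         * Clear the suspend bit with a byte-swapped mask; a plain
         * ~CB_COMMAND_S mask would fail to clear the chip's S bit on a
         * big-endian host.
         */
        cb_command &= htole16((uint16_t)~CB_COMMAND_S);

        /* Host view is NOP | I = 0x2000. */
        printf("cb_command (host order): 0x%04x\n", le16toh(cb_command));
        return (0);
}
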
 2632 static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
 2633 static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
 2634 static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
 2635 static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
 2636 static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
 2637 static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
 2638 
 2639 #define UCODE(x)        x, sizeof(x)/sizeof(uint32_t)
 2640 
 2641 struct ucode {
 2642         uint32_t        revision;
 2643         uint32_t        *ucode;
 2644         int             length;
 2645         u_short         int_delay_offset;
 2646         u_short         bundle_max_offset;
 2647 } ucode_table[] = {
 2648         { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
 2649         { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
 2650         { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
 2651             D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
 2652         { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
 2653             D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
 2654         { FXP_REV_82550, UCODE(fxp_ucode_d102),
 2655             D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
 2656         { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
 2657             D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
 2658         { 0, NULL, 0, 0, 0 }
 2659 };
 2660 
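As a reading aid for the table above: each row initializes the five struct ucode members in declaration order, with UCODE() supplying both the array pointer and its element count. The first row, expanded by hand (values copied from the table, nothing added):

        {
                FXP_REV_82558_A4,                               /* revision */
                fxp_ucode_d101a,                                /* ucode */
                sizeof(fxp_ucode_d101a) / sizeof(uint32_t),     /* length */
                D101_CPUSAVER_DWORD,                            /* int_delay_offset */
                0                                               /* bundle_max_offset */
        },
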
 2661 static void
 2662 fxp_load_ucode(struct fxp_softc *sc)
 2663 {
 2664         struct ucode *uc;
 2665         struct fxp_cb_ucode *cbp;
 2666         int i;
 2667 
 2668         for (uc = ucode_table; uc->ucode != NULL; uc++)
 2669                 if (sc->revision == uc->revision)
 2670                         break;
 2671         if (uc->ucode == NULL)
 2672                 return;
 2673         cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
 2674         cbp->cb_status = 0;
 2675         cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
 2676         cbp->link_addr = 0xffffffff;            /* (no) next command */
 2677         for (i = 0; i < uc->length; i++)
 2678                 cbp->ucode[i] = htole32(uc->ucode[i]);
 2679         if (uc->int_delay_offset)
 2680                 *(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
 2681                     htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
 2682         if (uc->bundle_max_offset)
 2683                 *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
 2684                     htole16(sc->tunable_bundle_max);
 2685         /*
 2686          * Download the ucode to the chip.
 2687          */
 2688         fxp_scb_wait(sc);
 2689         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
 2690         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2691         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2692         /* ...and wait for it to complete. */
 2693         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 2694         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
 2695         device_printf(sc->dev,
 2696             "Microcode loaded, int_delay: %d usec  bundle_max: %d\n",
 2697             sc->tunable_int_delay, 
 2698             uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
 2699         sc->flags |= FXP_FLAG_UCODE;
 2700 }
 2701 
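For illustration, the two patch sites in fxp_load_ucode() above overwrite 16-bit words inside the downloaded microcode image: the interrupt-delay word is written as the sysctl value plus half of it (the fixed multiplier referred to in the comment before sysctl_hw_fxp_int_delay() below), and the bundle-max word is written unchanged. A stand-alone sketch of that arithmetic follows; the input values are assumed examples, not values taken from this file.

/*
 * Illustrative sketch, not part of if_fxp.c.  Repeats the patch
 * arithmetic from fxp_load_ucode(); the tunable values below are
 * assumed examples only.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        int tunable_int_delay = 1000;   /* assumed example, microseconds */
        int tunable_bundle_max = 6;     /* assumed example, packets */
        uint16_t delay_word, bundle_word;

        /* 1.5x multiplier applied before the word is patched in. */
        delay_word = (uint16_t)(tunable_int_delay + tunable_int_delay / 2);
        bundle_word = (uint16_t)tunable_bundle_max;

        printf("int_delay word %u, bundle_max word %u\n",
            delay_word, bundle_word);   /* prints 1500 and 6 */
        return (0);
}
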
 2702 static int
 2703 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 2704 {
 2705         int error, value;
 2706 
 2707         value = *(int *)arg1;
 2708         error = sysctl_handle_int(oidp, &value, 0, req);
 2709         if (error || !req->newptr)
 2710                 return (error);
 2711         if (value < low || value > high)
 2712                 return (EINVAL);
 2713         *(int *)arg1 = value;
 2714         return (0);
 2715 }
 2716 
 2717 /*
 2718  * Interrupt delay is expressed in microseconds; a multiplier is used
 2719  * to convert this to the appropriate clock ticks before use.
 2720  */
 2721 static int
 2722 sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
 2723 {
 2724         return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
 2725 }
 2726 
 2727 static int
 2728 sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
 2729 {
 2730         return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
 2731 }
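
The two wrappers above bind fixed ranges (300-3000 microseconds and 1-65535 packets) to the generic clamp in sysctl_int_range(); arg1 points at the integer being tuned, which is how *(int *)arg1 in the handler reaches sc->tunable_int_delay or sc->tunable_bundle_max. A hedged sketch of the usual dynamic-sysctl registration follows; the actual call in fxp_attach() is not shown in this excerpt, so the node name, description string, and the fxp_sysctl_sketch() wrapper are illustrative assumptions.

/*
 * Illustrative sketch, not part of if_fxp.c.  Shows one plausible way a
 * range-checked handler such as sysctl_hw_fxp_int_delay() is attached
 * to a read/write integer node; the real registration in fxp_attach()
 * may differ.  Assumes the declarations from if_fxpvar.h (struct
 * fxp_softc and the handler) are in scope.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static void
fxp_sysctl_sketch(struct fxp_softc *sc)         /* hypothetical helper */
{
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
            "int_delay", CTLTYPE_INT | CTLFLAG_RW,
            &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
            "interrupt delay in microseconds");
}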
