FreeBSD/Linux Kernel Cross Reference
sys/dev/fxp/if_fxp.c

    1 /*-
    2  * Copyright (c) 1995, David Greenman
    3  * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice unmodified, this list of conditions, and the following
   11  *    disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.4/sys/dev/fxp/if_fxp.c 252334 2013-06-28 05:21:59Z delphij $");
   32 
   33 /*
   34  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
   35  */
   36 
   37 #ifdef HAVE_KERNEL_OPTION_HEADERS
   38 #include "opt_device_polling.h"
   39 #endif
   40 
   41 #include <sys/param.h>
   42 #include <sys/systm.h>
   43 #include <sys/bus.h>
   44 #include <sys/endian.h>
   45 #include <sys/kernel.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/lock.h>
   48 #include <sys/module.h>
   49 #include <sys/mutex.h>
   50 #include <sys/rman.h>
   51 #include <sys/socket.h>
   52 #include <sys/sockio.h>
   53 #include <sys/sysctl.h>
   54 
   55 #include <net/bpf.h>
   56 #include <net/ethernet.h>
   57 #include <net/if.h>
   58 #include <net/if_arp.h>
   59 #include <net/if_dl.h>
   60 #include <net/if_media.h>
   61 #include <net/if_types.h>
   62 #include <net/if_vlan_var.h>
   63 
   64 #include <netinet/in.h>
   65 #include <netinet/in_systm.h>
   66 #include <netinet/ip.h>
   67 #include <netinet/tcp.h>
   68 #include <netinet/udp.h>
   69 
   70 #include <machine/bus.h>
   71 #include <machine/in_cksum.h>
   72 #include <machine/resource.h>
   73 
   74 #include <dev/pci/pcivar.h>
   75 #include <dev/pci/pcireg.h>             /* for PCIM_CMD_xxx */
   76 
   77 #include <dev/mii/mii.h>
   78 #include <dev/mii/miivar.h>
   79 
   80 #include <dev/fxp/if_fxpreg.h>
   81 #include <dev/fxp/if_fxpvar.h>
   82 #include <dev/fxp/rcvbundl.h>
   83 
   84 MODULE_DEPEND(fxp, pci, 1, 1, 1);
   85 MODULE_DEPEND(fxp, ether, 1, 1, 1);
   86 MODULE_DEPEND(fxp, miibus, 1, 1, 1);
   87 #include "miibus_if.h"
   88 
   89 /*
   90  * NOTE!  On !x86 we typically have an alignment constraint.  The
   91  * card DMAs the packet immediately following the RFA.  However,
   92  * the first thing in the packet is a 14-byte Ethernet header.
   93  * This means that the packet is misaligned.  To compensate,
   94  * we actually offset the RFA 2 bytes into the cluster.  This
    95  * aligns the packet after the Ethernet header at a 32-bit
   96  * boundary.  HOWEVER!  This means that the RFA is misaligned!
   97  */
   98 #define RFA_ALIGNMENT_FUDGE     2
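       /*
        * Illustrative cluster layout with the 2-byte fudge (assuming the
        * RFA size is a multiple of 4):
        *
        *      cluster + 0                  2 bytes of padding
        *      cluster + 2                  RFA (deliberately misaligned)
        *      cluster + 2 + rfa_size       14-byte Ethernet header
        *      ... + 14                     payload, now 32-bit aligned
        */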
   99 
  100 /*
  101  * Set initial transmit threshold at 64 (512 bytes). This is
   102  * increased by 64 (512 bytes) at a time, to a maximum of 192
  103  * (1536 bytes), if an underrun occurs.
  104  */
  105 static int tx_threshold = 64;
  106 
  107 /*
  108  * The configuration byte map has several undefined fields which
  109  * must be one or must be zero.  Set up a template for these bits.
  110  * The actual configuration is performed in fxp_init_body.
  111  *
  112  * See struct fxp_cb_config for the bit definitions.
  113  */
  114 static const u_char fxp_cb_config_template[] = {
  115         0x0, 0x0,               /* cb_status */
  116         0x0, 0x0,               /* cb_command */
  117         0x0, 0x0, 0x0, 0x0,     /* link_addr */
  118         0x0,    /*  0 */
  119         0x0,    /*  1 */
  120         0x0,    /*  2 */
  121         0x0,    /*  3 */
  122         0x0,    /*  4 */
  123         0x0,    /*  5 */
  124         0x32,   /*  6 */
  125         0x0,    /*  7 */
  126         0x0,    /*  8 */
  127         0x0,    /*  9 */
  128         0x6,    /* 10 */
  129         0x0,    /* 11 */
  130         0x0,    /* 12 */
  131         0x0,    /* 13 */
  132         0xf2,   /* 14 */
  133         0x48,   /* 15 */
  134         0x0,    /* 16 */
  135         0x40,   /* 17 */
  136         0xf0,   /* 18 */
  137         0x0,    /* 19 */
  138         0x3f,   /* 20 */
  139         0x5,    /* 21 */
  140         0x0,    /* 22 */
  141         0x0,    /* 23 */
  142         0x0,    /* 24 */
  143         0x0,    /* 25 */
  144         0x0,    /* 26 */
  145         0x0,    /* 27 */
  146         0x0,    /* 28 */
  147         0x0,    /* 29 */
  148         0x0,    /* 30 */
  149         0x0     /* 31 */
  150 };
  151 
  152 /*
  153  * Claim various Intel PCI device identifiers for this driver.  The
   154  * sub-vendor and sub-device fields are extensively used to identify
  155  * particular variants, but we don't currently differentiate between
  156  * them.
  157  */
  158 static const struct fxp_ident fxp_ident_table[] = {
  159     { 0x1029,   -1,     0, "Intel 82559 PCI/CardBus Pro/100" },
  160     { 0x1030,   -1,     0, "Intel 82559 Pro/100 Ethernet" },
  161     { 0x1031,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  162     { 0x1032,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  163     { 0x1033,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  164     { 0x1034,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  165     { 0x1035,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  166     { 0x1036,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  167     { 0x1037,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  168     { 0x1038,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  169     { 0x1039,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  170     { 0x103A,   -1,     4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  171     { 0x103B,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  172     { 0x103C,   -1,     4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  173     { 0x103D,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  174     { 0x103E,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  175     { 0x1050,   -1,     5, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
  176     { 0x1051,   -1,     5, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
  177     { 0x1059,   -1,     0, "Intel 82551QM Pro/100 M Mobile Connection" },
  178     { 0x1064,   -1,     6, "Intel 82562EZ (ICH6)" },
  179     { 0x1065,   -1,     6, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
  180     { 0x1068,   -1,     6, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
  181     { 0x1069,   -1,     6, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
  182     { 0x1091,   -1,     7, "Intel 82562GX Pro/100 Ethernet" },
  183     { 0x1092,   -1,     7, "Intel Pro/100 VE Network Connection" },
  184     { 0x1093,   -1,     7, "Intel Pro/100 VM Network Connection" },
  185     { 0x1094,   -1,     7, "Intel Pro/100 946GZ (ICH7) Network Connection" },
  186     { 0x1209,   -1,     0, "Intel 82559ER Embedded 10/100 Ethernet" },
  187     { 0x1229,   0x01,   0, "Intel 82557 Pro/100 Ethernet" },
  188     { 0x1229,   0x02,   0, "Intel 82557 Pro/100 Ethernet" },
  189     { 0x1229,   0x03,   0, "Intel 82557 Pro/100 Ethernet" },
  190     { 0x1229,   0x04,   0, "Intel 82558 Pro/100 Ethernet" },
  191     { 0x1229,   0x05,   0, "Intel 82558 Pro/100 Ethernet" },
  192     { 0x1229,   0x06,   0, "Intel 82559 Pro/100 Ethernet" },
  193     { 0x1229,   0x07,   0, "Intel 82559 Pro/100 Ethernet" },
  194     { 0x1229,   0x08,   0, "Intel 82559 Pro/100 Ethernet" },
  195     { 0x1229,   0x09,   0, "Intel 82559ER Pro/100 Ethernet" },
  196     { 0x1229,   0x0c,   0, "Intel 82550 Pro/100 Ethernet" },
  197     { 0x1229,   0x0d,   0, "Intel 82550C Pro/100 Ethernet" },
  198     { 0x1229,   0x0e,   0, "Intel 82550 Pro/100 Ethernet" },
  199     { 0x1229,   0x0f,   0, "Intel 82551 Pro/100 Ethernet" },
  200     { 0x1229,   0x10,   0, "Intel 82551 Pro/100 Ethernet" },
  201     { 0x1229,   -1,     0, "Intel 82557/8/9 Pro/100 Ethernet" },
  202     { 0x2449,   -1,     2, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
  203     { 0x27dc,   -1,     7, "Intel 82801GB (ICH7) 10/100 Ethernet" },
  204     { 0,        -1,     0, NULL },
  205 };
  206 
  207 #ifdef FXP_IP_CSUM_WAR
  208 #define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  209 #else
  210 #define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
  211 #endif
  212 
  213 static int              fxp_probe(device_t dev);
  214 static int              fxp_attach(device_t dev);
  215 static int              fxp_detach(device_t dev);
  216 static int              fxp_shutdown(device_t dev);
  217 static int              fxp_suspend(device_t dev);
  218 static int              fxp_resume(device_t dev);
  219 
  220 static const struct fxp_ident *fxp_find_ident(device_t dev);
  221 static void             fxp_intr(void *xsc);
  222 static void             fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp,
  223                             struct mbuf *m, uint16_t status, int pos);
  224 static int              fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
  225                             uint8_t statack, int count);
  226 static void             fxp_init(void *xsc);
  227 static void             fxp_init_body(struct fxp_softc *sc, int);
  228 static void             fxp_tick(void *xsc);
  229 static void             fxp_start(struct ifnet *ifp);
  230 static void             fxp_start_body(struct ifnet *ifp);
  231 static int              fxp_encap(struct fxp_softc *sc, struct mbuf **m_head);
  232 static void             fxp_txeof(struct fxp_softc *sc);
  233 static void             fxp_stop(struct fxp_softc *sc);
  234 static void             fxp_release(struct fxp_softc *sc);
  235 static int              fxp_ioctl(struct ifnet *ifp, u_long command,
  236                             caddr_t data);
  237 static void             fxp_watchdog(struct fxp_softc *sc);
  238 static void             fxp_add_rfabuf(struct fxp_softc *sc,
  239                             struct fxp_rx *rxp);
  240 static void             fxp_discard_rfabuf(struct fxp_softc *sc,
  241                             struct fxp_rx *rxp);
  242 static int              fxp_new_rfabuf(struct fxp_softc *sc,
  243                             struct fxp_rx *rxp);
  244 static int              fxp_mc_addrs(struct fxp_softc *sc);
  245 static void             fxp_mc_setup(struct fxp_softc *sc);
  246 static uint16_t         fxp_eeprom_getword(struct fxp_softc *sc, int offset,
  247                             int autosize);
  248 static void             fxp_eeprom_putword(struct fxp_softc *sc, int offset,
  249                             uint16_t data);
  250 static void             fxp_autosize_eeprom(struct fxp_softc *sc);
  251 static void             fxp_load_eeprom(struct fxp_softc *sc);
  252 static void             fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
  253                             int offset, int words);
  254 static void             fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
  255                             int offset, int words);
  256 static int              fxp_ifmedia_upd(struct ifnet *ifp);
  257 static void             fxp_ifmedia_sts(struct ifnet *ifp,
  258                             struct ifmediareq *ifmr);
  259 static int              fxp_serial_ifmedia_upd(struct ifnet *ifp);
  260 static void             fxp_serial_ifmedia_sts(struct ifnet *ifp,
  261                             struct ifmediareq *ifmr);
  262 static int              fxp_miibus_readreg(device_t dev, int phy, int reg);
  263 static int              fxp_miibus_writereg(device_t dev, int phy, int reg,
  264                             int value);
  265 static void             fxp_miibus_statchg(device_t dev);
  266 static void             fxp_load_ucode(struct fxp_softc *sc);
  267 static void             fxp_update_stats(struct fxp_softc *sc);
  268 static void             fxp_sysctl_node(struct fxp_softc *sc);
  269 static int              sysctl_int_range(SYSCTL_HANDLER_ARGS,
  270                             int low, int high);
  271 static int              sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
  272 static int              sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
  273 static void             fxp_scb_wait(struct fxp_softc *sc);
  274 static void             fxp_scb_cmd(struct fxp_softc *sc, int cmd);
  275 static void             fxp_dma_wait(struct fxp_softc *sc,
  276                             volatile uint16_t *status, bus_dma_tag_t dmat,
  277                             bus_dmamap_t map);
  278 
  279 static device_method_t fxp_methods[] = {
  280         /* Device interface */
  281         DEVMETHOD(device_probe,         fxp_probe),
  282         DEVMETHOD(device_attach,        fxp_attach),
  283         DEVMETHOD(device_detach,        fxp_detach),
  284         DEVMETHOD(device_shutdown,      fxp_shutdown),
  285         DEVMETHOD(device_suspend,       fxp_suspend),
  286         DEVMETHOD(device_resume,        fxp_resume),
  287 
  288         /* MII interface */
  289         DEVMETHOD(miibus_readreg,       fxp_miibus_readreg),
  290         DEVMETHOD(miibus_writereg,      fxp_miibus_writereg),
  291         DEVMETHOD(miibus_statchg,       fxp_miibus_statchg),
  292 
  293         { 0, 0 }
  294 };
  295 
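       /*
        * newbus glue: register fxp as a driver on the pci bus and attach
        * miibus below fxp so that the PHY drivers can be probed.
        */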
  296 static driver_t fxp_driver = {
  297         "fxp",
  298         fxp_methods,
  299         sizeof(struct fxp_softc),
  300 };
  301 
  302 static devclass_t fxp_devclass;
  303 
  304 DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
  305 DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
  306 
  307 static struct resource_spec fxp_res_spec_mem[] = {
  308         { SYS_RES_MEMORY,       FXP_PCI_MMBA,   RF_ACTIVE },
  309         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  310         { -1, 0 }
  311 };
  312 
  313 static struct resource_spec fxp_res_spec_io[] = {
  314         { SYS_RES_IOPORT,       FXP_PCI_IOBA,   RF_ACTIVE },
  315         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  316         { -1, 0 }
  317 };
  318 
  319 /*
  320  * Wait for the previous command to be accepted (but not necessarily
  321  * completed).
  322  */
  323 static void
  324 fxp_scb_wait(struct fxp_softc *sc)
  325 {
  326         union {
  327                 uint16_t w;
  328                 uint8_t b[2];
  329         } flowctl;
  330         int i = 10000;
  331 
  332         while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
  333                 DELAY(2);
  334         if (i == 0) {
  335                 flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FC_THRESH);
  336                 flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FC_STATUS);
  337                 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
  338                     CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
  339                     CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
  340                     CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
  341         }
  342 }
  343 
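       /*
        * Issue a command to the SCB command register.  On controllers with
        * the CU resume bug (sc->cu_resume_bug), a CU_RESUME is preceded by
        * a NOP and a wait for the NOP to be accepted.
        */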
  344 static void
  345 fxp_scb_cmd(struct fxp_softc *sc, int cmd)
  346 {
  347 
  348         if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
  349                 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
  350                 fxp_scb_wait(sc);
  351         }
  352         CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
  353 }
  354 
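       /*
        * Wait for a command block to be marked complete: sync its DMA
        * memory and poll the status word for the C bit, giving up after
        * roughly 20ms (10000 iterations of a 2us delay).
        */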
  355 static void
  356 fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
  357     bus_dma_tag_t dmat, bus_dmamap_t map)
  358 {
  359         int i;
  360 
  361         for (i = 10000; i > 0; i--) {
  362                 DELAY(2);
  363                 bus_dmamap_sync(dmat, map,
  364                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  365                 if ((le16toh(*status) & FXP_CB_STATUS_C) != 0)
  366                         break;
  367         }
  368         if (i == 0)
  369                 device_printf(sc->dev, "DMA timeout\n");
  370 }
  371 
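       /*
        * Look up the device in fxp_ident_table by PCI device ID and
        * revision; a table entry with a revision of -1 matches any
        * revision.  Returns NULL if the device is not one of ours.
        */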
  372 static const struct fxp_ident *
  373 fxp_find_ident(device_t dev)
  374 {
  375         uint16_t devid;
  376         uint8_t revid;
  377         const struct fxp_ident *ident;
  378 
  379         if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
  380                 devid = pci_get_device(dev);
  381                 revid = pci_get_revid(dev);
  382                 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
  383                         if (ident->devid == devid &&
  384                             (ident->revid == revid || ident->revid == -1)) {
  385                                 return (ident);
  386                         }
  387                 }
  388         }
  389         return (NULL);
  390 }
  391 
  392 /*
  393  * Return identification string if this device is ours.
  394  */
  395 static int
  396 fxp_probe(device_t dev)
  397 {
  398         const struct fxp_ident *ident;
  399 
  400         ident = fxp_find_ident(dev);
  401         if (ident != NULL) {
  402                 device_set_desc(dev, ident->name);
  403                 return (BUS_PROBE_DEFAULT);
  404         }
  405         return (ENXIO);
  406 }
  407 
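       /*
        * Callback for bus_dmamap_load(): record the bus address of the
        * single segment into which the stats, TxCB list and multicast
        * setup buffers are loaded.
        */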
  408 static void
  409 fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  410 {
  411         uint32_t *addr;
  412 
  413         if (error)
  414                 return;
  415 
  416         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  417         addr = arg;
  418         *addr = segs->ds_addr;
  419 }
  420 
  421 static int
  422 fxp_attach(device_t dev)
  423 {
  424         struct fxp_softc *sc;
  425         struct fxp_cb_tx *tcbp;
  426         struct fxp_tx *txp;
  427         struct fxp_rx *rxp;
  428         struct ifnet *ifp;
  429         uint32_t val;
  430         uint16_t data;
  431         u_char eaddr[ETHER_ADDR_LEN];
  432         int error, flags, i, pmc, prefer_iomap;
  433 
  434         error = 0;
  435         sc = device_get_softc(dev);
  436         sc->dev = dev;
  437         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  438             MTX_DEF);
  439         callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
  440         ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
  441             fxp_serial_ifmedia_sts);
  442 
  443         ifp = sc->ifp = if_alloc(IFT_ETHER);
  444         if (ifp == NULL) {
  445                 device_printf(dev, "can not if_alloc()\n");
  446                 error = ENOSPC;
  447                 goto fail;
  448         }
  449 
  450         /*
  451          * Enable bus mastering.
  452          */
  453         pci_enable_busmaster(dev);
  454         val = pci_read_config(dev, PCIR_COMMAND, 2);
  455 
  456         /*
   457          * Figure out which register mapping to try first: memory or I/O.
   458          * We default to memory mapping, then accept an override from a
   459          * device hint, and fall back to the other mapping if allocation fails.
  460          */
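               /*
                * For example (unit 0 shown; other units are analogous), a
                * device hint such as
                *      hint.fxp.0.prefer_iomap="1"
                * in /boot/device.hints selects I/O mapping.
                */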
  461         prefer_iomap = 0;
  462         resource_int_value(device_get_name(dev), device_get_unit(dev),
  463             "prefer_iomap", &prefer_iomap);
  464         if (prefer_iomap)
  465                 sc->fxp_spec = fxp_res_spec_io;
  466         else
  467                 sc->fxp_spec = fxp_res_spec_mem;
  468 
  469         error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
  470         if (error) {
  471                 if (sc->fxp_spec == fxp_res_spec_mem)
  472                         sc->fxp_spec = fxp_res_spec_io;
  473                 else
  474                         sc->fxp_spec = fxp_res_spec_mem;
  475                 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
  476         }
  477         if (error) {
  478                 device_printf(dev, "could not allocate resources\n");
  479                 error = ENXIO;
  480                 goto fail;
  481         }
  482 
  483         if (bootverbose) {
  484                 device_printf(dev, "using %s space register mapping\n",
  485                    sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
  486         }
  487 
  488         /*
   489          * Put the CU/RU into the idle state and prepare for a full reset.
  490          */
  491         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
  492         DELAY(10);
  493         /* Full reset and disable interrupts. */
  494         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
  495         DELAY(10);
  496         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
  497 
  498         /*
   499          * Find out how large an SEEPROM we have.
  500          */
  501         fxp_autosize_eeprom(sc);
  502         fxp_load_eeprom(sc);
  503 
  504         /*
  505          * Find out the chip revision; lump all 82557 revs together.
  506          */
  507         sc->ident = fxp_find_ident(dev);
  508         if (sc->ident->ich > 0) {
  509                 /* Assume ICH controllers are 82559. */
  510                 sc->revision = FXP_REV_82559_A0;
  511         } else {
  512                 data = sc->eeprom[FXP_EEPROM_MAP_CNTR];
  513                 if ((data >> 8) == 1)
  514                         sc->revision = FXP_REV_82557;
  515                 else
  516                         sc->revision = pci_get_revid(dev);
  517         }
  518 
  519         /*
  520          * Check availability of WOL. 82559ER does not support WOL.
  521          */
  522         if (sc->revision >= FXP_REV_82558_A4 &&
  523             sc->revision != FXP_REV_82559S_A) {
  524                 data = sc->eeprom[FXP_EEPROM_MAP_ID];
  525                 if ((data & 0x20) != 0 &&
  526                     pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0)
  527                         sc->flags |= FXP_FLAG_WOLCAP;
  528         }
  529 
  530         if (sc->revision == FXP_REV_82550_C) {
  531                 /*
   532                  * The 82550C with the server extension requires microcode
   533                  * to receive fragmented UDP datagrams.  However, if that
   534                  * microcode is loaded on a client-only 82550C, it locks
   535                  * up the controller.
  536                  */
  537                 data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
  538                 if ((data & 0x0400) == 0)
  539                         sc->flags |= FXP_FLAG_NO_UCODE;
  540         }
  541 
  542         /* Receiver lock-up workaround detection. */
  543         if (sc->revision < FXP_REV_82558_A4) {
  544                 data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
  545                 if ((data & 0x03) != 0x03) {
  546                         sc->flags |= FXP_FLAG_RXBUG;
  547                         device_printf(dev, "Enabling Rx lock-up workaround\n");
  548                 }
  549         }
  550 
  551         /*
  552          * Determine whether we must use the 503 serial interface.
  553          */
  554         data = sc->eeprom[FXP_EEPROM_MAP_PRI_PHY];
  555         if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
  556             && (data & FXP_PHY_SERIAL_ONLY))
  557                 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
  558 
  559         fxp_sysctl_node(sc);
  560         /*
  561          * Enable workarounds for certain chip revision deficiencies.
  562          *
  563          * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
   564  * some systems based on a normal 82559 design, have a defect where
  565          * the chip can cause a PCI protocol violation if it receives
  566          * a CU_RESUME command when it is entering the IDLE state.  The
  567          * workaround is to disable Dynamic Standby Mode, so the chip never
  568          * deasserts CLKRUN#, and always remains in an active state.
  569          *
  570          * See Intel 82801BA/82801BAM Specification Update, Errata #30.
  571          */
  572         if ((sc->ident->ich >= 2 && sc->ident->ich <= 3) ||
  573             (sc->ident->ich == 0 && sc->revision >= FXP_REV_82559_A0)) {
  574                 data = sc->eeprom[FXP_EEPROM_MAP_ID];
  575                 if (data & 0x02) {                      /* STB enable */
  576                         uint16_t cksum;
  577                         int i;
  578 
  579                         device_printf(dev,
  580                             "Disabling dynamic standby mode in EEPROM\n");
  581                         data &= ~0x02;
  582                         sc->eeprom[FXP_EEPROM_MAP_ID] = data;
  583                         fxp_write_eeprom(sc, &data, FXP_EEPROM_MAP_ID, 1);
  584                         device_printf(dev, "New EEPROM ID: 0x%x\n", data);
  585                         cksum = 0;
  586                         for (i = 0; i < (1 << sc->eeprom_size) - 1; i++)
  587                                 cksum += sc->eeprom[i];
  588                         i = (1 << sc->eeprom_size) - 1;
  589                         cksum = 0xBABA - cksum;
  590                         fxp_write_eeprom(sc, &cksum, i, 1);
  591                         device_printf(dev,
  592                             "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
  593                             i, sc->eeprom[i], cksum);
  594                         sc->eeprom[i] = cksum;
  595                         /*
  596                          * If the user elects to continue, try the software
  597                          * workaround, as it is better than nothing.
  598                          */
  599                         sc->flags |= FXP_FLAG_CU_RESUME_BUG;
  600                 }
  601         }
  602 
  603         /*
   604          * If we are not an 82557 chip, we can enable extended features.
  605          */
  606         if (sc->revision != FXP_REV_82557) {
  607                 /*
  608                  * If MWI is enabled in the PCI configuration, and there
  609                  * is a valid cacheline size (8 or 16 dwords), then tell
  610                  * the board to turn on MWI.
  611                  */
  612                 if (val & PCIM_CMD_MWRICEN &&
  613                     pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
  614                         sc->flags |= FXP_FLAG_MWI_ENABLE;
  615 
  616                 /* turn on the extended TxCB feature */
  617                 sc->flags |= FXP_FLAG_EXT_TXCB;
  618 
  619                 /* enable reception of long frames for VLAN */
  620                 sc->flags |= FXP_FLAG_LONG_PKT_EN;
  621         } else {
   622                 /* a hack to get long VLAN frames on an 82557 */
  623                 sc->flags |= FXP_FLAG_SAVE_BAD;
  624         }
  625 
  626         /* For 82559 or later chips, Rx checksum offload is supported. */
  627         if (sc->revision >= FXP_REV_82559_A0) {
  628                 /* 82559ER does not support Rx checksum offloading. */
  629                 if (sc->ident->devid != 0x1209)
  630                         sc->flags |= FXP_FLAG_82559_RXCSUM;
  631         }
  632         /*
  633          * Enable use of extended RFDs and TCBs for 82550
  634          * and later chips. Note: we need extended TXCB support
  635          * too, but that's already enabled by the code above.
  636          * Be careful to do this only on the right devices.
  637          */
  638         if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
  639             sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
  640             || sc->revision == FXP_REV_82551_10) {
  641                 sc->rfa_size = sizeof (struct fxp_rfa);
  642                 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
  643                 sc->flags |= FXP_FLAG_EXT_RFA;
  644                 /* Use extended RFA instead of 82559 checksum mode. */
  645                 sc->flags &= ~FXP_FLAG_82559_RXCSUM;
  646         } else {
  647                 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
  648                 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
  649         }
  650 
  651         /*
  652          * Allocate DMA tags and DMA safe memory.
  653          */
  654         sc->maxtxseg = FXP_NTXSEG;
  655         sc->maxsegsize = MCLBYTES;
  656         if (sc->flags & FXP_FLAG_EXT_RFA) {
  657                 sc->maxtxseg--;
  658                 sc->maxsegsize = FXP_TSO_SEGSIZE;
  659         }
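               /*
                * TX mbuf tag: 2-byte alignment, 32-bit bus addresses only,
                * at most sc->maxtxseg segments of up to sc->maxsegsize bytes
                * each, with deferred busdma callbacks serialized by Giant.
                */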
  660         error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
  661             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  662             sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header),
  663             sc->maxtxseg, sc->maxsegsize, 0,
  664             busdma_lock_mutex, &Giant, &sc->fxp_txmtag);
  665         if (error) {
  666                 device_printf(dev, "could not create TX DMA tag\n");
  667                 goto fail;
  668         }
  669 
  670         error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
  671             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  672             MCLBYTES, 1, MCLBYTES, 0,
  673             busdma_lock_mutex, &Giant, &sc->fxp_rxmtag);
  674         if (error) {
  675                 device_printf(dev, "could not create RX DMA tag\n");
  676                 goto fail;
  677         }
  678 
  679         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  680             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  681             sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
  682             busdma_lock_mutex, &Giant, &sc->fxp_stag);
  683         if (error) {
  684                 device_printf(dev, "could not create stats DMA tag\n");
  685                 goto fail;
  686         }
  687 
  688         error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
  689             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->fxp_smap);
  690         if (error) {
  691                 device_printf(dev, "could not allocate stats DMA memory\n");
  692                 goto fail;
  693         }
  694         error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
  695             sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr,
  696             BUS_DMA_NOWAIT);
  697         if (error) {
  698                 device_printf(dev, "could not load the stats DMA buffer\n");
  699                 goto fail;
  700         }
  701 
  702         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  703             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  704             FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0,
  705             busdma_lock_mutex, &Giant, &sc->cbl_tag);
  706         if (error) {
  707                 device_printf(dev, "could not create TxCB DMA tag\n");
  708                 goto fail;
  709         }
  710 
  711         error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
  712             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->cbl_map);
  713         if (error) {
  714                 device_printf(dev, "could not allocate TxCB DMA memory\n");
  715                 goto fail;
  716         }
  717 
  718         error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
  719             sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
  720             &sc->fxp_desc.cbl_addr, BUS_DMA_NOWAIT);
  721         if (error) {
  722                 device_printf(dev, "could not load TxCB DMA buffer\n");
  723                 goto fail;
  724         }
  725 
  726         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  727             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  728             sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
  729             busdma_lock_mutex, &Giant, &sc->mcs_tag);
  730         if (error) {
  731                 device_printf(dev,
  732                     "could not create multicast setup DMA tag\n");
  733                 goto fail;
  734         }
  735 
  736         error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
  737             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->mcs_map);
  738         if (error) {
  739                 device_printf(dev,
  740                     "could not allocate multicast setup DMA memory\n");
  741                 goto fail;
  742         }
  743         error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
  744             sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr,
  745             BUS_DMA_NOWAIT);
  746         if (error) {
  747                 device_printf(dev,
  748                     "can't load the multicast setup DMA buffer\n");
  749                 goto fail;
  750         }
  751 
  752         /*
  753          * Pre-allocate the TX DMA maps and setup the pointers to
   754          * Pre-allocate the TX DMA maps and set up the pointers to
  755          */
  756         txp = sc->fxp_desc.tx_list;
  757         tcbp = sc->fxp_desc.cbl_list;
  758         for (i = 0; i < FXP_NTXCB; i++) {
  759                 txp[i].tx_cb = tcbp + i;
  760                 error = bus_dmamap_create(sc->fxp_txmtag, 0, &txp[i].tx_map);
  761                 if (error) {
  762                         device_printf(dev, "can't create DMA map for TX\n");
  763                         goto fail;
  764                 }
  765         }
  766         error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map);
  767         if (error) {
  768                 device_printf(dev, "can't create spare DMA map\n");
  769                 goto fail;
  770         }
  771 
  772         /*
  773          * Pre-allocate our receive buffers.
  774          */
  775         sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
  776         for (i = 0; i < FXP_NRFABUFS; i++) {
  777                 rxp = &sc->fxp_desc.rx_list[i];
  778                 error = bus_dmamap_create(sc->fxp_rxmtag, 0, &rxp->rx_map);
  779                 if (error) {
  780                         device_printf(dev, "can't create DMA map for RX\n");
  781                         goto fail;
  782                 }
  783                 if (fxp_new_rfabuf(sc, rxp) != 0) {
  784                         error = ENOMEM;
  785                         goto fail;
  786                 }
  787                 fxp_add_rfabuf(sc, rxp);
  788         }
  789 
  790         /*
  791          * Read MAC address.
  792          */
  793         eaddr[0] = sc->eeprom[FXP_EEPROM_MAP_IA0] & 0xff;
  794         eaddr[1] = sc->eeprom[FXP_EEPROM_MAP_IA0] >> 8;
  795         eaddr[2] = sc->eeprom[FXP_EEPROM_MAP_IA1] & 0xff;
  796         eaddr[3] = sc->eeprom[FXP_EEPROM_MAP_IA1] >> 8;
  797         eaddr[4] = sc->eeprom[FXP_EEPROM_MAP_IA2] & 0xff;
  798         eaddr[5] = sc->eeprom[FXP_EEPROM_MAP_IA2] >> 8;
  799         if (bootverbose) {
  800                 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
  801                     pci_get_vendor(dev), pci_get_device(dev),
  802                     pci_get_subvendor(dev), pci_get_subdevice(dev),
  803                     pci_get_revid(dev));
  804                 device_printf(dev, "Dynamic Standby mode is %s\n",
  805                     sc->eeprom[FXP_EEPROM_MAP_ID] & 0x02 ? "enabled" :
  806                     "disabled");
  807         }
  808 
  809         /*
  810          * If this is only a 10Mbps device, then there is no MII, and
  811          * the PHY will use a serial interface instead.
  812          *
  813          * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
  814          * doesn't have a programming interface of any sort.  The
  815          * media is sensed automatically based on how the link partner
  816          * is configured.  This is, in essence, manual configuration.
  817          */
  818         if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
  819                 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
  820                 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
  821         } else {
  822                 /*
   823                  * The i82557 wedges when isolating all of its PHYs.
  824                  */
  825                 flags = MIIF_NOISOLATE;
  826                 if (sc->revision >= FXP_REV_82558_A4)
  827                         flags |= MIIF_DOPAUSE;
  828                 error = mii_attach(dev, &sc->miibus, ifp, fxp_ifmedia_upd,
  829                     fxp_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
  830                     MII_OFFSET_ANY, flags);
  831                 if (error != 0) {
  832                         device_printf(dev, "attaching PHYs failed\n");
  833                         goto fail;
  834                 }
  835         }
  836 
  837         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  838         ifp->if_init = fxp_init;
  839         ifp->if_softc = sc;
  840         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  841         ifp->if_ioctl = fxp_ioctl;
  842         ifp->if_start = fxp_start;
  843 
  844         ifp->if_capabilities = ifp->if_capenable = 0;
  845 
  846         /* Enable checksum offload/TSO for 82550 or better chips */
  847         if (sc->flags & FXP_FLAG_EXT_RFA) {
  848                 ifp->if_hwassist = FXP_CSUM_FEATURES | CSUM_TSO;
  849                 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
  850                 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_TSO4;
  851         }
  852 
  853         if (sc->flags & FXP_FLAG_82559_RXCSUM) {
  854                 ifp->if_capabilities |= IFCAP_RXCSUM;
  855                 ifp->if_capenable |= IFCAP_RXCSUM;
  856         }
  857 
  858         if (sc->flags & FXP_FLAG_WOLCAP) {
  859                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
  860                 ifp->if_capenable |= IFCAP_WOL_MAGIC;
  861         }
  862 
  863 #ifdef DEVICE_POLLING
  864         /* Inform the world we support polling. */
  865         ifp->if_capabilities |= IFCAP_POLLING;
  866 #endif
  867 
  868         /*
  869          * Attach the interface.
  870          */
  871         ether_ifattach(ifp, eaddr);
  872 
  873         /*
  874          * Tell the upper layer(s) we support long frames.
  875          * Must appear after the call to ether_ifattach() because
  876          * ether_ifattach() sets ifi_hdrlen to the default value.
  877          */
  878         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  879         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  880         ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
  881         if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) {
  882                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING |
  883                     IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
  884                 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING |
  885                     IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
  886         }
  887 
  888         /*
  889          * Let the system queue as many packets as we have available
  890          * TX descriptors.
  891          */
  892         IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
  893         ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
  894         IFQ_SET_READY(&ifp->if_snd);
  895 
  896         /*
  897          * Hook our interrupt after all initialization is complete.
  898          */
  899         error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
  900                                NULL, fxp_intr, sc, &sc->ih);
  901         if (error) {
  902                 device_printf(dev, "could not setup irq\n");
  903                 ether_ifdetach(sc->ifp);
  904                 goto fail;
  905         }
  906 
  907         /*
   908          * Configure the hardware to reject magic frames, otherwise the
   909          * system will hang on receipt of a magic frame.
  910          */
  911         if ((sc->flags & FXP_FLAG_WOLCAP) != 0) {
  912                 FXP_LOCK(sc);
  913                 /* Clear wakeup events. */
  914                 CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR));
  915                 fxp_init_body(sc, 0);
  916                 fxp_stop(sc);
  917                 FXP_UNLOCK(sc);
  918         }
  919 
  920 fail:
  921         if (error)
  922                 fxp_release(sc);
  923         return (error);
  924 }
  925 
  926 /*
  927  * Release all resources.  The softc lock should not be held and the
  928  * interrupt should already be torn down.
  929  */
  930 static void
  931 fxp_release(struct fxp_softc *sc)
  932 {
  933         struct fxp_rx *rxp;
  934         struct fxp_tx *txp;
  935         int i;
  936 
  937         FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
  938         KASSERT(sc->ih == NULL,
  939             ("fxp_release() called with intr handle still active"));
  940         if (sc->miibus)
  941                 device_delete_child(sc->dev, sc->miibus);
  942         bus_generic_detach(sc->dev);
  943         ifmedia_removeall(&sc->sc_media);
  944         if (sc->fxp_desc.cbl_list) {
  945                 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
  946                 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
  947                     sc->cbl_map);
  948         }
  949         if (sc->fxp_stats) {
  950                 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
  951                 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
  952         }
  953         if (sc->mcsp) {
  954                 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
  955                 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
  956         }
  957         bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
  958         if (sc->fxp_rxmtag) {
  959                 for (i = 0; i < FXP_NRFABUFS; i++) {
  960                         rxp = &sc->fxp_desc.rx_list[i];
  961                         if (rxp->rx_mbuf != NULL) {
  962                                 bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
  963                                     BUS_DMASYNC_POSTREAD);
  964                                 bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
  965                                 m_freem(rxp->rx_mbuf);
  966                         }
  967                         bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
  968                 }
  969                 bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
  970                 bus_dma_tag_destroy(sc->fxp_rxmtag);
  971         }
  972         if (sc->fxp_txmtag) {
  973                 for (i = 0; i < FXP_NTXCB; i++) {
  974                         txp = &sc->fxp_desc.tx_list[i];
  975                         if (txp->tx_mbuf != NULL) {
  976                                 bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
  977                                     BUS_DMASYNC_POSTWRITE);
  978                                 bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
  979                                 m_freem(txp->tx_mbuf);
  980                         }
  981                         bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
  982                 }
  983                 bus_dma_tag_destroy(sc->fxp_txmtag);
  984         }
  985         if (sc->fxp_stag)
  986                 bus_dma_tag_destroy(sc->fxp_stag);
  987         if (sc->cbl_tag)
  988                 bus_dma_tag_destroy(sc->cbl_tag);
  989         if (sc->mcs_tag)
  990                 bus_dma_tag_destroy(sc->mcs_tag);
  991         if (sc->ifp)
  992                 if_free(sc->ifp);
  993 
  994         mtx_destroy(&sc->sc_mtx);
  995 }
  996 
  997 /*
  998  * Detach interface.
  999  */
 1000 static int
 1001 fxp_detach(device_t dev)
 1002 {
 1003         struct fxp_softc *sc = device_get_softc(dev);
 1004 
 1005 #ifdef DEVICE_POLLING
 1006         if (sc->ifp->if_capenable & IFCAP_POLLING)
 1007                 ether_poll_deregister(sc->ifp);
 1008 #endif
 1009 
 1010         FXP_LOCK(sc);
 1011         /*
 1012          * Stop DMA and drop transmit queue, but disable interrupts first.
 1013          */
 1014         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 1015         fxp_stop(sc);
 1016         FXP_UNLOCK(sc);
 1017         callout_drain(&sc->stat_ch);
 1018 
 1019         /*
 1020          * Close down routes etc.
 1021          */
 1022         ether_ifdetach(sc->ifp);
 1023 
 1024         /*
 1025          * Unhook interrupt before dropping lock. This is to prevent
 1026          * races with fxp_intr().
 1027          */
 1028         bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
 1029         sc->ih = NULL;
 1030 
 1031         /* Release our allocated resources. */
 1032         fxp_release(sc);
 1033         return (0);
 1034 }
 1035 
 1036 /*
 1037  * Device shutdown routine. Called at system shutdown after sync. The
 1038  * main purpose of this routine is to shut off receiver DMA so that
 1039  * kernel memory doesn't get clobbered during warmboot.
 1040  */
 1041 static int
 1042 fxp_shutdown(device_t dev)
 1043 {
 1044 
 1045         /*
  1046          * Make sure that DMA is disabled prior to reboot. Not doing
  1047          * so could allow DMA to corrupt kernel memory during the
 1048          * reboot before the driver initializes.
 1049          */
 1050         return (fxp_suspend(dev));
 1051 }
 1052 
 1053 /*
 1054  * Device suspend routine.  Stop the interface and save some PCI
 1055  * settings in case the BIOS doesn't restore them properly on
 1056  * resume.
 1057  */
 1058 static int
 1059 fxp_suspend(device_t dev)
 1060 {
 1061         struct fxp_softc *sc = device_get_softc(dev);
 1062         struct ifnet *ifp;
 1063         int pmc;
 1064         uint16_t pmstat;
 1065 
 1066         FXP_LOCK(sc);
 1067 
 1068         ifp = sc->ifp;
 1069         if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) {
 1070                 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
 1071                 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 1072                 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
 1073                         /* Request PME. */
 1074                         pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 1075                         sc->flags |= FXP_FLAG_WOL;
 1076                         /* Reconfigure hardware to accept magic frames. */
 1077                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1078                         fxp_init_body(sc, 0);
 1079                 }
 1080                 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1081         }
 1082         fxp_stop(sc);
 1083 
 1084         sc->suspended = 1;
 1085 
 1086         FXP_UNLOCK(sc);
 1087         return (0);
 1088 }
 1089 
 1090 /*
  1091  * Device resume routine.  Re-enable busmastering and restart the
  1092  * interface if appropriate.
 1093  */
 1094 static int
 1095 fxp_resume(device_t dev)
 1096 {
 1097         struct fxp_softc *sc = device_get_softc(dev);
 1098         struct ifnet *ifp = sc->ifp;
 1099         int pmc;
 1100         uint16_t pmstat;
 1101 
 1102         FXP_LOCK(sc);
 1103 
 1104         if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) {
 1105                 sc->flags &= ~FXP_FLAG_WOL;
 1106                 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
 1107                 /* Disable PME and clear PME status. */
 1108                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 1109                 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1110                 if ((sc->flags & FXP_FLAG_WOLCAP) != 0)
 1111                         CSR_WRITE_1(sc, FXP_CSR_PMDR,
 1112                             CSR_READ_1(sc, FXP_CSR_PMDR));
 1113         }
 1114 
 1115         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
 1116         DELAY(10);
 1117 
 1118         /* reinitialize interface if necessary */
 1119         if (ifp->if_flags & IFF_UP)
 1120                 fxp_init_body(sc, 1);
 1121 
 1122         sc->suspended = 0;
 1123 
 1124         FXP_UNLOCK(sc);
 1125         return (0);
 1126 }
 1127 
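       /*
        * Clock 'length' bits of 'data' into the serial EEPROM, most
        * significant bit first, toggling the EESK clock once per bit
        * while chip select (EECS) is held asserted.
        */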
 1128 static void
 1129 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
 1130 {
 1131         uint16_t reg;
 1132         int x;
 1133 
 1134         /*
 1135          * Shift in data.
 1136          */
 1137         for (x = 1 << (length - 1); x; x >>= 1) {
 1138                 if (data & x)
 1139                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1140                 else
 1141                         reg = FXP_EEPROM_EECS;
 1142                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1143                 DELAY(1);
 1144                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1145                 DELAY(1);
 1146                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1147                 DELAY(1);
 1148         }
 1149 }
 1150 
 1151 /*
 1152  * Read from the serial EEPROM. Basically, you manually shift in
 1153  * the read opcode (one bit at a time) and then shift in the address,
 1154  * and then you shift out the data (all of this one bit at a time).
 1155  * The word size is 16 bits, so you have to provide the address for
 1156  * every 16 bits of data.
 1157  */
 1158 static uint16_t
 1159 fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
 1160 {
 1161         uint16_t reg, data;
 1162         int x;
 1163 
 1164         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1165         /*
 1166          * Shift in read opcode.
 1167          */
 1168         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
 1169         /*
 1170          * Shift in address.
 1171          */
 1172         data = 0;
 1173         for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
 1174                 if (offset & x)
 1175                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1176                 else
 1177                         reg = FXP_EEPROM_EECS;
 1178                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1179                 DELAY(1);
 1180                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1181                 DELAY(1);
 1182                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1183                 DELAY(1);
 1184                 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
 1185                 data++;
 1186                 if (autosize && reg == 0) {
 1187                         sc->eeprom_size = data;
 1188                         break;
 1189                 }
 1190         }
 1191         /*
 1192          * Shift out data.
 1193          */
 1194         data = 0;
 1195         reg = FXP_EEPROM_EECS;
 1196         for (x = 1 << 15; x; x >>= 1) {
 1197                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1198                 DELAY(1);
 1199                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1200                         data |= x;
 1201                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1202                 DELAY(1);
 1203         }
 1204         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1205         DELAY(1);
 1206 
 1207         return (data);
 1208 }
 1209 
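       /*
        * Write one 16-bit word to the serial EEPROM: enable erase/write,
        * shift in the write opcode, address and data, poll EEDO until the
        * part reports completion, then disable erase/write again.
        */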
 1210 static void
 1211 fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
 1212 {
 1213         int i;
 1214 
 1215         /*
 1216          * Erase/write enable.
 1217          */
 1218         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1219         fxp_eeprom_shiftin(sc, 0x4, 3);
 1220         fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
 1221         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1222         DELAY(1);
 1223         /*
 1224          * Shift in write opcode, address, data.
 1225          */
 1226         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1227         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
 1228         fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
 1229         fxp_eeprom_shiftin(sc, data, 16);
 1230         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1231         DELAY(1);
 1232         /*
 1233          * Wait for EEPROM to finish up.
 1234          */
 1235         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1236         DELAY(1);
 1237         for (i = 0; i < 1000; i++) {
 1238                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1239                         break;
 1240                 DELAY(50);
 1241         }
 1242         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1243         DELAY(1);
 1244         /*
 1245          * Erase/write disable.
 1246          */
 1247         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1248         fxp_eeprom_shiftin(sc, 0x4, 3);
 1249         fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
 1250         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1251         DELAY(1);
 1252 }
 1253 
 1254 /*
 1255  * From NetBSD:
 1256  *
 1257  * Figure out EEPROM size.
 1258  *
 1259  * 559's can have either 64-word or 256-word EEPROMs, the 558
 1260  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
  1261  * talks about the existence of 16- to 256-word EEPROMs.
 1262  *
 1263  * The only known sizes are 64 and 256, where the 256 version is used
 1264  * by CardBus cards to store CIS information.
 1265  *
 1266  * The address is shifted in msb-to-lsb, and after the last
 1267  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
  1268  * after which follows the actual data. We try to detect this zero by
  1269  * probing the data-out bit in the EEPROM control register just after
  1270  * having shifted in a bit. If the bit is zero, we assume we've
  1271  * shifted enough address bits. The data-out should be tri-stated
  1272  * before this, which should translate to a logical one.
 1273  */
 1274 static void
 1275 fxp_autosize_eeprom(struct fxp_softc *sc)
 1276 {
 1277 
  1278         /* guess a maximum size of 256 words (eeprom_size is in address bits) */
 1279         sc->eeprom_size = 8;
 1280 
 1281         /* autosize */
 1282         (void) fxp_eeprom_getword(sc, 0, 1);
 1283 }
 1284 
 1285 static void
 1286 fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1287 {
 1288         int i;
 1289 
 1290         for (i = 0; i < words; i++)
 1291                 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
 1292 }
 1293 
 1294 static void
 1295 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1296 {
 1297         int i;
 1298 
 1299         for (i = 0; i < words; i++)
 1300                 fxp_eeprom_putword(sc, offset + i, data[i]);
 1301 }
 1302 
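       /*
        * Read the entire EEPROM into sc->eeprom and verify its checksum:
        * all words, including the checksum word itself, should sum to
        * 0xBABA.
        */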
 1303 static void
 1304 fxp_load_eeprom(struct fxp_softc *sc)
 1305 {
 1306         int i;
 1307         uint16_t cksum;
 1308 
 1309         fxp_read_eeprom(sc, sc->eeprom, 0, 1 << sc->eeprom_size);
 1310         cksum = 0;
 1311         for (i = 0; i < (1 << sc->eeprom_size) - 1; i++)
 1312                 cksum += sc->eeprom[i];
 1313         cksum = 0xBABA - cksum;
 1314         if (cksum != sc->eeprom[(1 << sc->eeprom_size) - 1])
 1315                 device_printf(sc->dev,
 1316                     "EEPROM checksum mismatch! (0x%04x -> 0x%04x)\n",
 1317                     cksum, sc->eeprom[(1 << sc->eeprom_size) - 1]);
 1318 }
 1319 
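      /*
       * Illustrative only -- the EEPROM is self-consistent when the 16-bit sum
       * of all of its words, including the final checksum word, equals 0xBABA;
       * that is the same condition fxp_load_eeprom() checks above by comparing
       * the last word against 0xBABA minus the sum of the others.  The helper
       * name is hypothetical and the function is not used by the driver.
       */
      static int __unused
      fxp_eeprom_cksum_ok(const uint16_t *eeprom, int nwords)
      {
              uint16_t sum;
              int i;

              sum = 0;
              for (i = 0; i < nwords; i++)
                      sum += eeprom[i];
              return (sum == 0xBABA);
      }
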
 1320 /*
 1321  * Grab the softc lock and call the real fxp_start_body() routine
 1322  */
 1323 static void
 1324 fxp_start(struct ifnet *ifp)
 1325 {
 1326         struct fxp_softc *sc = ifp->if_softc;
 1327 
 1328         FXP_LOCK(sc);
 1329         fxp_start_body(ifp);
 1330         FXP_UNLOCK(sc);
 1331 }
 1332 
 1333 /*
 1334  * Start packet transmission on the interface.
 1335  * This routine must be called with the softc lock held, and is an
 1336  * internal entry point only.
 1337  */
 1338 static void
 1339 fxp_start_body(struct ifnet *ifp)
 1340 {
 1341         struct fxp_softc *sc = ifp->if_softc;
 1342         struct mbuf *mb_head;
 1343         int txqueued;
 1344 
 1345         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1346 
 1347         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1348             IFF_DRV_RUNNING)
 1349                 return;
 1350 
 1351         if (sc->tx_queued > FXP_NTXCB_HIWAT)
 1352                 fxp_txeof(sc);
 1353         /*
 1354          * We're finished if there is nothing more to add to the list or if
 1355          * we're all filled up with buffers to transmit.
 1356          * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
 1357          *       a NOP command when needed.
 1358          */
 1359         txqueued = 0;
 1360         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1361             sc->tx_queued < FXP_NTXCB - 1) {
 1362 
 1363                 /*
 1364                  * Grab a packet to transmit.
 1365                  */
 1366                 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
 1367                 if (mb_head == NULL)
 1368                         break;
 1369 
 1370                 if (fxp_encap(sc, &mb_head)) {
 1371                         if (mb_head == NULL)
 1372                                 break;
 1373                         IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
 1374                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                              break;
 1375                 }
 1376                 txqueued++;
 1377                 /*
 1378                  * Pass packet to bpf if there is a listener.
 1379                  */
 1380                 BPF_MTAP(ifp, mb_head);
 1381         }
 1382 
 1383         /*
 1384          * We're finished. If we added to the list, issue a RESUME to get DMA
 1385          * going again if suspended.
 1386          */
 1387         if (txqueued > 0) {
 1388                 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1389                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1390                 fxp_scb_wait(sc);
 1391                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
 1392                 /*
 1393                  * Set a 5 second timer just in case we don't hear
 1394                  * from the card again.
 1395                  */
 1396                 sc->watchdog_timer = 5;
 1397         }
 1398 }
 1399 
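      /*
       * Encapsulate a single mbuf chain into the next free TxCB.  Returns zero
       * on success.  On failure, *m_head is either left intact (so the caller
       * can requeue it) or freed and set to NULL; fxp_start_body() above
       * distinguishes the two cases.
       */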
 1400 static int
 1401 fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
 1402 {
 1403         struct ifnet *ifp;
 1404         struct mbuf *m;
 1405         struct fxp_tx *txp;
 1406         struct fxp_cb_tx *cbp;
 1407         struct tcphdr *tcp;
 1408         bus_dma_segment_t segs[FXP_NTXSEG];
 1409         int error, i, nseg, tcp_payload;
 1410 
 1411         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1412         ifp = sc->ifp;
 1413 
 1414         tcp_payload = 0;
 1415         tcp = NULL;
 1416         /*
 1417          * Get pointer to next available tx desc.
 1418          */
 1419         txp = sc->fxp_desc.tx_last->tx_next;
 1420 
 1421         /*
 1422          * A note in Appendix B of the Intel 8255x 10/100 Mbps
 1423          * Ethernet Controller Family Open Source Software
 1424          * Developer Manual says:
 1425          *   Using software parsing is only allowed with legal
 1426          *   TCP/IP or UDP/IP packets.
 1427          *   ...
 1428          *   For all other datagrams, hardware parsing must
 1429          *   be used.
 1430          * Software parsing appears to truncate ICMP and
 1431          * fragmented UDP packets that contain one to three
 1432          * bytes in the second (and final) mbuf of the packet.
 1433          */
 1434         if (sc->flags & FXP_FLAG_EXT_RFA)
 1435                 txp->tx_cb->ipcb_ip_activation_high =
 1436                     FXP_IPCB_HARDWAREPARSING_ENABLE;
 1437 
 1438         m = *m_head;
 1439         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
 1440                 /*
 1441                  * The 82550/82551 requires that the ethernet/IP/TCP
 1442                  * headers be contained in the first active transmit buffer.
 1443                  */
 1444                 struct ether_header *eh;
 1445                 struct ip *ip;
 1446                 uint32_t ip_off, poff;
 1447 
 1448                 if (M_WRITABLE(*m_head) == 0) {
 1449                         /* Get a writable copy. */
 1450                         m = m_dup(*m_head, M_DONTWAIT);
 1451                         m_freem(*m_head);
 1452                         if (m == NULL) {
 1453                                 *m_head = NULL;
 1454                                 return (ENOBUFS);
 1455                         }
 1456                         *m_head = m;
 1457                 }
 1458                 ip_off = sizeof(struct ether_header);
 1459                 m = m_pullup(*m_head, ip_off);
 1460                 if (m == NULL) {
 1461                         *m_head = NULL;
 1462                         return (ENOBUFS);
 1463                 }
 1464                 eh = mtod(m, struct ether_header *);
 1465                 /* Check the existence of VLAN tag. */
 1466                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 1467                         ip_off = sizeof(struct ether_vlan_header);
 1468                         m = m_pullup(m, ip_off);
 1469                         if (m == NULL) {
 1470                                 *m_head = NULL;
 1471                                 return (ENOBUFS);
 1472                         }
 1473                 }
 1474                 m = m_pullup(m, ip_off + sizeof(struct ip));
 1475                 if (m == NULL) {
 1476                         *m_head = NULL;
 1477                         return (ENOBUFS);
 1478                 }
 1479                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1480                 poff = ip_off + (ip->ip_hl << 2);
 1481                 m = m_pullup(m, poff + sizeof(struct tcphdr));
 1482                 if (m == NULL) {
 1483                         *m_head = NULL;
 1484                         return (ENOBUFS);
 1485                 }
 1486                 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1487                 m = m_pullup(m, poff + (tcp->th_off << 2));
 1488                 if (m == NULL) {
 1489                         *m_head = NULL;
 1490                         return (ENOBUFS);
 1491                 }
 1492 
 1493                 /*
 1494                  * The 82550/82551 doesn't modify the IP length and pseudo
 1495                  * checksum in the first frame, so the driver computes them here.
 1496                  */
 1497                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1498                 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1499                 ip->ip_sum = 0;
 1500                 ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) +
 1501                     (tcp->th_off << 2));
 1502                 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 1503                     htons(IPPROTO_TCP + (tcp->th_off << 2) +
 1504                     m->m_pkthdr.tso_segsz));
 1505                 /* Compute total TCP payload. */
 1506                 tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2);
 1507                 tcp_payload -= tcp->th_off << 2;
 1508                 *m_head = m;
 1509         } else if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) {
 1510                 /*
 1511                  * Deal with TCP/IP checksum offload. Note that
 1512                  * in order for TCP checksum offload to work,
 1513                  * the pseudo header checksum must have already
 1514                  * been computed and stored in the checksum field
 1515                  * in the TCP header. The stack should have
 1516                  * already done this for us.
 1517                  */
 1518                 txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
 1519                 if (m->m_pkthdr.csum_flags & CSUM_TCP)
 1520                         txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET;
 1521 
 1522 #ifdef FXP_IP_CSUM_WAR
 1523                 /*
 1524                  * XXX The 82550 chip appears to have trouble
 1525                  * dealing with IP header checksums in very small
 1526                  * datagrams, namely fragments from 1 to 3 bytes
 1527                  * in size. For example, say you want to transmit
 1528                  * a UDP packet of 1473 bytes. The packet will be
 1529                  * fragmented over two IP datagrams, the latter
 1530                  * containing only one byte of data. The 82550 will
 1531                  * botch the header checksum on the 1-byte fragment.
 1532                  * As long as the datagram contains 4 or more bytes
 1533                  * of data, you're ok.
 1534                  *
 1535                  * The following code attempts to work around this
 1536                  * problem: if the datagram is less than 38 bytes
 1537                  * in size (14 bytes ether header, 20 bytes IP header,
 1538                  * plus 4 bytes of data), we punt and compute the IP
 1539                  * header checksum by hand. This workaround doesn't
 1540                  * work very well, however, since it can be fooled
 1541                  * by things like VLAN tags and IP options that make
 1542                  * the header sizes/offsets vary.
 1543                  */
 1544 
 1545                 if (m->m_pkthdr.csum_flags & CSUM_IP) {
 1546                         if (m->m_pkthdr.len < 38) {
 1547                                 struct ip *ip;
 1548                                 m->m_data += ETHER_HDR_LEN;
 1549                                 ip = mtod(m, struct ip *);
 1550                                 ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
 1551                                 m->m_data -= ETHER_HDR_LEN;
 1552                                 m->m_pkthdr.csum_flags &= ~CSUM_IP;
 1553                         } else {
 1554                                 txp->tx_cb->ipcb_ip_activation_high =
 1555                                     FXP_IPCB_HARDWAREPARSING_ENABLE;
 1556                                 txp->tx_cb->ipcb_ip_schedule |=
 1557                                     FXP_IPCB_IP_CHECKSUM_ENABLE;
 1558                         }
 1559                 }
 1560 #endif
 1561         }
 1562 
 1563         error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head,
 1564             segs, &nseg, 0);
 1565         if (error == EFBIG) {
 1566                 m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg);
 1567                 if (m == NULL) {
 1568                         m_freem(*m_head);
 1569                         *m_head = NULL;
 1570                         return (ENOMEM);
 1571                 }
 1572                 *m_head = m;
 1573                 error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map,
 1574                     *m_head, segs, &nseg, 0);
 1575                 if (error != 0) {
 1576                         m_freem(*m_head);
 1577                         *m_head = NULL;
 1578                         return (ENOMEM);
 1579                 }
 1580         } else if (error != 0)
 1581                 return (error);
 1582         if (nseg == 0) {
 1583                 m_freem(*m_head);
 1584                 *m_head = NULL;
 1585                 return (EIO);
 1586         }
 1587 
 1588         KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));
 1589         bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
 1590 
 1591         cbp = txp->tx_cb;
 1592         for (i = 0; i < nseg; i++) {
 1593                 /*
 1594                  * If this is an 82550/82551, then we're using extended
 1595                  * TxCBs _and_ we're using checksum offload. This means
 1596                  * that the TxCB is really an IPCB. One major difference
 1597                  * between the two is that with plain extended TxCBs,
 1598                  * the bottom half of the TxCB contains two entries from
 1599                  * the TBD array, whereas IPCBs contain just one entry:
 1600                  * one entry (8 bytes) has been sacrificed for the TCP/IP
 1601                  * checksum offload control bits. So to make things work
 1602                  * right, we have to start filling in the TBD array
 1603                  * starting from a different place depending on whether
 1604                  * the chip is an 82550/82551 or not.
 1605                  */
 1606                 if (sc->flags & FXP_FLAG_EXT_RFA) {
 1607                         cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
 1608                         cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
 1609                 } else {
 1610                         cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
 1611                         cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
 1612                 }
 1613         }
 1614         if (sc->flags & FXP_FLAG_EXT_RFA) {
 1615                 /* Configure dynamic TBD for 82550/82551. */
 1616                 cbp->tbd_number = 0xFF;
 1617                 cbp->tbd[nseg].tb_size |= htole32(0x8000);
 1618         } else
 1619                 cbp->tbd_number = nseg;
 1620         /* Configure TSO. */
 1621         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
 1622                 cbp->tbd[-1].tb_size = htole32(m->m_pkthdr.tso_segsz << 16);
 1623                 cbp->tbd[1].tb_size |= htole32(tcp_payload << 16);
 1624                 cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE |
 1625                     FXP_IPCB_IP_CHECKSUM_ENABLE |
 1626                     FXP_IPCB_TCP_PACKET |
 1627                     FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
 1628         }
 1629         /* Configure VLAN hardware tag insertion. */
 1630         if ((m->m_flags & M_VLANTAG) != 0) {
 1631                 cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag);
 1632                 txp->tx_cb->ipcb_ip_activation_high |=
 1633                     FXP_IPCB_INSERTVLAN_ENABLE;
 1634         }
 1635 
 1636         txp->tx_mbuf = m;
 1637         txp->tx_cb->cb_status = 0;
 1638         txp->tx_cb->byte_count = 0;
 1639         if (sc->tx_queued != FXP_CXINT_THRESH - 1)
 1640                 txp->tx_cb->cb_command =
 1641                     htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1642                     FXP_CB_COMMAND_S);
 1643         else
 1644                 txp->tx_cb->cb_command =
 1645                     htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1646                     FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
 1647         if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0)
 1648                 txp->tx_cb->tx_threshold = tx_threshold;
 1649 
 1650         /*
 1651          * Advance the end of list forward.
 1652          */
 1653         sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
 1654         sc->fxp_desc.tx_last = txp;
 1655 
 1656         /*
 1657          * Advance the beginning of the list forward if there are
 1658          * no other packets queued (when nothing is queued, tx_first
 1659          * sits on the last TxCB that was sent out).
 1660          */
 1661         if (sc->tx_queued == 0)
 1662                 sc->fxp_desc.tx_first = txp;
 1663 
 1664         sc->tx_queued++;
 1665 
 1666         return (0);
 1667 }
 1668 
 1669 #ifdef DEVICE_POLLING
 1670 static poll_handler_t fxp_poll;
 1671 
 1672 static int
 1673 fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1674 {
 1675         struct fxp_softc *sc = ifp->if_softc;
 1676         uint8_t statack;
 1677         int rx_npkts = 0;
 1678 
 1679         FXP_LOCK(sc);
 1680         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1681                 FXP_UNLOCK(sc);
 1682                 return (rx_npkts);
 1683         }
 1684 
 1685         statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
 1686             FXP_SCB_STATACK_FR;
 1687         if (cmd == POLL_AND_CHECK_STATUS) {
 1688                 uint8_t tmp;
 1689 
 1690                 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
 1691                 if (tmp == 0xff || tmp == 0) {
 1692                         FXP_UNLOCK(sc);
 1693                         return (rx_npkts); /* nothing to do */
 1694                 }
 1695                 tmp &= ~statack;
 1696                 /* ack what we can */
 1697                 if (tmp != 0)
 1698                         CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
 1699                 statack |= tmp;
 1700         }
 1701         rx_npkts = fxp_intr_body(sc, ifp, statack, count);
 1702         FXP_UNLOCK(sc);
 1703         return (rx_npkts);
 1704 }
 1705 #endif /* DEVICE_POLLING */
 1706 
 1707 /*
 1708  * Process interface interrupts.
 1709  */
 1710 static void
 1711 fxp_intr(void *xsc)
 1712 {
 1713         struct fxp_softc *sc = xsc;
 1714         struct ifnet *ifp = sc->ifp;
 1715         uint8_t statack;
 1716 
 1717         FXP_LOCK(sc);
 1718         if (sc->suspended) {
 1719                 FXP_UNLOCK(sc);
 1720                 return;
 1721         }
 1722 
 1723 #ifdef DEVICE_POLLING
 1724         if (ifp->if_capenable & IFCAP_POLLING) {
 1725                 FXP_UNLOCK(sc);
 1726                 return;
 1727         }
 1728 #endif
 1729         while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
 1730                 /*
 1731                  * It should not be possible to have all bits set; the
 1732                  * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
 1733                  * all bits are set, this may indicate that the card has
 1734                  * been physically ejected, so ignore it.
 1735                  */
 1736                 if (statack == 0xff) {
 1737                         FXP_UNLOCK(sc);
 1738                         return;
 1739                 }
 1740 
 1741                 /*
 1742                  * First ACK all the interrupts in this pass.
 1743                  */
 1744                 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
 1745                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1746                         fxp_intr_body(sc, ifp, statack, -1);
 1747         }
 1748         FXP_UNLOCK(sc);
 1749 }
 1750 
 1751 static void
 1752 fxp_txeof(struct fxp_softc *sc)
 1753 {
 1754         struct ifnet *ifp;
 1755         struct fxp_tx *txp;
 1756 
 1757         ifp = sc->ifp;
 1758         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1759             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1760         for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
 1761             (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
 1762             txp = txp->tx_next) {
 1763                 if (txp->tx_mbuf != NULL) {
 1764                         bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
 1765                             BUS_DMASYNC_POSTWRITE);
 1766                         bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
 1767                         m_freem(txp->tx_mbuf);
 1768                         txp->tx_mbuf = NULL;
 1769                         /* clear this to reset csum offload bits */
 1770                         txp->tx_cb->tbd[0].tb_addr = 0;
 1771                 }
 1772                 sc->tx_queued--;
 1773                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1774         }
 1775         sc->fxp_desc.tx_first = txp;
 1776         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1777             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1778         if (sc->tx_queued == 0)
 1779                 sc->watchdog_timer = 0;
 1780 }
 1781 
 1782 static void
 1783 fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m,
 1784     uint16_t status, int pos)
 1785 {
 1786         struct ether_header *eh;
 1787         struct ip *ip;
 1788         struct udphdr *uh;
 1789         int32_t hlen, len, pktlen, temp32;
 1790         uint16_t csum, *opts;
 1791 
 1792         if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
 1793                 if ((status & FXP_RFA_STATUS_PARSE) != 0) {
 1794                         if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
 1795                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1796                         if (status & FXP_RFDX_CS_IP_CSUM_VALID)
 1797                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1798                         if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
 1799                             (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
 1800                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 1801                                     CSUM_PSEUDO_HDR;
 1802                                 m->m_pkthdr.csum_data = 0xffff;
 1803                         }
 1804                 }
 1805                 return;
 1806         }
 1807 
 1808         pktlen = m->m_pkthdr.len;
 1809         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1810                 return;
 1811         eh = mtod(m, struct ether_header *);
 1812         if (eh->ether_type != htons(ETHERTYPE_IP))
 1813                 return;
 1814         ip = (struct ip *)(eh + 1);
 1815         if (ip->ip_v != IPVERSION)
 1816                 return;
 1817 
 1818         hlen = ip->ip_hl << 2;
 1819         pktlen -= sizeof(struct ether_header);
 1820         if (hlen < sizeof(struct ip))
 1821                 return;
 1822         if (ntohs(ip->ip_len) < hlen)
 1823                 return;
 1824         if (ntohs(ip->ip_len) != pktlen)
 1825                 return;
 1826         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1827                 return; /* can't handle fragmented packet */
 1828 
 1829         switch (ip->ip_p) {
 1830         case IPPROTO_TCP:
 1831                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1832                         return;
 1833                 break;
 1834         case IPPROTO_UDP:
 1835                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1836                         return;
 1837                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1838                 if (uh->uh_sum == 0)
 1839                         return; /* no checksum */
 1840                 break;
 1841         default:
 1842                 return;
 1843         }
 1844         /* Extract computed checksum. */
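              /*
               * The controller stores it just past the received data; `pos'
               * is the adjusted frame length passed in from fxp_intr_body().
               */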
 1845         csum = be16dec(mtod(m, char *) + pos);
 1846         /* checksum fixup for IP options */
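              /*
               * The subtraction below is done in one's-complement arithmetic:
               * any borrow shows up in the high half of temp32 and is folded
               * back in (end-around borrow), as in RFC 1071.
               */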
 1847         len = hlen - sizeof(struct ip);
 1848         if (len > 0) {
 1849                 opts = (uint16_t *)(ip + 1);
 1850                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 1851                         temp32 = csum - *opts;
 1852                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1853                         csum = temp32 & 65535;
 1854                 }
 1855         }
 1856         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1857         m->m_pkthdr.csum_data = csum;
 1858 }
 1859 
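      /*
       * Common interrupt/polling work: reap completed TxCBs, process received
       * frames off the RFA list, and restart the receiver if a no-resources
       * (RNR) condition was seen.  Called with the softc lock held; returns
       * the number of packets passed up, which fxp_poll() reports back.
       */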
 1860 static int
 1861 fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
 1862     int count)
 1863 {
 1864         struct mbuf *m;
 1865         struct fxp_rx *rxp;
 1866         struct fxp_rfa *rfa;
 1867         int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
 1868         int rx_npkts;
 1869         uint16_t status;
 1870 
 1871         rx_npkts = 0;
 1872         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1873 
 1874         if (rnr)
 1875                 sc->rnr++;
 1876 #ifdef DEVICE_POLLING
 1877         /* Pick up a deferred RNR condition if `count' ran out last time. */
 1878         if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
 1879                 sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
 1880                 rnr = 1;
 1881         }
 1882 #endif
 1883 
 1884         /*
 1885          * Free any finished transmit mbuf chains.
 1886          *
 1887          * Handle the CNA event like a CXTNO event. It used to
 1888          * be that this event (control unit not ready) was not
 1889          * encountered, but it is now with the SMPng modifications.
 1890          * The exact sequence of events that occur when the interface
 1891          * is brought up is different now, and if this event
 1892          * goes unhandled, the configuration/rxfilter setup sequence
 1893          * can stall for several seconds. The result is that no
 1894          * packets go out onto the wire for about 5 to 10 seconds
 1895          * after the interface is ifconfig'ed for the first time.
 1896          */
 1897         if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
 1898                 fxp_txeof(sc);
 1899 
 1900         /*
 1901          * Try to start more packets transmitting.
 1902          */
 1903         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1904                 fxp_start_body(ifp);
 1905 
 1906         /*
 1907          * Just return if nothing happened on the receive side.
 1908          */
 1909         if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
 1910                 return (rx_npkts);
 1911 
 1912         /*
 1913          * Process receiver interrupts. If a no-resource (RNR)
 1914          * condition exists, get whatever packets we can and
 1915          * re-start the receiver.
 1916          *
 1917          * When using polling, we do not process the list to completion,
 1918          * so when we get an RNR interrupt we must defer the restart
 1919          * until we hit the last buffer with the C bit set.
 1920          * If we run out of cycles and the head RFA has the C bit set,
 1921          * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
 1922          * that the info will be used in the subsequent polling cycle.
 1923          */
 1924         for (;;) {
 1925                 rxp = sc->fxp_desc.rx_head;
 1926                 m = rxp->rx_mbuf;
 1927                 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
 1928                     RFA_ALIGNMENT_FUDGE);
 1929                 bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 1930                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1931 
 1932 #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
 1933                 if (count >= 0 && count-- == 0) {
 1934                         if (rnr) {
 1935                                 /* Defer RNR processing until the next time. */
 1936                                 sc->flags |= FXP_FLAG_DEFERRED_RNR;
 1937                                 rnr = 0;
 1938                         }
 1939                         break;
 1940                 }
 1941 #endif /* DEVICE_POLLING */
 1942 
 1943                 status = le16toh(rfa->rfa_status);
 1944                 if ((status & FXP_RFA_STATUS_C) == 0)
 1945                         break;
 1946 
 1947                 if ((status & FXP_RFA_STATUS_RNR) != 0)
 1948                         rnr++;
 1949                 /*
 1950                  * Advance head forward.
 1951                  */
 1952                 sc->fxp_desc.rx_head = rxp->rx_next;
 1953 
 1954                 /*
 1955                  * Add a new buffer to the receive chain.
 1956                  * If this fails, the old buffer is recycled
 1957                  * instead.
 1958                  */
 1959                 if (fxp_new_rfabuf(sc, rxp) == 0) {
 1960                         int total_len;
 1961 
 1962                         /*
 1963                          * Fetch packet length (the top 2 bits of
 1964                          * actual_size are flags set by the controller
 1965                          * upon completion), and drop the packet in case
 1966                          * of bogus length or CRC errors.
 1967                          */
 1968                         total_len = le16toh(rfa->actual_size) & 0x3fff;
 1969                         if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
 1970                             (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 1971                                 /* Adjust for appended checksum bytes. */
 1972                                 total_len -= 2;
 1973                         }
 1974                         if (total_len < (int)sizeof(struct ether_header) ||
 1975                             total_len > (MCLBYTES - RFA_ALIGNMENT_FUDGE -
 1976                             sc->rfa_size) ||
 1977                             status & (FXP_RFA_STATUS_CRC |
 1978                             FXP_RFA_STATUS_ALIGN | FXP_RFA_STATUS_OVERRUN)) {
 1979                                 m_freem(m);
 1980                                 fxp_add_rfabuf(sc, rxp);
 1981                                 continue;
 1982                         }
 1983 
 1984                         m->m_pkthdr.len = m->m_len = total_len;
 1985                         m->m_pkthdr.rcvif = ifp;
 1986 
 1987                         /* Do IP checksum checking. */
 1988                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 1989                                 fxp_rxcsum(sc, ifp, m, status, total_len);
 1990                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 1991                             (status & FXP_RFA_STATUS_VLAN) != 0) {
 1992                                 m->m_pkthdr.ether_vtag =
 1993                                     ntohs(rfa->rfax_vlan_id);
 1994                                 m->m_flags |= M_VLANTAG;
 1995                         }
 1996                         /*
 1997                          * Drop locks before calling if_input() since it
 1998                          * may re-enter fxp_start() in the netisr case.
 1999                          * This would result in a lock reversal.  Better
 2000                          * performance might be obtained by chaining all
 2001                          * packets received, dropping the lock, and then
 2002                          * calling if_input() on each one.
 2003                          */
 2004                         FXP_UNLOCK(sc);
 2005                         (*ifp->if_input)(ifp, m);
 2006                         FXP_LOCK(sc);
 2007                         rx_npkts++;
 2008                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 2009                                 return (rx_npkts);
 2010                 } else {
 2011                         /* Reuse RFA and loaded DMA map. */
 2012                         ifp->if_iqdrops++;
 2013                         fxp_discard_rfabuf(sc, rxp);
 2014                 }
 2015                 fxp_add_rfabuf(sc, rxp);
 2016         }
 2017         if (rnr) {
 2018                 fxp_scb_wait(sc);
 2019                 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
 2020                     sc->fxp_desc.rx_head->rx_addr);
 2021                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 2022         }
 2023         return (rx_npkts);
 2024 }
 2025 
 2026 static void
 2027 fxp_update_stats(struct fxp_softc *sc)
 2028 {
 2029         struct ifnet *ifp = sc->ifp;
 2030         struct fxp_stats *sp = sc->fxp_stats;
 2031         struct fxp_hwstats *hsp;
 2032         uint32_t *status;
 2033 
 2034         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2035 
 2036         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2037             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2038         /* Update statistical counters. */
 2039         if (sc->revision >= FXP_REV_82559_A0)
 2040                 status = &sp->completion_status;
 2041         else if (sc->revision >= FXP_REV_82558_A4)
 2042                 status = (uint32_t *)&sp->tx_tco;
 2043         else
 2044                 status = &sp->tx_pause;
 2045         if (*status == htole32(FXP_STATS_DR_COMPLETE)) {
 2046                 hsp = &sc->fxp_hwstats;
 2047                 hsp->tx_good += le32toh(sp->tx_good);
 2048                 hsp->tx_maxcols += le32toh(sp->tx_maxcols);
 2049                 hsp->tx_latecols += le32toh(sp->tx_latecols);
 2050                 hsp->tx_underruns += le32toh(sp->tx_underruns);
 2051                 hsp->tx_lostcrs += le32toh(sp->tx_lostcrs);
 2052                 hsp->tx_deffered += le32toh(sp->tx_deffered);
 2053                 hsp->tx_single_collisions += le32toh(sp->tx_single_collisions);
 2054                 hsp->tx_multiple_collisions +=
 2055                     le32toh(sp->tx_multiple_collisions);
 2056                 hsp->tx_total_collisions += le32toh(sp->tx_total_collisions);
 2057                 hsp->rx_good += le32toh(sp->rx_good);
 2058                 hsp->rx_crc_errors += le32toh(sp->rx_crc_errors);
 2059                 hsp->rx_alignment_errors += le32toh(sp->rx_alignment_errors);
 2060                 hsp->rx_rnr_errors += le32toh(sp->rx_rnr_errors);
 2061                 hsp->rx_overrun_errors += le32toh(sp->rx_overrun_errors);
 2062                 hsp->rx_cdt_errors += le32toh(sp->rx_cdt_errors);
 2063                 hsp->rx_shortframes += le32toh(sp->rx_shortframes);
 2064                 hsp->tx_pause += le32toh(sp->tx_pause);
 2065                 hsp->rx_pause += le32toh(sp->rx_pause);
 2066                 hsp->rx_controls += le32toh(sp->rx_controls);
 2067                 hsp->tx_tco += le16toh(sp->tx_tco);
 2068                 hsp->rx_tco += le16toh(sp->rx_tco);
 2069 
 2070                 ifp->if_opackets += le32toh(sp->tx_good);
 2071                 ifp->if_collisions += le32toh(sp->tx_total_collisions);
 2072                 if (sp->rx_good) {
 2073                         ifp->if_ipackets += le32toh(sp->rx_good);
 2074                         sc->rx_idle_secs = 0;
 2075                 } else if (sc->flags & FXP_FLAG_RXBUG) {
 2076                         /*
 2077                          * Receiver's been idle for another second.
 2078                          */
 2079                         sc->rx_idle_secs++;
 2080                 }
 2081                 ifp->if_ierrors +=
 2082                     le32toh(sp->rx_crc_errors) +
 2083                     le32toh(sp->rx_alignment_errors) +
 2084                     le32toh(sp->rx_rnr_errors) +
 2085                     le32toh(sp->rx_overrun_errors);
 2086                 /*
 2087                  * If any transmit underruns occurred, bump up the transmit
 2088                  * threshold by another 512 bytes (64 * 8).
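                       * (tx_threshold is kept in 8-byte units, so the cap of
                       * 192 below corresponds to 1536 bytes.)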
 2089                  */
 2090                 if (sp->tx_underruns) {
 2091                         ifp->if_oerrors += le32toh(sp->tx_underruns);
 2092                         if (tx_threshold < 192)
 2093                                 tx_threshold += 64;
 2094                 }
 2095                 *status = 0;
 2096                 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2097                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2098         }
 2099 }
 2100 
 2101 /*
 2102  * Update packet in/out/collision statistics. The i82557 doesn't
 2103  * allow you to access these counters without doing a fairly
 2104  * expensive DMA to get _all_ of the statistics it maintains, so
 2105  * we do this operation here only once per second. The statistics
 2106  * counters in the kernel are updated from the previous dump-stats
 2107  * DMA and then a new dump-stats DMA is started. The on-chip
 2108  * counters are zeroed when the DMA completes. If we can't start
 2109  * the DMA immediately, we don't wait - we just prepare to read
 2110  * them again next time.
 2111  */
 2112 static void
 2113 fxp_tick(void *xsc)
 2114 {
 2115         struct fxp_softc *sc = xsc;
 2116         struct ifnet *ifp = sc->ifp;
 2117 
 2118         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2119 
 2120         /* Update statistical counters. */
 2121         fxp_update_stats(sc);
 2122 
 2123         /*
 2124          * Release any xmit buffers that have completed DMA. This isn't
 2125          * strictly necessary to do here, but it's advantageous for mbufs
 2126          * with external storage to be released in a timely manner rather
 2127          * than being deferred for a potentially long time. This limits
 2128          * the delay to a maximum of one second.
 2129          */
 2130         fxp_txeof(sc);
 2131 
 2132         /*
 2133          * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
 2134          * then assume the receiver has locked up and attempt to clear
 2135          * the condition by reprogramming the multicast filter. This is
 2136          * a work-around for a bug in the 82557 where the receiver locks
 2137          * up if it gets certain types of garbage in the synchronization
 2138          * bits prior to the packet header. This bug is supposed to only
 2139          * occur in 10Mbps mode, but has been seen to occur in 100Mbps
 2140          * mode as well (perhaps due to a 10/100 speed transition).
 2141          */
 2142         if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
 2143                 sc->rx_idle_secs = 0;
 2144                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 2145                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2146                         fxp_init_body(sc, 1);
 2147                 }
 2148                 return;
 2149         }
 2150         /*
 2151          * If there is no pending command, start another stats
 2152          * dump. Otherwise punt for now.
 2153          */
 2154         if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
 2155                 /*
 2156                  * Start another stats dump.
 2157                  */
 2158                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
 2159         }
 2160         if (sc->miibus != NULL)
 2161                 mii_tick(device_get_softc(sc->miibus));
 2162 
 2163         /*
 2164          * Check that chip hasn't hung.
 2165          */
 2166         fxp_watchdog(sc);
 2167 
 2168         /*
 2169          * Schedule another timeout one second from now.
 2170          */
 2171         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 2172 }
 2173 
 2174 /*
 2175  * Stop the interface. Cancels the statistics updater and resets
 2176  * the interface.
 2177  */
 2178 static void
 2179 fxp_stop(struct fxp_softc *sc)
 2180 {
 2181         struct ifnet *ifp = sc->ifp;
 2182         struct fxp_tx *txp;
 2183         int i;
 2184 
 2185         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2186         sc->watchdog_timer = 0;
 2187 
 2188         /*
 2189          * Cancel stats updater.
 2190          */
 2191         callout_stop(&sc->stat_ch);
 2192 
 2193         /*
 2194          * Issue a selective reset: it preserves the PCI configuration, the
 2195          * configure and IA/multicast setup, and idles the RU and CU.
 2196          */
 2197         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
 2198         DELAY(50);
 2199         /* Disable interrupts. */
 2200         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 2201 
 2202         fxp_update_stats(sc);
 2203 
 2204         /*
 2205          * Release any xmit buffers.
 2206          */
 2207         txp = sc->fxp_desc.tx_list;
 2208         if (txp != NULL) {
 2209                 for (i = 0; i < FXP_NTXCB; i++) {
 2210                         if (txp[i].tx_mbuf != NULL) {
 2211                                 bus_dmamap_sync(sc->fxp_txmtag, txp[i].tx_map,
 2212                                     BUS_DMASYNC_POSTWRITE);
 2213                                 bus_dmamap_unload(sc->fxp_txmtag,
 2214                                     txp[i].tx_map);
 2215                                 m_freem(txp[i].tx_mbuf);
 2216                                 txp[i].tx_mbuf = NULL;
 2217                                 /* clear this to reset csum offload bits */
 2218                                 txp[i].tx_cb->tbd[0].tb_addr = 0;
 2219                         }
 2220                 }
 2221         }
 2222         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2223             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2224         sc->tx_queued = 0;
 2225 }
 2226 
 2227 /*
 2228  * Watchdog/transmit timeout handler. Called when a
 2229  * transmission is started on the interface, but no interrupt is
 2230  * received before the timeout. This usually indicates that the
 2231  * card has wedged for some reason.
 2232  */
 2233 static void
 2234 fxp_watchdog(struct fxp_softc *sc)
 2235 {
 2236 
 2237         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2238 
 2239         if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
 2240                 return;
 2241 
 2242         device_printf(sc->dev, "device timeout\n");
 2243         sc->ifp->if_oerrors++;
 2244 
 2245         sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2246         fxp_init_body(sc, 1);
 2247 }
 2248 
 2249 /*
 2250  * Acquire locks and then call the real initialization function.  This
 2251  * is necessary because ether_ioctl() calls if_init() and this would
 2252  * result in mutex recursion if the mutex was held.
 2253  */
 2254 static void
 2255 fxp_init(void *xsc)
 2256 {
 2257         struct fxp_softc *sc = xsc;
 2258 
 2259         FXP_LOCK(sc);
 2260         fxp_init_body(sc, 1);
 2261         FXP_UNLOCK(sc);
 2262 }
 2263 
 2264 /*
 2265  * Perform device initialization. This routine must be called with the
 2266  * softc lock held.
 2267  */
 2268 static void
 2269 fxp_init_body(struct fxp_softc *sc, int setmedia)
 2270 {
 2271         struct ifnet *ifp = sc->ifp;
 2272         struct mii_data *mii;
 2273         struct fxp_cb_config *cbp;
 2274         struct fxp_cb_ias *cb_ias;
 2275         struct fxp_cb_tx *tcbp;
 2276         struct fxp_tx *txp;
 2277         int i, prm;
 2278 
 2279         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2280 
 2281         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2282                 return;
 2283 
 2284         /*
 2285          * Cancel any pending I/O
 2286          */
 2287         fxp_stop(sc);
 2288 
 2289         /*
 2290          * Issue software reset, which also unloads the microcode.
 2291          */
 2292         sc->flags &= ~FXP_FLAG_UCODE;
 2293         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
 2294         DELAY(50);
 2295 
 2296         prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
 2297 
 2298         /*
 2299          * Initialize base of CBL and RFA memory. Loading with zero
 2300          * sets it up for regular linear addressing.
 2301          */
 2302         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
 2303         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
 2304 
 2305         fxp_scb_wait(sc);
 2306         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
 2307 
 2308         /*
 2309          * Initialize base of dump-stats buffer.
 2310          */
 2311         fxp_scb_wait(sc);
 2312         bzero(sc->fxp_stats, sizeof(struct fxp_stats));
 2313         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2314             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2315         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
 2316         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
 2317 
 2318         /*
 2319          * Attempt to load microcode if requested.
 2320          * For ICH-based controllers, do not load microcode.
 2321          */
 2322         if (sc->ident->ich == 0) {
 2323                 if (ifp->if_flags & IFF_LINK0 &&
 2324                     (sc->flags & FXP_FLAG_UCODE) == 0)
 2325                         fxp_load_ucode(sc);
 2326         }
 2327 
 2328         /*
 2329          * Set the IFF_ALLMULTI status. It's needed by the configure action
 2330          * command.
 2331          */
 2332         fxp_mc_addrs(sc);
 2333 
 2334         /*
 2335          * We temporarily use memory that contains the TxCB list to
 2336          * construct the config CB. The TxCB list memory is rebuilt
 2337          * later.
 2338          */
 2339         cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
 2340 
 2341         /*
 2342          * This bcopy is kind of disgusting, but there are a bunch of must be
 2343          * zero and must be one bits in this structure and this is the easiest
 2344          * way to initialize them all to proper values.
 2345          */
 2346         bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
 2347 
 2348         cbp->cb_status =        0;
 2349         cbp->cb_command =       htole16(FXP_CB_COMMAND_CONFIG |
 2350             FXP_CB_COMMAND_EL);
 2351         cbp->link_addr =        0xffffffff;     /* (no) next command */
 2352         cbp->byte_count =       sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
 2353         cbp->rx_fifo_limit =    8;      /* rx fifo threshold (32 bytes) */
 2354         cbp->tx_fifo_limit =    0;      /* tx fifo threshold (0 bytes) */
 2355         cbp->adaptive_ifs =     0;      /* (no) adaptive interframe spacing */
 2356         cbp->mwi_enable =       sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
 2357         cbp->type_enable =      0;      /* actually reserved */
 2358         cbp->read_align_en =    sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
 2359         cbp->end_wr_on_cl =     sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
 2360         cbp->rx_dma_bytecount = 0;      /* (no) rx DMA max */
 2361         cbp->tx_dma_bytecount = 0;      /* (no) tx DMA max */
 2362         cbp->dma_mbce =         0;      /* (disable) dma max counters */
 2363         cbp->late_scb =         0;      /* (don't) defer SCB update */
 2364         cbp->direct_dma_dis =   1;      /* disable direct rcv dma mode */
 2365         cbp->tno_int_or_tco_en = 0;     /* (disable) tx not okay interrupt */
 2366         cbp->ci_int =           1;      /* interrupt on CU idle */
 2367         cbp->ext_txcb_dis =     sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
 2368         cbp->ext_stats_dis =    1;      /* disable extended counters */
 2369         cbp->keep_overrun_rx =  0;      /* don't pass overrun frames to host */
 2370         cbp->save_bf =          sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
 2371         cbp->disc_short_rx =    !prm;   /* discard short packets */
 2372         cbp->underrun_retry =   1;      /* retry mode (once) on DMA underrun */
 2373         cbp->two_frames =       0;      /* do not limit FIFO to 2 frames */
 2374         cbp->dyn_tbd =          sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2375         cbp->ext_rfa =          sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2376         cbp->mediatype =        sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
 2377         cbp->csma_dis =         0;      /* (don't) disable link */
 2378         cbp->tcp_udp_cksum =    ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
 2379             (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 1 : 0;
 2380         cbp->vlan_tco =         0;      /* (don't) enable vlan wakeup */
 2381         cbp->link_wake_en =     0;      /* (don't) assert PME# on link change */
 2382         cbp->arp_wake_en =      0;      /* (don't) assert PME# on arp */
 2383         cbp->mc_wake_en =       0;      /* (don't) enable PME# on mcmatch */
 2384         cbp->nsai =             1;      /* (don't) disable source addr insert */
 2385         cbp->preamble_length =  2;      /* (7 byte) preamble */
 2386         cbp->loopback =         0;      /* (don't) loopback */
 2387         cbp->linear_priority =  0;      /* (normal CSMA/CD operation) */
 2388         cbp->linear_pri_mode =  0;      /* (wait after xmit only) */
 2389         cbp->interfrm_spacing = 6;      /* (96 bits of) interframe spacing */
 2390         cbp->promiscuous =      prm;    /* promiscuous mode */
 2391         cbp->bcast_disable =    0;      /* (don't) disable broadcasts */
 2392         cbp->wait_after_win =   0;      /* (don't) enable modified backoff alg */
 2393         cbp->ignore_ul =        0;      /* consider U/L bit in IA matching */
 2394         cbp->crc16_en =         0;      /* (don't) enable crc-16 algorithm */
 2395         cbp->crscdt =           sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
 2396 
 2397         cbp->stripping =        !prm;   /* truncate rx packet to byte count */
 2398         cbp->padding =          1;      /* (do) pad short tx packets */
 2399         cbp->rcv_crc_xfer =     0;      /* (don't) xfer CRC to host */
 2400         cbp->long_rx_en =       sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
 2401         cbp->ia_wake_en =       0;      /* (don't) wake up on address match */
 2402         cbp->magic_pkt_dis =    sc->flags & FXP_FLAG_WOL ? 0 : 1;
 2403         cbp->force_fdx =        0;      /* (don't) force full duplex */
 2404         cbp->fdx_pin_en =       1;      /* (enable) FDX# pin */
 2405         cbp->multi_ia =         0;      /* (don't) accept multiple IAs */
 2406         cbp->mc_all =           ifp->if_flags & IFF_ALLMULTI ? 1 : prm;
 2407         cbp->gamla_rx =         sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2408         cbp->vlan_strip_en =    ((sc->flags & FXP_FLAG_EXT_RFA) != 0 &&
 2409             (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0;
 2410 
 2411         if (sc->revision == FXP_REV_82557) {
 2412                 /*
 2413                  * The 82557 has no hardware flow control, the values
 2414                  * below are the defaults for the chip.
 2415                  */
 2416                 cbp->fc_delay_lsb =     0;
 2417                 cbp->fc_delay_msb =     0x40;
 2418                 cbp->pri_fc_thresh =    3;
 2419                 cbp->tx_fc_dis =        0;
 2420                 cbp->rx_fc_restop =     0;
 2421                 cbp->rx_fc_restart =    0;
 2422                 cbp->fc_filter =        0;
 2423                 cbp->pri_fc_loc =       1;
 2424         } else {
 2425                 /* Set pause RX FIFO threshold to 1KB. */
 2426                 CSR_WRITE_1(sc, FXP_CSR_FC_THRESH, 1);
 2427                 /* Set pause time. */
 2428                 cbp->fc_delay_lsb =     0xff;
 2429                 cbp->fc_delay_msb =     0xff;
 2430                 cbp->pri_fc_thresh =    3;
 2431                 mii = device_get_softc(sc->miibus);
 2432                 if ((IFM_OPTIONS(mii->mii_media_active) &
 2433                     IFM_ETH_TXPAUSE) != 0)
 2434                         /* enable transmit FC */
 2435                         cbp->tx_fc_dis = 0;
 2436                 else
 2437                         /* disable transmit FC */
 2438                         cbp->tx_fc_dis = 1;
 2439                 if ((IFM_OPTIONS(mii->mii_media_active) &
 2440                     IFM_ETH_RXPAUSE) != 0) {
 2441                         /* enable FC restart/restop frames */
 2442                         cbp->rx_fc_restart = 1;
 2443                         cbp->rx_fc_restop = 1;
 2444                 } else {
 2445                         /* disable FC restart/restop frames */
 2446                         cbp->rx_fc_restart = 0;
 2447                         cbp->rx_fc_restop = 0;
 2448                 }
 2449                 cbp->fc_filter =        !prm;   /* drop FC frames to host */
 2450                 cbp->pri_fc_loc =       1;      /* FC pri location (byte31) */
 2451         }
 2452 
 2453         /* Enable 82558 and 82559 extended statistics functionality. */
 2454         if (sc->revision >= FXP_REV_82558_A4) {
 2455                 if (sc->revision >= FXP_REV_82559_A0) {
 2456                         /*
 2457                          * Extend configuration table size to 32
 2458                          * to include TCO configuration.
 2459                          */
 2460                         cbp->byte_count = 32;
 2461                         cbp->ext_stats_dis = 1;
 2462                         /* Enable TCO stats. */
 2463                         cbp->tno_int_or_tco_en = 1;
 2464                         cbp->gamla_rx = 1;
 2465                 } else
 2466                         cbp->ext_stats_dis = 0;
 2467         }
 2468 
 2469         /*
 2470          * Start the config command/DMA.
 2471          */
 2472         fxp_scb_wait(sc);
 2473         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2474             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2475         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2476         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2477         /* ...and wait for it to complete. */
 2478         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 2479 
 2480         /*
 2481          * Now initialize the station address. Temporarily use the TxCB
 2482          * memory area like we did above for the config CB.
 2483          */
 2484         cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
 2485         cb_ias->cb_status = 0;
 2486         cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
 2487         cb_ias->link_addr = 0xffffffff;
 2488         bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
 2489 
 2490         /*
 2491          * Start the IAS (Individual Address Setup) command/DMA.
 2492          */
 2493         fxp_scb_wait(sc);
 2494         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2495             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2496         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2497         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2498         /* ...and wait for it to complete. */
 2499         fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
 2500 
 2501         /*
 2502          * Initialize the multicast address list.
 2503          */
 2504         fxp_mc_setup(sc);
 2505 
 2506         /*
 2507          * Initialize transmit control block (TxCB) list.
 2508          */
 2509         txp = sc->fxp_desc.tx_list;
 2510         tcbp = sc->fxp_desc.cbl_list;
 2511         bzero(tcbp, FXP_TXCB_SZ);
 2512         for (i = 0; i < FXP_NTXCB; i++) {
 2513                 txp[i].tx_mbuf = NULL;
 2514                 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
 2515                 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
 2516                 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
 2517                     (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
 2518                 if (sc->flags & FXP_FLAG_EXT_TXCB)
 2519                         tcbp[i].tbd_array_addr =
 2520                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
 2521                 else
 2522                         tcbp[i].tbd_array_addr =
 2523                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
 2524                 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
 2525         }
 2526         /*
 2527          * Set the suspend flag on the first TxCB and start the control
 2528          * unit. It will execute the NOP and then suspend.
 2529          */
 2530         tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
 2531         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2532             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2533         sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
 2534         sc->tx_queued = 1;
 2535 
 2536         fxp_scb_wait(sc);
 2537         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2538         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2539 
 2540         /*
 2541          * Initialize receiver buffer area - RFA.
 2542          */
 2543         fxp_scb_wait(sc);
 2544         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
 2545         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 2546 
 2547         if (sc->miibus != NULL && setmedia != 0)
 2548                 mii_mediachg(device_get_softc(sc->miibus));
 2549 
 2550         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2551         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2552 
 2553         /*
 2554          * Enable interrupts.
 2555          */
 2556 #ifdef DEVICE_POLLING
 2557         /*
 2558          * ... but only do that if we are not polling. And because (presumably)
 2559          * the default is interrupts on, we need to disable them explicitly!
 2560          */
 2561         if (ifp->if_capenable & IFCAP_POLLING)
 2562                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 2563         else
 2564 #endif /* DEVICE_POLLING */
 2565         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 2566 
 2567         /*
 2568          * Start stats updater.
 2569          */
 2570         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 2571 }
 2572 
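      /*
       * Media handlers used when no MII PHY was attached (boards that,
       * presumably, use the chip's serial media interface); the media is
       * fixed, so these are stubs.
       */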
 2573 static int
 2574 fxp_serial_ifmedia_upd(struct ifnet *ifp)
 2575 {
 2576 
 2577         return (0);
 2578 }
 2579 
 2580 static void
 2581 fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2582 {
 2583 
 2584         ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
 2585 }
 2586 
 2587 /*
 2588  * Change media according to request.
 2589  */
 2590 static int
 2591 fxp_ifmedia_upd(struct ifnet *ifp)
 2592 {
 2593         struct fxp_softc *sc = ifp->if_softc;
 2594         struct mii_data *mii;
 2595         struct mii_softc *miisc;
 2596 
 2597         mii = device_get_softc(sc->miibus);
 2598         FXP_LOCK(sc);
 2599         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 2600                 mii_phy_reset(miisc);
 2601         mii_mediachg(mii);
 2602         FXP_UNLOCK(sc);
 2603         return (0);
 2604 }
 2605 
 2606 /*
 2607  * Notify the world which media we're using.
 2608  */
 2609 static void
 2610 fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2611 {
 2612         struct fxp_softc *sc = ifp->if_softc;
 2613         struct mii_data *mii;
 2614 
 2615         mii = device_get_softc(sc->miibus);
 2616         FXP_LOCK(sc);
 2617         mii_pollstat(mii);
 2618         ifmr->ifm_active = mii->mii_media_active;
 2619         ifmr->ifm_status = mii->mii_media_status;
 2620         FXP_UNLOCK(sc);
 2621 }
 2622 
 2623 /*
 2624  * Add a buffer to the end of the RFA buffer list.
 2625  * Return 0 if successful or an errno value on failure. A failure
 2626  * results in reusing the old RFA buffer.
 2627  * The RFA struct is stuck at the beginning of the mbuf cluster and
 2628  * the data pointer is fixed up to point just past it.
 2629  */
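      /*
       * Resulting cluster layout (roughly): RFA_ALIGNMENT_FUDGE bytes of
       * padding, then the struct fxp_rfa, then the packet data that
       * m->m_data is left pointing at, so that the received packet ends
       * up 32-bit aligned.
       */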
 2630 static int
 2631 fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2632 {
 2633         struct mbuf *m;
 2634         struct fxp_rfa *rfa;
 2635         bus_dmamap_t tmp_map;
 2636         int error;
 2637 
 2638         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 2639         if (m == NULL)
 2640                 return (ENOBUFS);
 2641 
 2642         /*
 2643          * Move the data pointer up so that the incoming data packet
 2644          * will be 32-bit aligned.
 2645          */
 2646         m->m_data += RFA_ALIGNMENT_FUDGE;
 2647 
 2648         /*
 2649          * Get a pointer to the base of the mbuf cluster and move
 2650          * data start past it.
 2651          */
 2652         rfa = mtod(m, struct fxp_rfa *);
 2653         m->m_data += sc->rfa_size;
 2654         rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
 2655 
 2656         rfa->rfa_status = 0;
 2657         rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
 2658         rfa->actual_size = 0;
 2659         m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
 2660             sc->rfa_size;
 2661 
 2662         /*
 2663          * Initialize the rest of the RFA.  Note that since the RFA
 2664          * is misaligned, we cannot store values directly.  We're thus
 2665          * using the le32enc() function which handles endianness and
 2666          * is also alignment-safe.
 2667          */
 2668         le32enc(&rfa->link_addr, 0xffffffff);
 2669         le32enc(&rfa->rbd_addr, 0xffffffff);
 2670 
 2671         /* Map the RFA into DMA memory. */
 2672         error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa,
 2673             MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
 2674             &rxp->rx_addr, BUS_DMA_NOWAIT);
 2675         if (error) {
 2676                 m_freem(m);
 2677                 return (error);
 2678         }
 2679 
 2680         if (rxp->rx_mbuf != NULL)
 2681                 bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
 2682         tmp_map = sc->spare_map;
 2683         sc->spare_map = rxp->rx_map;
 2684         rxp->rx_map = tmp_map;
 2685         rxp->rx_mbuf = m;
 2686 
 2687         bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 2688             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2689         return (0);
 2690 }
 2691 
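      /*
       * Link a freshly loaded RX buffer onto the tail of the RFA list:
       * the previous tail's link_addr is pointed at the new buffer and
       * its EL (end-of-list) control bit is cleared so the receive unit
       * keeps going past it.
       */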
 2692 static void
 2693 fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2694 {
 2695         struct fxp_rfa *p_rfa;
 2696         struct fxp_rx *p_rx;
 2697 
 2698         /*
 2699          * If there are other buffers already on the list, attach this
 2700          * one to the end by fixing up the tail to point to this one.
 2701          */
 2702         if (sc->fxp_desc.rx_head != NULL) {
 2703                 p_rx = sc->fxp_desc.rx_tail;
 2704                 p_rfa = (struct fxp_rfa *)
 2705                     (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
 2706                 p_rx->rx_next = rxp;
 2707                 le32enc(&p_rfa->link_addr, rxp->rx_addr);
 2708                 p_rfa->rfa_control = 0;
 2709                 bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map,
 2710                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2711         } else {
 2712                 rxp->rx_next = NULL;
 2713                 sc->fxp_desc.rx_head = rxp;
 2714         }
 2715         sc->fxp_desc.rx_tail = rxp;
 2716 }
 2717 
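      /*
       * Reinitialize the RFA in an existing mbuf cluster so the same
       * buffer can be handed back to the chip -- presumably used when a
       * received frame is dropped or a replacement cluster could not be
       * allocated.
       */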
 2718 static void
 2719 fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2720 {
 2721         struct mbuf *m;
 2722         struct fxp_rfa *rfa;
 2723 
 2724         m = rxp->rx_mbuf;
 2725         m->m_data = m->m_ext.ext_buf;
 2726         /*
 2727          * Move the data pointer up so that the incoming data packet
 2728          * will be 32-bit aligned.
 2729          */
 2730         m->m_data += RFA_ALIGNMENT_FUDGE;
 2731 
 2732         /*
 2733          * Get a pointer to the base of the mbuf cluster and move
 2734          * data start past it.
 2735          */
 2736         rfa = mtod(m, struct fxp_rfa *);
 2737         m->m_data += sc->rfa_size;
 2738         rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
 2739 
 2740         rfa->rfa_status = 0;
 2741         rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
 2742         rfa->actual_size = 0;
 2743 
 2744         /*
 2745          * Initialize the rest of the RFA.  Note that since the RFA
 2746          * is misaligned, we cannot store values directly.  We're thus
 2747          * using the le32enc() function which handles endianness and
 2748          * is also alignment-safe.
 2749          */
 2750         le32enc(&rfa->link_addr, 0xffffffff);
 2751         le32enc(&rfa->rbd_addr, 0xffffffff);
 2752 
 2753         bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 2754             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2755 }
 2756 
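      /*
       * MII access goes through the chip's MDI control register.  As used
       * below, bits 15:0 carry the data, bits 20:16 the register number,
       * bits 25:21 the PHY address, bits 27:26 the opcode, and bit 28 is
       * set by the chip when the cycle has completed.
       */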
 2757 static int
 2758 fxp_miibus_readreg(device_t dev, int phy, int reg)
 2759 {
 2760         struct fxp_softc *sc = device_get_softc(dev);
 2761         int count = 10000;
 2762         int value;
 2763 
 2764         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2765             (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
 2766 
 2767         while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
 2768             && count--)
 2769                 DELAY(10);
 2770 
 2771         if (count <= 0)
 2772                 device_printf(dev, "fxp_miibus_readreg: timed out\n");
 2773 
 2774         return (value & 0xffff);
 2775 }
 2776 
 2777 static int
 2778 fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
 2779 {
 2780         struct fxp_softc *sc = device_get_softc(dev);
 2781         int count = 10000;
 2782 
 2783         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2784             (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
 2785             (value & 0xffff));
 2786 
 2787         while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
 2788             count--)
 2789                 DELAY(10);
 2790 
 2791         if (count <= 0)
 2792                 device_printf(dev, "fxp_miibus_writereg: timed out\n");
 2793         return (0);
 2794 }
 2795 
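      /*
       * MII status-change callback: track whether the CU-resume workaround
       * is needed (10Mbps links on affected chips) and, except on the
       * 82557, re-run fxp_init_body() so the configuration matches the
       * new media settings.
       */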
 2796 static void
 2797 fxp_miibus_statchg(device_t dev)
 2798 {
 2799         struct fxp_softc *sc;
 2800         struct mii_data *mii;
 2801         struct ifnet *ifp;
 2802 
 2803         sc = device_get_softc(dev);
 2804         mii = device_get_softc(sc->miibus);
 2805         ifp = sc->ifp;
 2806         if (mii == NULL || ifp == NULL ||
 2807             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
 2808             (mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
 2809             (IFM_AVALID | IFM_ACTIVE))
 2810                 return;
 2811 
 2812         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T &&
 2813             sc->flags & FXP_FLAG_CU_RESUME_BUG)
 2814                 sc->cu_resume_bug = 1;
 2815         else
 2816                 sc->cu_resume_bug = 0;
 2817         /*
 2818          * Call fxp_init_body in order to adjust the flow control settings.
 2819          * Note that the 82557 doesn't support hardware flow control.
 2820          */
 2821         if (sc->revision == FXP_REV_82557)
 2822                 return;
 2823         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2824         fxp_init_body(sc, 0);
 2825 }
 2826 
 2827 static int
 2828 fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2829 {
 2830         struct fxp_softc *sc = ifp->if_softc;
 2831         struct ifreq *ifr = (struct ifreq *)data;
 2832         struct mii_data *mii;
 2833         int flag, mask, error = 0, reinit;
 2834 
 2835         switch (command) {
 2836         case SIOCSIFFLAGS:
 2837                 FXP_LOCK(sc);
 2838                 /*
 2839                  * If interface is marked up and not running, then start it.
 2840                  * If it is marked down and running, stop it.
 2841                  * XXX If it's up then re-initialize it. This is so flags
 2842                  * such as IFF_PROMISC are handled.
 2843                  */
 2844                 if (ifp->if_flags & IFF_UP) {
 2845                         if (((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) &&
 2846                             ((ifp->if_flags ^ sc->if_flags) &
 2847                             (IFF_PROMISC | IFF_ALLMULTI | IFF_LINK0)) != 0) {
 2848                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2849                                 fxp_init_body(sc, 0);
 2850                         } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 2851                                 fxp_init_body(sc, 1);
 2852                 } else {
 2853                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2854                                 fxp_stop(sc);
 2855                 }
 2856                 sc->if_flags = ifp->if_flags;
 2857                 FXP_UNLOCK(sc);
 2858                 break;
 2859 
 2860         case SIOCADDMULTI:
 2861         case SIOCDELMULTI:
 2862                 FXP_LOCK(sc);
 2863                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 2864                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2865                         fxp_init_body(sc, 0);
 2866                 }
 2867                 FXP_UNLOCK(sc);
 2868                 break;
 2869 
 2870         case SIOCSIFMEDIA:
 2871         case SIOCGIFMEDIA:
 2872                 if (sc->miibus != NULL) {
 2873                         mii = device_get_softc(sc->miibus);
 2874                         error = ifmedia_ioctl(ifp, ifr,
 2875                             &mii->mii_media, command);
 2876                 } else {
 2877                         error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
 2878                 }
 2879                 break;
 2880 
 2881         case SIOCSIFCAP:
 2882                 reinit = 0;
 2883                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 2884 #ifdef DEVICE_POLLING
 2885                 if (mask & IFCAP_POLLING) {
 2886                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 2887                                 error = ether_poll_register(fxp_poll, ifp);
 2888                                 if (error)
 2889                                         return (error);
 2890                                 FXP_LOCK(sc);
 2891                                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
 2892                                     FXP_SCB_INTR_DISABLE);
 2893                                 ifp->if_capenable |= IFCAP_POLLING;
 2894                                 FXP_UNLOCK(sc);
 2895                         } else {
 2896                                 error = ether_poll_deregister(ifp);
 2897                                 /* Enable interrupts in any case */
 2898                                 FXP_LOCK(sc);
 2899                                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 2900                                 ifp->if_capenable &= ~IFCAP_POLLING;
 2901                                 FXP_UNLOCK(sc);
 2902                         }
 2903                 }
 2904 #endif
 2905                 FXP_LOCK(sc);
 2906                 if ((mask & IFCAP_TXCSUM) != 0 &&
 2907                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 2908                         ifp->if_capenable ^= IFCAP_TXCSUM;
 2909                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 2910                                 ifp->if_hwassist |= FXP_CSUM_FEATURES;
 2911                         else
 2912                                 ifp->if_hwassist &= ~FXP_CSUM_FEATURES;
 2913                 }
 2914                 if ((mask & IFCAP_RXCSUM) != 0 &&
 2915                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
 2916                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2917                         if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
 2918                                 reinit++;
 2919                 }
 2920                 if ((mask & IFCAP_TSO4) != 0 &&
 2921                     (ifp->if_capabilities & IFCAP_TSO4) != 0) {
 2922                         ifp->if_capenable ^= IFCAP_TSO4;
 2923                         if ((ifp->if_capenable & IFCAP_TSO4) != 0)
 2924                                 ifp->if_hwassist |= CSUM_TSO;
 2925                         else
 2926                                 ifp->if_hwassist &= ~CSUM_TSO;
 2927                 }
 2928                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 2929                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 2930                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 2931                 if ((mask & IFCAP_VLAN_MTU) != 0 &&
 2932                     (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) {
 2933                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2934                         if (sc->revision != FXP_REV_82557)
 2935                                 flag = FXP_FLAG_LONG_PKT_EN;
 2936                         else /* a hack to get long frames on the old chip */
 2937                                 flag = FXP_FLAG_SAVE_BAD;
 2938                         sc->flags ^= flag;
 2939                         if (ifp->if_flags & IFF_UP)
 2940                                 reinit++;
 2941                 }
 2942                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 2943                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
 2944                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2945                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 2946                     (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
 2947                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 2948                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 2949                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
 2950                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2951                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
 2952                                 ifp->if_capenable &=
 2953                                     ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
 2954                         reinit++;
 2955                 }
 2956                 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 2957                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2958                         fxp_init_body(sc, 0);
 2959                 }
 2960                 FXP_UNLOCK(sc);
 2961                 VLAN_CAPABILITIES(ifp);
 2962                 break;
 2963 
 2964         default:
 2965                 error = ether_ioctl(ifp, command, data);
 2966         }
 2967         return (error);
 2968 }
 2969 
 2970 /*
 2971  * Fill in the multicast address list and return number of entries.
 2972  */
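      /*
       * If more than MAXMCADDR addresses are configured, the filter cannot
       * hold them all; IFF_ALLMULTI is set instead and an empty list is
       * returned.
       */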
 2973 static int
 2974 fxp_mc_addrs(struct fxp_softc *sc)
 2975 {
 2976         struct fxp_cb_mcs *mcsp = sc->mcsp;
 2977         struct ifnet *ifp = sc->ifp;
 2978         struct ifmultiaddr *ifma;
 2979         int nmcasts;
 2980 
 2981         nmcasts = 0;
 2982         if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
 2983                 if_maddr_rlock(ifp);
 2984                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2985                         if (ifma->ifma_addr->sa_family != AF_LINK)
 2986                                 continue;
 2987                         if (nmcasts >= MAXMCADDR) {
 2988                                 ifp->if_flags |= IFF_ALLMULTI;
 2989                                 nmcasts = 0;
 2990                                 break;
 2991                         }
 2992                         bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
 2993                             &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
 2994                         nmcasts++;
 2995                 }
 2996                 if_maddr_runlock(ifp);
 2997         }
 2998         mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
 2999         return (nmcasts);
 3000 }
 3001 
 3002 /*
 3003  * Program the multicast filter.
 3004  *
 3005  * We have an artificial restriction that the multicast setup command
 3006  * must be the first command in the chain, so we take steps to ensure
 3007  * this. Requiring this lets us keep up the performance of
 3008  * the pre-initialized command ring (esp. link pointers) by not actually
 3009  * inserting the mcsetup command in the ring - i.e. its link pointer
 3010  * points to the TxCB ring, but the mcsetup descriptor itself is not part
 3011  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 3012  * lead into the regular TxCB ring when it completes.
 3013  */
 3014 static void
 3015 fxp_mc_setup(struct fxp_softc *sc)
 3016 {
 3017         struct fxp_cb_mcs *mcsp;
 3018         int count;
 3019 
 3020         FXP_LOCK_ASSERT(sc, MA_OWNED);
 3021 
 3022         mcsp = sc->mcsp;
 3023         mcsp->cb_status = 0;
 3024         mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
 3025         mcsp->link_addr = 0xffffffff;
 3026         fxp_mc_addrs(sc);
 3027 
 3028         /*
 3029          * Wait until the command unit is idle. It should already be
 3030          * idle here since nothing is queued, but make sure anyway.
 3031          */
 3032         count = 100;
 3033         while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) !=
 3034             FXP_SCB_CUS_IDLE && --count)
 3035                 DELAY(10);
 3036         if (count == 0) {
 3037                 device_printf(sc->dev, "command queue timeout\n");
 3038                 return;
 3039         }
 3040 
 3041         /*
 3042          * Start the multicast setup command.
 3043          */
 3044         fxp_scb_wait(sc);
 3045         bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
 3046             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3047         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
 3048         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 3049         /* ...and wait for it to complete. */
 3050         fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
 3051 }
 3052 
 3053 static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
 3054 static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
 3055 static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
 3056 static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
 3057 static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
 3058 static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
 3059 static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;
 3060 
 3061 #define UCODE(x)        x, sizeof(x)/sizeof(uint32_t)
 3062 
 3063 static const struct ucode {
 3064         uint32_t        revision;
 3065         uint32_t        *ucode;
 3066         int             length;
 3067         u_short         int_delay_offset;
 3068         u_short         bundle_max_offset;
 3069 } ucode_table[] = {
 3070         { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
 3071         { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
 3072         { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
 3073             D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
 3074         { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
 3075             D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
 3076         { FXP_REV_82550, UCODE(fxp_ucode_d102),
 3077             D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
 3078         { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
 3079             D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
 3080         { FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
 3081             D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
 3082         { FXP_REV_82551_10, UCODE(fxp_ucode_d102e),
 3083             D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
 3084         { 0, NULL, 0, 0, 0 }
 3085 };
 3086 
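      /*
       * int_delay_offset and bundle_max_offset are dword offsets into the
       * microcode image; when non-zero, fxp_load_ucode() patches the
       * corresponding tunables into those locations (the interrupt delay
       * is written as 1.5x the microsecond value) before the download.
       */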
 3087 static void
 3088 fxp_load_ucode(struct fxp_softc *sc)
 3089 {
 3090         const struct ucode *uc;
 3091         struct fxp_cb_ucode *cbp;
 3092         int i;
 3093 
 3094         if (sc->flags & FXP_FLAG_NO_UCODE)
 3095                 return;
 3096 
 3097         for (uc = ucode_table; uc->ucode != NULL; uc++)
 3098                 if (sc->revision == uc->revision)
 3099                         break;
 3100         if (uc->ucode == NULL)
 3101                 return;
 3102         cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
 3103         cbp->cb_status = 0;
 3104         cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
 3105         cbp->link_addr = 0xffffffff;            /* (no) next command */
 3106         for (i = 0; i < uc->length; i++)
 3107                 cbp->ucode[i] = htole32(uc->ucode[i]);
 3108         if (uc->int_delay_offset)
 3109                 *(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
 3110                     htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
 3111         if (uc->bundle_max_offset)
 3112                 *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
 3113                     htole16(sc->tunable_bundle_max);
 3114         /*
 3115          * Download the ucode to the chip.
 3116          */
 3117         fxp_scb_wait(sc);
 3118         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 3119             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3120         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 3121         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 3122         /* ...and wait for it to complete. */
 3123         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 3124         device_printf(sc->dev,
 3125             "Microcode loaded, int_delay: %d usec  bundle_max: %d\n",
 3126             sc->tunable_int_delay,
 3127             uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
 3128         sc->flags |= FXP_FLAG_UCODE;
 3129         bzero(cbp, FXP_TXCB_SZ);
 3130 }
 3131 
 3132 #define FXP_SYSCTL_STAT_ADD(c, h, n, p, d)      \
 3133         SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
 3134 
 3135 static void
 3136 fxp_sysctl_node(struct fxp_softc *sc)
 3137 {
 3138         struct sysctl_ctx_list *ctx;
 3139         struct sysctl_oid_list *child, *parent;
 3140         struct sysctl_oid *tree;
 3141         struct fxp_hwstats *hsp;
 3142 
 3143         ctx = device_get_sysctl_ctx(sc->dev);
 3144         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3145 
 3146         SYSCTL_ADD_PROC(ctx, child,
 3147             OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
 3148             &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
 3149             "FXP driver receive interrupt microcode bundling delay");
 3150         SYSCTL_ADD_PROC(ctx, child,
 3151             OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
 3152             &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
 3153             "FXP driver receive interrupt microcode bundle size limit");
 3154         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
 3155             "FXP RNR events");
 3156 
 3157         /*
 3158          * Pull in device tunables.
 3159          */
 3160         sc->tunable_int_delay = TUNABLE_INT_DELAY;
 3161         sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
 3162         (void) resource_int_value(device_get_name(sc->dev),
 3163             device_get_unit(sc->dev), "int_delay", &sc->tunable_int_delay);
 3164         (void) resource_int_value(device_get_name(sc->dev),
 3165             device_get_unit(sc->dev), "bundle_max", &sc->tunable_bundle_max);
 3166         sc->rnr = 0;
 3167 
 3168         hsp = &sc->fxp_hwstats;
 3169         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 3170             NULL, "FXP statistics");
 3171         parent = SYSCTL_CHILDREN(tree);
 3172 
 3173         /* Rx MAC statistics. */
 3174         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
 3175             NULL, "Rx MAC statistics");
 3176         child = SYSCTL_CHILDREN(tree);
 3177         FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
 3178             &hsp->rx_good, "Good frames");
 3179         FXP_SYSCTL_STAT_ADD(ctx, child, "crc_errors",
 3180             &hsp->rx_crc_errors, "CRC errors");
 3181         FXP_SYSCTL_STAT_ADD(ctx, child, "alignment_errors",
 3182             &hsp->rx_alignment_errors, "Alignment errors");
 3183         FXP_SYSCTL_STAT_ADD(ctx, child, "rnr_errors",
 3184             &hsp->rx_rnr_errors, "RNR errors");
 3185         FXP_SYSCTL_STAT_ADD(ctx, child, "overrun_errors",
 3186             &hsp->rx_overrun_errors, "Overrun errors");
 3187         FXP_SYSCTL_STAT_ADD(ctx, child, "cdt_errors",
 3188             &hsp->rx_cdt_errors, "Collision detect errors");
 3189         FXP_SYSCTL_STAT_ADD(ctx, child, "shortframes",
 3190             &hsp->rx_shortframes, "Short frame errors");
 3191         if (sc->revision >= FXP_REV_82558_A4) {
 3192                 FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
 3193                     &hsp->rx_pause, "Pause frames");
 3194                 FXP_SYSCTL_STAT_ADD(ctx, child, "controls",
 3195                     &hsp->rx_controls, "Unsupported control frames");
 3196         }
 3197         if (sc->revision >= FXP_REV_82559_A0)
 3198                 FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
 3199                     &hsp->rx_tco, "TCO frames");
 3200 
 3201         /* Tx MAC statistics. */
 3202         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
 3203             NULL, "Tx MAC statistics");
 3204         child = SYSCTL_CHILDREN(tree);
 3205         FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
 3206             &hsp->tx_good, "Good frames");
 3207         FXP_SYSCTL_STAT_ADD(ctx, child, "maxcols",
 3208             &hsp->tx_maxcols, "Maximum collision errors");
 3209         FXP_SYSCTL_STAT_ADD(ctx, child, "latecols",
 3210             &hsp->tx_latecols, "Late collision errors");
 3211         FXP_SYSCTL_STAT_ADD(ctx, child, "underruns",
 3212             &hsp->tx_underruns, "Underrun errors");
 3213         FXP_SYSCTL_STAT_ADD(ctx, child, "lostcrs",
 3214             &hsp->tx_lostcrs, "Lost carrier sense");
 3215         FXP_SYSCTL_STAT_ADD(ctx, child, "deffered",
 3216             &hsp->tx_deffered, "Deferred");
 3217         FXP_SYSCTL_STAT_ADD(ctx, child, "single_collisions",
 3218             &hsp->tx_single_collisions, "Single collisions");
 3219         FXP_SYSCTL_STAT_ADD(ctx, child, "multiple_collisions",
 3220             &hsp->tx_multiple_collisions, "Multiple collisions");
 3221         FXP_SYSCTL_STAT_ADD(ctx, child, "total_collisions",
 3222             &hsp->tx_total_collisions, "Total collisions");
 3223         if (sc->revision >= FXP_REV_82558_A4)
 3224                 FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
 3225                     &hsp->tx_pause, "Pause frames");
 3226         if (sc->revision >= FXP_REV_82559_A0)
 3227                 FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
 3228                     &hsp->tx_tco, "TCO frames");
 3229 }
 3230 
 3231 #undef FXP_SYSCTL_STAT_ADD
 3232 
 3233 static int
 3234 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3235 {
 3236         int error, value;
 3237 
 3238         value = *(int *)arg1;
 3239         error = sysctl_handle_int(oidp, &value, 0, req);
 3240         if (error || !req->newptr)
 3241                 return (error);
 3242         if (value < low || value > high)
 3243                 return (EINVAL);
 3244         *(int *)arg1 = value;
 3245         return (0);
 3246 }
 3247 
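      /*
       * The two handlers below back the per-device int_delay and
       * bundle_max sysctls registered in fxp_sysctl_node(), normally
       * visible as dev.fxp.<unit>.int_delay and dev.fxp.<unit>.bundle_max,
       * e.g. (assuming unit 0):
       *
       *      sysctl dev.fxp.0.int_delay=1000
       *
       * Out-of-range values are rejected with EINVAL.
       */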
 3248 /*
 3249  * Interrupt delay is expressed in microseconds; a multiplier is used
 3250  * to convert this to the appropriate clock ticks before use.
 3251  */
 3252 static int
 3253 sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
 3254 {
 3255 
 3256         return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
 3257 }
 3258 
 3259 static int
 3260 sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
 3261 {
 3262 
 3263         return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
 3264 }
