FreeBSD/Linux Kernel Cross Reference
sys/dev/fxp/if_fxp.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
    3  *
    4  * Copyright (c) 1995, David Greenman
    5  * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice unmodified, this list of conditions, and the following
   13  *    disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 /*
   36  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
   37  */
   38 
   39 #ifdef HAVE_KERNEL_OPTION_HEADERS
   40 #include "opt_device_polling.h"
   41 #endif
   42 
   43 #include <sys/param.h>
   44 #include <sys/systm.h>
   45 #include <sys/bus.h>
   46 #include <sys/endian.h>
   47 #include <sys/kernel.h>
   48 #include <sys/mbuf.h>
   49 #include <sys/lock.h>
   50 #include <sys/malloc.h>
   51 #include <sys/module.h>
   52 #include <sys/mutex.h>
   53 #include <sys/rman.h>
   54 #include <sys/socket.h>
   55 #include <sys/sockio.h>
   56 #include <sys/sysctl.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_var.h>
   62 #include <net/if_arp.h>
   63 #include <net/if_dl.h>
   64 #include <net/if_media.h>
   65 #include <net/if_types.h>
   66 #include <net/if_vlan_var.h>
   67 
   68 #include <netinet/in.h>
   69 #include <netinet/in_systm.h>
   70 #include <netinet/ip.h>
   71 #include <netinet/tcp.h>
   72 #include <netinet/udp.h>
   73 
   74 #include <machine/bus.h>
   75 #include <machine/in_cksum.h>
   76 #include <machine/resource.h>
   77 
   78 #include <dev/pci/pcivar.h>
   79 #include <dev/pci/pcireg.h>             /* for PCIM_CMD_xxx */
   80 
   81 #include <dev/mii/mii.h>
   82 #include <dev/mii/miivar.h>
   83 
   84 #include <dev/fxp/if_fxpreg.h>
   85 #include <dev/fxp/if_fxpvar.h>
   86 #include <dev/fxp/rcvbundl.h>
   87 
   88 MODULE_DEPEND(fxp, pci, 1, 1, 1);
   89 MODULE_DEPEND(fxp, ether, 1, 1, 1);
   90 MODULE_DEPEND(fxp, miibus, 1, 1, 1);
   91 #include "miibus_if.h"
   92 
   93 /*
   94  * NOTE!  On !x86 we typically have an alignment constraint.  The
   95  * card DMAs the packet immediately following the RFA.  However,
   96  * the first thing in the packet is a 14-byte Ethernet header.
   97  * This means that the packet is misaligned.  To compensate,
   98  * we actually offset the RFA 2 bytes into the cluster.  This
   99  * aligns the packet after the Ethernet header at a 32-bit
  100  * boundary.  HOWEVER!  This means that the RFA is misaligned!
  101  */
  102 #define RFA_ALIGNMENT_FUDGE     2
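      /*
       * Worked example (assuming the RFA size is a multiple of 4, which
       * holds for both the standard and extended RFA layouts): with the
       * 2-byte fudge, the data after the 14-byte Ethernet header begins at
       * offset 2 + rfa_size + 14, a multiple of 4; without the fudge it
       * would begin at rfa_size + 14, which is only 2-byte aligned.
       */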
  103 
  104 /*
  105  * Set the initial transmit threshold at 64 (512 bytes; the threshold
  106  * is in units of 8 bytes).  It is increased by 64 (512 bytes) at a
  107  * time, up to a maximum of 192 (1536 bytes), if an underrun occurs.
  108  */
  109 static int tx_threshold = 64;
  110 
  111 /*
  112  * The configuration byte map has several undefined fields which
  113  * must be one or must be zero.  Set up a template for these bits.
  114  * The actual configuration is performed in fxp_init_body.
  115  *
  116  * See struct fxp_cb_config for the bit definitions.
  117  */
  118 static const u_char fxp_cb_config_template[] = {
  119         0x0, 0x0,               /* cb_status */
  120         0x0, 0x0,               /* cb_command */
  121         0x0, 0x0, 0x0, 0x0,     /* link_addr */
  122         0x0,    /*  0 */
  123         0x0,    /*  1 */
  124         0x0,    /*  2 */
  125         0x0,    /*  3 */
  126         0x0,    /*  4 */
  127         0x0,    /*  5 */
  128         0x32,   /*  6 */
  129         0x0,    /*  7 */
  130         0x0,    /*  8 */
  131         0x0,    /*  9 */
  132         0x6,    /* 10 */
  133         0x0,    /* 11 */
  134         0x0,    /* 12 */
  135         0x0,    /* 13 */
  136         0xf2,   /* 14 */
  137         0x48,   /* 15 */
  138         0x0,    /* 16 */
  139         0x40,   /* 17 */
  140         0xf0,   /* 18 */
  141         0x0,    /* 19 */
  142         0x3f,   /* 20 */
  143         0x5,    /* 21 */
  144         0x0,    /* 22 */
  145         0x0,    /* 23 */
  146         0x0,    /* 24 */
  147         0x0,    /* 25 */
  148         0x0,    /* 26 */
  149         0x0,    /* 27 */
  150         0x0,    /* 28 */
  151         0x0,    /* 29 */
  152         0x0,    /* 30 */
  153         0x0     /* 31 */
  154 };
  155 
  156 /*
  157  * Claim various Intel PCI device identifiers for this driver.  The
  158  * sub-vendor and sub-device fields are extensively used to identify
  159  * particular variants, but we don't currently differentiate between
  160  * them.  A revid of -1 in the table matches any revision.
  161  */
  162 static const struct fxp_ident fxp_ident_table[] = {
  163     { 0x8086, 0x1029,   -1,     0, "Intel 82559 PCI/CardBus Pro/100" },
  164     { 0x8086, 0x1030,   -1,     0, "Intel 82559 Pro/100 Ethernet" },
  165     { 0x8086, 0x1031,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  166     { 0x8086, 0x1032,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
  167     { 0x8086, 0x1033,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  168     { 0x8086, 0x1034,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  169     { 0x8086, 0x1035,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  170     { 0x8086, 0x1036,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  171     { 0x8086, 0x1037,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
  172     { 0x8086, 0x1038,   -1,     3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
  173     { 0x8086, 0x1039,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  174     { 0x8086, 0x103A,   -1,     4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  175     { 0x8086, 0x103B,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  176     { 0x8086, 0x103C,   -1,     4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
  177     { 0x8086, 0x103D,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
  178     { 0x8086, 0x103E,   -1,     4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
  179     { 0x8086, 0x1050,   -1,     5, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
  180     { 0x8086, 0x1051,   -1,     5, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
  181     { 0x8086, 0x1059,   -1,     0, "Intel 82551QM Pro/100 M Mobile Connection" },
  182     { 0x8086, 0x1064,   -1,     6, "Intel 82562EZ (ICH6)" },
  183     { 0x8086, 0x1065,   -1,     6, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
  184     { 0x8086, 0x1068,   -1,     6, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
  185     { 0x8086, 0x1069,   -1,     6, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
  186     { 0x8086, 0x1091,   -1,     7, "Intel 82562GX Pro/100 Ethernet" },
  187     { 0x8086, 0x1092,   -1,     7, "Intel Pro/100 VE Network Connection" },
  188     { 0x8086, 0x1093,   -1,     7, "Intel Pro/100 VM Network Connection" },
  189     { 0x8086, 0x1094,   -1,     7, "Intel Pro/100 946GZ (ICH7) Network Connection" },
  190     { 0x8086, 0x1209,   -1,     0, "Intel 82559ER Embedded 10/100 Ethernet" },
  191     { 0x8086, 0x1229,   0x01,   0, "Intel 82557 Pro/100 Ethernet" },
  192     { 0x8086, 0x1229,   0x02,   0, "Intel 82557 Pro/100 Ethernet" },
  193     { 0x8086, 0x1229,   0x03,   0, "Intel 82557 Pro/100 Ethernet" },
  194     { 0x8086, 0x1229,   0x04,   0, "Intel 82558 Pro/100 Ethernet" },
  195     { 0x8086, 0x1229,   0x05,   0, "Intel 82558 Pro/100 Ethernet" },
  196     { 0x8086, 0x1229,   0x06,   0, "Intel 82559 Pro/100 Ethernet" },
  197     { 0x8086, 0x1229,   0x07,   0, "Intel 82559 Pro/100 Ethernet" },
  198     { 0x8086, 0x1229,   0x08,   0, "Intel 82559 Pro/100 Ethernet" },
  199     { 0x8086, 0x1229,   0x09,   0, "Intel 82559ER Pro/100 Ethernet" },
  200     { 0x8086, 0x1229,   0x0c,   0, "Intel 82550 Pro/100 Ethernet" },
  201     { 0x8086, 0x1229,   0x0d,   0, "Intel 82550C Pro/100 Ethernet" },
  202     { 0x8086, 0x1229,   0x0e,   0, "Intel 82550 Pro/100 Ethernet" },
  203     { 0x8086, 0x1229,   0x0f,   0, "Intel 82551 Pro/100 Ethernet" },
  204     { 0x8086, 0x1229,   0x10,   0, "Intel 82551 Pro/100 Ethernet" },
  205     { 0x8086, 0x1229,   -1,     0, "Intel 82557/8/9 Pro/100 Ethernet" },
  206     { 0x8086, 0x2449,   -1,     2, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
  207     { 0x8086, 0x27dc,   -1,     7, "Intel 82801GB (ICH7) 10/100 Ethernet" },
  208     { 0,      0,        -1,     0, NULL },
  209 };
  210 
  211 #ifdef FXP_IP_CSUM_WAR
  212 #define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  213 #else
  214 #define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
  215 #endif
  216 
  217 static int              fxp_probe(device_t dev);
  218 static int              fxp_attach(device_t dev);
  219 static int              fxp_detach(device_t dev);
  220 static int              fxp_shutdown(device_t dev);
  221 static int              fxp_suspend(device_t dev);
  222 static int              fxp_resume(device_t dev);
  223 
  224 static const struct fxp_ident *fxp_find_ident(device_t dev);
  225 static void             fxp_intr(void *xsc);
  226 static void             fxp_rxcsum(struct fxp_softc *sc, if_t ifp,
  227                             struct mbuf *m, uint16_t status, int pos);
  228 static int              fxp_intr_body(struct fxp_softc *sc, if_t ifp,
  229                             uint8_t statack, int count);
  230 static void             fxp_init(void *xsc);
  231 static void             fxp_init_body(struct fxp_softc *sc, int);
  232 static void             fxp_tick(void *xsc);
  233 static void             fxp_start(if_t ifp);
  234 static void             fxp_start_body(if_t ifp);
  235 static int              fxp_encap(struct fxp_softc *sc, struct mbuf **m_head);
  236 static void             fxp_txeof(struct fxp_softc *sc);
  237 static void             fxp_stop(struct fxp_softc *sc);
  238 static void             fxp_release(struct fxp_softc *sc);
  239 static int              fxp_ioctl(if_t ifp, u_long command,
  240                             caddr_t data);
  241 static void             fxp_watchdog(struct fxp_softc *sc);
  242 static void             fxp_add_rfabuf(struct fxp_softc *sc,
  243                             struct fxp_rx *rxp);
  244 static void             fxp_discard_rfabuf(struct fxp_softc *sc,
  245                             struct fxp_rx *rxp);
  246 static int              fxp_new_rfabuf(struct fxp_softc *sc,
  247                             struct fxp_rx *rxp);
  248 static void             fxp_mc_addrs(struct fxp_softc *sc);
  249 static void             fxp_mc_setup(struct fxp_softc *sc);
  250 static uint16_t         fxp_eeprom_getword(struct fxp_softc *sc, int offset,
  251                             int autosize);
  252 static void             fxp_eeprom_putword(struct fxp_softc *sc, int offset,
  253                             uint16_t data);
  254 static void             fxp_autosize_eeprom(struct fxp_softc *sc);
  255 static void             fxp_load_eeprom(struct fxp_softc *sc);
  256 static void             fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
  257                             int offset, int words);
  258 static void             fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
  259                             int offset, int words);
  260 static int              fxp_ifmedia_upd(if_t ifp);
  261 static void             fxp_ifmedia_sts(if_t ifp,
  262                             struct ifmediareq *ifmr);
  263 static int              fxp_serial_ifmedia_upd(if_t ifp);
  264 static void             fxp_serial_ifmedia_sts(if_t ifp,
  265                             struct ifmediareq *ifmr);
  266 static int              fxp_miibus_readreg(device_t dev, int phy, int reg);
  267 static int              fxp_miibus_writereg(device_t dev, int phy, int reg,
  268                             int value);
  269 static void             fxp_miibus_statchg(device_t dev);
  270 static void             fxp_load_ucode(struct fxp_softc *sc);
  271 static void             fxp_update_stats(struct fxp_softc *sc);
  272 static void             fxp_sysctl_node(struct fxp_softc *sc);
  273 static int              sysctl_int_range(SYSCTL_HANDLER_ARGS,
  274                             int low, int high);
  275 static int              sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
  276 static int              sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
  277 static void             fxp_scb_wait(struct fxp_softc *sc);
  278 static void             fxp_scb_cmd(struct fxp_softc *sc, int cmd);
  279 static void             fxp_dma_wait(struct fxp_softc *sc,
  280                             volatile uint16_t *status, bus_dma_tag_t dmat,
  281                             bus_dmamap_t map);
  282 
  283 static device_method_t fxp_methods[] = {
  284         /* Device interface */
  285         DEVMETHOD(device_probe,         fxp_probe),
  286         DEVMETHOD(device_attach,        fxp_attach),
  287         DEVMETHOD(device_detach,        fxp_detach),
  288         DEVMETHOD(device_shutdown,      fxp_shutdown),
  289         DEVMETHOD(device_suspend,       fxp_suspend),
  290         DEVMETHOD(device_resume,        fxp_resume),
  291 
  292         /* MII interface */
  293         DEVMETHOD(miibus_readreg,       fxp_miibus_readreg),
  294         DEVMETHOD(miibus_writereg,      fxp_miibus_writereg),
  295         DEVMETHOD(miibus_statchg,       fxp_miibus_statchg),
  296 
  297         DEVMETHOD_END
  298 };
  299 
  300 static driver_t fxp_driver = {
  301         "fxp",
  302         fxp_methods,
  303         sizeof(struct fxp_softc),
  304 };
  305 
  306 DRIVER_MODULE_ORDERED(fxp, pci, fxp_driver, NULL, NULL, SI_ORDER_ANY);
  307 MODULE_PNP_INFO("U16:vendor;U16:device", pci, fxp, fxp_ident_table,
  308     nitems(fxp_ident_table) - 1);
  309 DRIVER_MODULE(miibus, fxp, miibus_driver, NULL, NULL);
  310 
  311 static struct resource_spec fxp_res_spec_mem[] = {
  312         { SYS_RES_MEMORY,       FXP_PCI_MMBA,   RF_ACTIVE },
  313         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  314         { -1, 0 }
  315 };
  316 
  317 static struct resource_spec fxp_res_spec_io[] = {
  318         { SYS_RES_IOPORT,       FXP_PCI_IOBA,   RF_ACTIVE },
  319         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  320         { -1, 0 }
  321 };
  322 
  323 /*
  324  * Wait for the previous command to be accepted (but not necessarily
  325  * completed).
  326  */
  327 static void
  328 fxp_scb_wait(struct fxp_softc *sc)
  329 {
  330         union {
  331                 uint16_t w;
  332                 uint8_t b[2];
  333         } flowctl;
  334         int i = 10000;
  335 
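              /*
               * The device clears the SCB command byte once it has accepted
               * the previous command, so poll for it to read back as zero
               * (roughly 20 ms worst case with these constants).
               */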
  336         while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
  337                 DELAY(2);
  338         if (i == 0) {
  339                 flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FC_THRESH);
  340                 flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FC_STATUS);
  341                 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
  342                     CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
  343                     CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
  344                     CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
  345         }
  346 }
  347 
  348 static void
  349 fxp_scb_cmd(struct fxp_softc *sc, int cmd)
  350 {
  351 
  352         if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
  353                 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
  354                 fxp_scb_wait(sc);
  355         }
  356         CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
  357 }
  358 
  359 static void
  360 fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
  361     bus_dma_tag_t dmat, bus_dmamap_t map)
  362 {
  363         int i;
  364 
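              /*
               * Poll for the chip to set the completion bit in the command
               * block's status word, syncing the descriptor memory before
               * each check so that the device's latest write is visible.
               */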
  365         for (i = 10000; i > 0; i--) {
  366                 DELAY(2);
  367                 bus_dmamap_sync(dmat, map,
  368                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  369                 if ((le16toh(*status) & FXP_CB_STATUS_C) != 0)
  370                         break;
  371         }
  372         if (i == 0)
  373                 device_printf(sc->dev, "DMA timeout\n");
  374 }
  375 
  376 static const struct fxp_ident *
  377 fxp_find_ident(device_t dev)
  378 {
  379         uint16_t vendor;
  380         uint16_t device;
  381         uint8_t revid;
  382         const struct fxp_ident *ident;
  383 
  384         vendor = pci_get_vendor(dev);
  385         device = pci_get_device(dev);
  386         revid = pci_get_revid(dev);
  387         for (ident = fxp_ident_table; ident->name != NULL; ident++) {
  388                 if (ident->vendor == vendor && ident->device == device &&
  389                     (ident->revid == revid || ident->revid == -1)) {
  390                         return (ident);
  391                 }
  392         }
  393         return (NULL);
  394 }
  395 
  396 /*
  397  * Return identification string if this device is ours.
  398  */
  399 static int
  400 fxp_probe(device_t dev)
  401 {
  402         const struct fxp_ident *ident;
  403 
  404         ident = fxp_find_ident(dev);
  405         if (ident != NULL) {
  406                 device_set_desc(dev, ident->name);
  407                 return (BUS_PROBE_DEFAULT);
  408         }
  409         return (ENXIO);
  410 }
  411 
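      /*
       * Callback for bus_dmamap_load(): record the bus address of the
       * single DMA segment in the caller-supplied uint32_t.
       */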
  412 static void
  413 fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  414 {
  415         uint32_t *addr;
  416 
  417         if (error)
  418                 return;
  419 
  420         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  421         addr = arg;
  422         *addr = segs->ds_addr;
  423 }
  424 
  425 static int
  426 fxp_attach(device_t dev)
  427 {
  428         struct fxp_softc *sc;
  429         struct fxp_cb_tx *tcbp;
  430         struct fxp_tx *txp;
  431         struct fxp_rx *rxp;
  432         if_t ifp;
  433         uint32_t val;
  434         uint16_t data;
  435         u_char eaddr[ETHER_ADDR_LEN];
  436         int error, flags, i, pmc, prefer_iomap;
  437 
  438         error = 0;
  439         sc = device_get_softc(dev);
  440         sc->dev = dev;
  441         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  442             MTX_DEF);
  443         callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
  444         ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
  445             fxp_serial_ifmedia_sts);
  446 
  447         ifp = sc->ifp = if_gethandle(IFT_ETHER);
  448         if (ifp == (void *)NULL) {
  449                 device_printf(dev, "can not if_alloc()\n");
  450                 error = ENOSPC;
  451                 goto fail;
  452         }
  453 
  454         /*
  455          * Enable bus mastering.
  456          */
  457         pci_enable_busmaster(dev);
  458 
  459         /*
  460          * Figure out which we should try first - memory mapping or i/o mapping?
  461          * We default to memory mapping. Then we accept an override from the
  462          * command line. Then we check to see which one is enabled.
  463          */
  464         prefer_iomap = 0;
  465         resource_int_value(device_get_name(dev), device_get_unit(dev),
  466             "prefer_iomap", &prefer_iomap);
  467         if (prefer_iomap)
  468                 sc->fxp_spec = fxp_res_spec_io;
  469         else
  470                 sc->fxp_spec = fxp_res_spec_mem;
  471 
  472         error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
  473         if (error) {
  474                 if (sc->fxp_spec == fxp_res_spec_mem)
  475                         sc->fxp_spec = fxp_res_spec_io;
  476                 else
  477                         sc->fxp_spec = fxp_res_spec_mem;
  478                 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
  479         }
  480         if (error) {
  481                 device_printf(dev, "could not allocate resources\n");
  482                 error = ENXIO;
  483                 goto fail;
  484         }
  485 
  486         if (bootverbose) {
  487                 device_printf(dev, "using %s space register mapping\n",
  488                    sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
  489         }
  490 
  491         /*
  492          * Put the CU/RU into the idle state and prepare for a full reset.
  493          */
  494         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
  495         DELAY(10);
  496         /* Full reset and disable interrupts. */
  497         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
  498         DELAY(10);
  499         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
  500 
  501         /*
  502          * Find out how large an SEEPROM we have.
  503          */
  504         fxp_autosize_eeprom(sc);
  505         fxp_load_eeprom(sc);
  506 
  507         /*
  508          * Find out the chip revision; lump all 82557 revs together.
  509          */
  510         sc->ident = fxp_find_ident(dev);
  511         if (sc->ident->ich > 0) {
  512                 /* Assume ICH controllers are 82559. */
  513                 sc->revision = FXP_REV_82559_A0;
  514         } else {
  515                 data = sc->eeprom[FXP_EEPROM_MAP_CNTR];
  516                 if ((data >> 8) == 1)
  517                         sc->revision = FXP_REV_82557;
  518                 else
  519                         sc->revision = pci_get_revid(dev);
  520         }
  521 
  522         /*
  523          * Check availability of WOL. 82559ER does not support WOL.
  524          */
  525         if (sc->revision >= FXP_REV_82558_A4 &&
  526             sc->revision != FXP_REV_82559S_A) {
  527                 data = sc->eeprom[FXP_EEPROM_MAP_ID];
  528                 if ((data & 0x20) != 0 &&
  529                     pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0)
  530                         sc->flags |= FXP_FLAG_WOLCAP;
  531         }
  532 
  533         if (sc->revision == FXP_REV_82550_C) {
  534                 /*
  535                  * The 82550C with the server extension requires microcode
  536                  * to receive fragmented UDP datagrams.  However, if that
  537                  * microcode is loaded on a client-only 82550C, it locks up
  538                  * the controller.
  539                  */
  540                 data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
  541                 if ((data & 0x0400) == 0)
  542                         sc->flags |= FXP_FLAG_NO_UCODE;
  543         }
  544 
  545         /* Receiver lock-up workaround detection. */
  546         if (sc->revision < FXP_REV_82558_A4) {
  547                 data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
  548                 if ((data & 0x03) != 0x03) {
  549                         sc->flags |= FXP_FLAG_RXBUG;
  550                         device_printf(dev, "Enabling Rx lock-up workaround\n");
  551                 }
  552         }
  553 
  554         /*
  555          * Determine whether we must use the 503 serial interface.
  556          */
  557         data = sc->eeprom[FXP_EEPROM_MAP_PRI_PHY];
  558         if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
  559             && (data & FXP_PHY_SERIAL_ONLY))
  560                 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
  561 
  562         fxp_sysctl_node(sc);
  563         /*
  564          * Enable workarounds for certain chip revision deficiencies.
  565          *
  566          * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
  567          * some systems based on a normal 82559 design, have a defect where
  568          * the chip can cause a PCI protocol violation if it receives
  569          * a CU_RESUME command when it is entering the IDLE state.  The
  570          * workaround is to disable Dynamic Standby Mode, so the chip never
  571          * deasserts CLKRUN#, and always remains in an active state.
  572          *
  573          * See Intel 82801BA/82801BAM Specification Update, Errata #30.
  574          */
  575         if ((sc->ident->ich >= 2 && sc->ident->ich <= 3) ||
  576             (sc->ident->ich == 0 && sc->revision >= FXP_REV_82559_A0)) {
  577                 data = sc->eeprom[FXP_EEPROM_MAP_ID];
  578                 if (data & 0x02) {                      /* STB enable */
  579                         uint16_t cksum;
  580                         int i;
  581 
  582                         device_printf(dev,
  583                             "Disabling dynamic standby mode in EEPROM\n");
  584                         data &= ~0x02;
  585                         sc->eeprom[FXP_EEPROM_MAP_ID] = data;
  586                         fxp_write_eeprom(sc, &data, FXP_EEPROM_MAP_ID, 1);
  587                         device_printf(dev, "New EEPROM ID: 0x%x\n", data);
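                              /*
                               * Changing the ID word invalidates the EEPROM
                               * checksum, so recompute the checksum word to
                               * keep the sum of all words at 0xBABA.
                               */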
  588                         cksum = 0;
  589                         for (i = 0; i < (1 << sc->eeprom_size) - 1; i++)
  590                                 cksum += sc->eeprom[i];
  591                         i = (1 << sc->eeprom_size) - 1;
  592                         cksum = 0xBABA - cksum;
  593                         fxp_write_eeprom(sc, &cksum, i, 1);
  594                         device_printf(dev,
  595                             "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
  596                             i, sc->eeprom[i], cksum);
  597                         sc->eeprom[i] = cksum;
  598                         /*
  599                          * If the user elects to continue, try the software
  600                          * workaround, as it is better than nothing.
  601                          */
  602                         sc->flags |= FXP_FLAG_CU_RESUME_BUG;
  603                 }
  604         }
  605 
  606         /*
  607          * If we are not an 82557 chip, we can enable extended features.
  608          */
  609         if (sc->revision != FXP_REV_82557) {
  610                 /*
  611                  * If MWI is enabled in the PCI configuration, and there
  612                  * is a valid cacheline size (8 or 16 dwords), then tell
  613                  * the board to turn on MWI.
  614                  */
  615                 val = pci_read_config(dev, PCIR_COMMAND, 2);
  616                 if (val & PCIM_CMD_MWRICEN &&
  617                     pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
  618                         sc->flags |= FXP_FLAG_MWI_ENABLE;
  619 
  620                 /* turn on the extended TxCB feature */
  621                 sc->flags |= FXP_FLAG_EXT_TXCB;
  622 
  623                 /* enable reception of long frames for VLAN */
  624                 sc->flags |= FXP_FLAG_LONG_PKT_EN;
  625         } else {
  626                 /* a hack to get long VLAN frames on an 82557 */
  627                 sc->flags |= FXP_FLAG_SAVE_BAD;
  628         }
  629 
  630         /* For 82559 or later chips, Rx checksum offload is supported. */
  631         if (sc->revision >= FXP_REV_82559_A0) {
  632                 /* 82559ER does not support Rx checksum offloading. */
  633                 if (sc->ident->device != 0x1209)
  634                         sc->flags |= FXP_FLAG_82559_RXCSUM;
  635         }
  636         /*
  637          * Enable use of extended RFDs and TCBs for 82550
  638          * and later chips. Note: we need extended TXCB support
  639          * too, but that's already enabled by the code above.
  640          * Be careful to do this only on the right devices.
  641          */
  642         if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
  643             sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
  644             || sc->revision == FXP_REV_82551_10) {
  645                 sc->rfa_size = sizeof (struct fxp_rfa);
  646                 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
  647                 sc->flags |= FXP_FLAG_EXT_RFA;
  648                 /* Use extended RFA instead of 82559 checksum mode. */
  649                 sc->flags &= ~FXP_FLAG_82559_RXCSUM;
  650         } else {
  651                 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
  652                 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
  653         }
  654 
  655         /*
  656          * Allocate DMA tags and DMA safe memory.
  657          */
  658         sc->maxtxseg = FXP_NTXSEG;
  659         sc->maxsegsize = MCLBYTES;
  660         if (sc->flags & FXP_FLAG_EXT_RFA) {
  661                 sc->maxtxseg--;
  662                 sc->maxsegsize = FXP_TSO_SEGSIZE;
  663         }
  664         error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
  665             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  666             sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header),
  667             sc->maxtxseg, sc->maxsegsize, 0, NULL, NULL, &sc->fxp_txmtag);
  668         if (error) {
  669                 device_printf(dev, "could not create TX DMA tag\n");
  670                 goto fail;
  671         }
  672 
  673         error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
  674             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  675             MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->fxp_rxmtag);
  676         if (error) {
  677                 device_printf(dev, "could not create RX DMA tag\n");
  678                 goto fail;
  679         }
  680 
  681         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  682             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  683             sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
  684             NULL, NULL, &sc->fxp_stag);
  685         if (error) {
  686                 device_printf(dev, "could not create stats DMA tag\n");
  687                 goto fail;
  688         }
  689 
  690         error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
  691             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->fxp_smap);
  692         if (error) {
  693                 device_printf(dev, "could not allocate stats DMA memory\n");
  694                 goto fail;
  695         }
  696         error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
  697             sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr,
  698             BUS_DMA_NOWAIT);
  699         if (error) {
  700                 device_printf(dev, "could not load the stats DMA buffer\n");
  701                 goto fail;
  702         }
  703 
  704         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  705             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  706             FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, NULL, NULL, &sc->cbl_tag);
  707         if (error) {
  708                 device_printf(dev, "could not create TxCB DMA tag\n");
  709                 goto fail;
  710         }
  711 
  712         error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
  713             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->cbl_map);
  714         if (error) {
  715                 device_printf(dev, "could not allocate TxCB DMA memory\n");
  716                 goto fail;
  717         }
  718 
  719         error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
  720             sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
  721             &sc->fxp_desc.cbl_addr, BUS_DMA_NOWAIT);
  722         if (error) {
  723                 device_printf(dev, "could not load TxCB DMA buffer\n");
  724                 goto fail;
  725         }
  726 
  727         error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
  728             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  729             sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
  730             NULL, NULL, &sc->mcs_tag);
  731         if (error) {
  732                 device_printf(dev,
  733                     "could not create multicast setup DMA tag\n");
  734                 goto fail;
  735         }
  736 
  737         error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
  738             BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->mcs_map);
  739         if (error) {
  740                 device_printf(dev,
  741                     "could not allocate multicast setup DMA memory\n");
  742                 goto fail;
  743         }
  744         error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
  745             sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr,
  746             BUS_DMA_NOWAIT);
  747         if (error) {
  748                 device_printf(dev,
  749                     "can't load the multicast setup DMA buffer\n");
  750                 goto fail;
  751         }
  752 
  753         /*
  754          * Pre-allocate the TX DMA maps and setup the pointers to
  755          * the TX command blocks.
  756          */
  757         txp = sc->fxp_desc.tx_list;
  758         tcbp = sc->fxp_desc.cbl_list;
  759         for (i = 0; i < FXP_NTXCB; i++) {
  760                 txp[i].tx_cb = tcbp + i;
  761                 error = bus_dmamap_create(sc->fxp_txmtag, 0, &txp[i].tx_map);
  762                 if (error) {
  763                         device_printf(dev, "can't create DMA map for TX\n");
  764                         goto fail;
  765                 }
  766         }
  767         error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map);
  768         if (error) {
  769                 device_printf(dev, "can't create spare DMA map\n");
  770                 goto fail;
  771         }
  772 
  773         /*
  774          * Pre-allocate our receive buffers.
  775          */
  776         sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
  777         for (i = 0; i < FXP_NRFABUFS; i++) {
  778                 rxp = &sc->fxp_desc.rx_list[i];
  779                 error = bus_dmamap_create(sc->fxp_rxmtag, 0, &rxp->rx_map);
  780                 if (error) {
  781                         device_printf(dev, "can't create DMA map for RX\n");
  782                         goto fail;
  783                 }
  784                 if (fxp_new_rfabuf(sc, rxp) != 0) {
  785                         error = ENOMEM;
  786                         goto fail;
  787                 }
  788                 fxp_add_rfabuf(sc, rxp);
  789         }
  790 
  791         /*
  792          * Read MAC address.
  793          */
  794         eaddr[0] = sc->eeprom[FXP_EEPROM_MAP_IA0] & 0xff;
  795         eaddr[1] = sc->eeprom[FXP_EEPROM_MAP_IA0] >> 8;
  796         eaddr[2] = sc->eeprom[FXP_EEPROM_MAP_IA1] & 0xff;
  797         eaddr[3] = sc->eeprom[FXP_EEPROM_MAP_IA1] >> 8;
  798         eaddr[4] = sc->eeprom[FXP_EEPROM_MAP_IA2] & 0xff;
  799         eaddr[5] = sc->eeprom[FXP_EEPROM_MAP_IA2] >> 8;
  800         if (bootverbose) {
  801                 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
  802                     pci_get_vendor(dev), pci_get_device(dev),
  803                     pci_get_subvendor(dev), pci_get_subdevice(dev),
  804                     pci_get_revid(dev));
  805                 device_printf(dev, "Dynamic Standby mode is %s\n",
  806                     sc->eeprom[FXP_EEPROM_MAP_ID] & 0x02 ? "enabled" :
  807                     "disabled");
  808         }
  809 
  810         /*
  811          * If this is only a 10Mbps device, then there is no MII, and
  812          * the PHY will use a serial interface instead.
  813          *
  814          * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
  815          * doesn't have a programming interface of any sort.  The
  816          * media is sensed automatically based on how the link partner
  817          * is configured.  This is, in essence, manual configuration.
  818          */
  819         if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
  820                 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
  821                 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
  822         } else {
  823                 /*
  824                  * i82557s wedge when isolating all of their PHYs.
  825                  */
  826                 flags = MIIF_NOISOLATE;
  827                 if (sc->revision >= FXP_REV_82558_A4)
  828                         flags |= MIIF_DOPAUSE;
  829                 error = mii_attach(dev, &sc->miibus, ifp,
  830                     (ifm_change_cb_t)fxp_ifmedia_upd,
  831                     (ifm_stat_cb_t)fxp_ifmedia_sts, BMSR_DEFCAPMASK,
  832                     MII_PHY_ANY, MII_OFFSET_ANY, flags);
  833                 if (error != 0) {
  834                         device_printf(dev, "attaching PHYs failed\n");
  835                         goto fail;
  836                 }
  837         }
  838 
  839         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  840         if_setdev(ifp, dev);
  841         if_setinitfn(ifp, fxp_init);
  842         if_setsoftc(ifp, sc);
  843         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
  844         if_setioctlfn(ifp, fxp_ioctl);
  845         if_setstartfn(ifp, fxp_start);
  846 
  847         if_setcapabilities(ifp, 0);
  848         if_setcapenable(ifp, 0);
  849 
  850         /* Enable checksum offload/TSO for 82550 or better chips */
  851         if (sc->flags & FXP_FLAG_EXT_RFA) {
  852                 if_sethwassist(ifp, FXP_CSUM_FEATURES | CSUM_TSO);
  853                 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
  854                 if_setcapenablebit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
  855         }
  856 
  857         if (sc->flags & FXP_FLAG_82559_RXCSUM) {
  858                 if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
  859                 if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
  860         }
  861 
  862         if (sc->flags & FXP_FLAG_WOLCAP) {
  863                 if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
  864                 if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
  865         }
  866 
  867 #ifdef DEVICE_POLLING
  868         /* Inform the world we support polling. */
  869         if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
  870 #endif
  871 
  872         /*
  873          * Attach the interface.
  874          */
  875         ether_ifattach(ifp, eaddr);
  876 
  877         /*
  878          * Tell the upper layer(s) we support long frames.
  879          * Must appear after the call to ether_ifattach() because
  880          * ether_ifattach() sets ifi_hdrlen to the default value.
  881          */
  882         if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
  883         if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
  884         if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
  885         if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) {
  886                 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING |
  887                     IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
  888                 if_setcapenablebit(ifp, IFCAP_VLAN_HWTAGGING |
  889                     IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
  890         }
  891 
  892         /*
  893          * Let the system queue as many packets as we have available
  894          * TX descriptors.
  895          */
  896         if_setsendqlen(ifp, FXP_NTXCB - 1);
  897         if_setsendqready(ifp);
  898 
  899         /*
  900          * Hook our interrupt after all initialization is complete.
  901          */
  902         error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
  903                                NULL, fxp_intr, sc, &sc->ih);
  904         if (error) {
  905                 device_printf(dev, "could not setup irq\n");
  906                 ether_ifdetach(sc->ifp);
  907                 goto fail;
  908         }
  909 
  910         /*
  911          * Configure the hardware to reject magic frames, otherwise the
  912          * system will hang on receipt of magic frames.
  913          */
  914         if ((sc->flags & FXP_FLAG_WOLCAP) != 0) {
  915                 FXP_LOCK(sc);
  916                 /* Clear wakeup events. */
  917                 CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR));
  918                 fxp_init_body(sc, 0);
  919                 fxp_stop(sc);
  920                 FXP_UNLOCK(sc);
  921         }
  922 
  923 fail:
  924         if (error)
  925                 fxp_release(sc);
  926         return (error);
  927 }
  928 
  929 /*
  930  * Release all resources.  The softc lock should not be held and the
  931  * interrupt should already be torn down.
  932  */
  933 static void
  934 fxp_release(struct fxp_softc *sc)
  935 {
  936         struct fxp_rx *rxp;
  937         struct fxp_tx *txp;
  938         int i;
  939 
  940         FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
  941         KASSERT(sc->ih == NULL,
  942             ("fxp_release() called with intr handle still active"));
  943         if (sc->miibus)
  944                 device_delete_child(sc->dev, sc->miibus);
  945         bus_generic_detach(sc->dev);
  946         ifmedia_removeall(&sc->sc_media);
  947         if (sc->fxp_desc.cbl_list) {
  948                 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
  949                 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
  950                     sc->cbl_map);
  951         }
  952         if (sc->fxp_stats) {
  953                 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
  954                 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
  955         }
  956         if (sc->mcsp) {
  957                 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
  958                 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
  959         }
  960         bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
  961         if (sc->fxp_rxmtag) {
  962                 for (i = 0; i < FXP_NRFABUFS; i++) {
  963                         rxp = &sc->fxp_desc.rx_list[i];
  964                         if (rxp->rx_mbuf != NULL) {
  965                                 bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
  966                                     BUS_DMASYNC_POSTREAD);
  967                                 bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
  968                                 m_freem(rxp->rx_mbuf);
  969                         }
  970                         bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
  971                 }
  972                 bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
  973                 bus_dma_tag_destroy(sc->fxp_rxmtag);
  974         }
  975         if (sc->fxp_txmtag) {
  976                 for (i = 0; i < FXP_NTXCB; i++) {
  977                         txp = &sc->fxp_desc.tx_list[i];
  978                         if (txp->tx_mbuf != NULL) {
  979                                 bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
  980                                     BUS_DMASYNC_POSTWRITE);
  981                                 bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
  982                                 m_freem(txp->tx_mbuf);
  983                         }
  984                         bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
  985                 }
  986                 bus_dma_tag_destroy(sc->fxp_txmtag);
  987         }
  988         if (sc->fxp_stag)
  989                 bus_dma_tag_destroy(sc->fxp_stag);
  990         if (sc->cbl_tag)
  991                 bus_dma_tag_destroy(sc->cbl_tag);
  992         if (sc->mcs_tag)
  993                 bus_dma_tag_destroy(sc->mcs_tag);
  994         if (sc->ifp)
  995                 if_free(sc->ifp);
  996 
  997         mtx_destroy(&sc->sc_mtx);
  998 }
  999 
 1000 /*
 1001  * Detach interface.
 1002  */
 1003 static int
 1004 fxp_detach(device_t dev)
 1005 {
 1006         struct fxp_softc *sc = device_get_softc(dev);
 1007 
 1008 #ifdef DEVICE_POLLING
 1009         if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
 1010                 ether_poll_deregister(sc->ifp);
 1011 #endif
 1012 
 1013         FXP_LOCK(sc);
 1014         /*
 1015          * Stop DMA and drop transmit queue, but disable interrupts first.
 1016          */
 1017         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 1018         fxp_stop(sc);
 1019         FXP_UNLOCK(sc);
 1020         callout_drain(&sc->stat_ch);
 1021 
 1022         /*
 1023          * Close down routes etc.
 1024          */
 1025         ether_ifdetach(sc->ifp);
 1026 
 1027         /*
 1028          * Unhook the interrupt handler.  This is to prevent further
 1029          * races with fxp_intr() while resources are being released.
 1030          */
 1031         bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
 1032         sc->ih = NULL;
 1033 
 1034         /* Release our allocated resources. */
 1035         fxp_release(sc);
 1036         return (0);
 1037 }
 1038 
 1039 /*
 1040  * Device shutdown routine. Called at system shutdown after sync. The
 1041  * main purpose of this routine is to shut off receiver DMA so that
 1042  * kernel memory doesn't get clobbered during warmboot.
 1043  */
 1044 static int
 1045 fxp_shutdown(device_t dev)
 1046 {
 1047 
 1048         /*
 1049          * Make sure that DMA is disabled prior to reboot. Not doing
 1050          * so could allow DMA to corrupt kernel memory during the
 1051          * reboot before the driver initializes.
 1052          */
 1053         return (fxp_suspend(dev));
 1054 }
 1055 
 1056 /*
 1057  * Device suspend routine.  Stop the interface and save some PCI
 1058  * settings in case the BIOS doesn't restore them properly on
 1059  * resume.
 1060  */
 1061 static int
 1062 fxp_suspend(device_t dev)
 1063 {
 1064         struct fxp_softc *sc = device_get_softc(dev);
 1065         if_t ifp;
 1066         int pmc;
 1067         uint16_t pmstat;
 1068 
 1069         FXP_LOCK(sc);
 1070 
 1071         ifp = sc->ifp;
 1072         if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
 1073                 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
 1074                 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 1075                 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
 1076                         /* Request PME. */
 1077                         pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 1078                         sc->flags |= FXP_FLAG_WOL;
 1079                         /* Reconfigure hardware to accept magic frames. */
 1080                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 1081                         fxp_init_body(sc, 0);
 1082                 }
 1083                 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1084         }
 1085         fxp_stop(sc);
 1086 
 1087         sc->suspended = 1;
 1088 
 1089         FXP_UNLOCK(sc);
 1090         return (0);
 1091 }
 1092 
 1093 /*
 1094  * Device resume routine.  Re-enable busmastering and restart the interface if
 1095  * appropriate.
 1096  */
 1097 static int
 1098 fxp_resume(device_t dev)
 1099 {
 1100         struct fxp_softc *sc = device_get_softc(dev);
 1101         if_t ifp = sc->ifp;
 1102         int pmc;
 1103         uint16_t pmstat;
 1104 
 1105         FXP_LOCK(sc);
 1106 
 1107         if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
 1108                 sc->flags &= ~FXP_FLAG_WOL;
 1109                 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
 1110                 /* Disable PME and clear PME status. */
 1111                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 1112                 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1113                 if ((sc->flags & FXP_FLAG_WOLCAP) != 0)
 1114                         CSR_WRITE_1(sc, FXP_CSR_PMDR,
 1115                             CSR_READ_1(sc, FXP_CSR_PMDR));
 1116         }
 1117 
 1118         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
 1119         DELAY(10);
 1120 
 1121         /* reinitialize interface if necessary */
 1122         if (if_getflags(ifp) & IFF_UP)
 1123                 fxp_init_body(sc, 1);
 1124 
 1125         sc->suspended = 0;
 1126 
 1127         FXP_UNLOCK(sc);
 1128         return (0);
 1129 }
 1130 
 1131 static void
 1132 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
 1133 {
 1134         uint16_t reg;
 1135         int x;
 1136 
 1137         /*
 1138          * Shift in data, MSB first: put each bit on EEDI, then pulse EESK.
 1139          */
 1140         for (x = 1 << (length - 1); x; x >>= 1) {
 1141                 if (data & x)
 1142                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1143                 else
 1144                         reg = FXP_EEPROM_EECS;
 1145                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1146                 DELAY(1);
 1147                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1148                 DELAY(1);
 1149                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1150                 DELAY(1);
 1151         }
 1152 }
 1153 
 1154 /*
 1155  * Read from the serial EEPROM. Basically, you manually shift in
 1156  * the read opcode (one bit at a time) and then shift in the address,
 1157  * and then you shift out the data (all of this one bit at a time).
 1158  * The word size is 16 bits, so you have to provide the address for
 1159  * every 16 bits of data.
 1160  */
 1161 static uint16_t
 1162 fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
 1163 {
 1164         uint16_t reg, data;
 1165         int x;
 1166 
 1167         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1168         /*
 1169          * Shift in read opcode.
 1170          */
 1171         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
 1172         /*
 1173          * Shift in address.
 1174          */
 1175         data = 0;
 1176         for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
 1177                 if (offset & x)
 1178                         reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
 1179                 else
 1180                         reg = FXP_EEPROM_EECS;
 1181                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1182                 DELAY(1);
 1183                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1184                 DELAY(1);
 1185                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1186                 DELAY(1);
 1187                 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
 1188                 data++;
 1189                 if (autosize && reg == 0) {
 1190                         sc->eeprom_size = data;
 1191                         break;
 1192                 }
 1193         }
 1194         /*
 1195          * Shift out data.
 1196          */
 1197         data = 0;
 1198         reg = FXP_EEPROM_EECS;
 1199         for (x = 1 << 15; x; x >>= 1) {
 1200                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
 1201                 DELAY(1);
 1202                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1203                         data |= x;
 1204                 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
 1205                 DELAY(1);
 1206         }
 1207         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1208         DELAY(1);
 1209 
 1210         return (data);
 1211 }
 1212 
 1213 static void
 1214 fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
 1215 {
 1216         int i;
 1217 
 1218         /*
 1219          * Erase/write enable.
 1220          */
 1221         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1222         fxp_eeprom_shiftin(sc, 0x4, 3);
 1223         fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
 1224         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1225         DELAY(1);
 1226         /*
 1227          * Shift in write opcode, address, data.
 1228          */
 1229         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1230         fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
 1231         fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
 1232         fxp_eeprom_shiftin(sc, data, 16);
 1233         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1234         DELAY(1);
 1235         /*
 1236          * Wait for EEPROM to finish up.
 1237          */
 1238         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1239         DELAY(1);
 1240         for (i = 0; i < 1000; i++) {
 1241                 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
 1242                         break;
 1243                 DELAY(50);
 1244         }
 1245         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1246         DELAY(1);
 1247         /*
 1248          * Erase/write disable.
 1249          */
 1250         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
 1251         fxp_eeprom_shiftin(sc, 0x4, 3);
 1252         fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
 1253         CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
 1254         DELAY(1);
 1255 }
 1256 
 1257 /*
 1258  * From NetBSD:
 1259  *
 1260  * Figure out EEPROM size.
 1261  *
 1262  * 559's can have either 64-word or 256-word EEPROMs, the 558
 1263  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 1264  * talks about the existence of 16 to 256 word EEPROMs.
 1265  *
 1266  * The only known sizes are 64 and 256, where the 256 version is used
 1267  * by CardBus cards to store CIS information.
 1268  *
 1269  * The address is shifted in msb-to-lsb, and after the last
 1270  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 1271  * after which follows the actual data.  We try to detect this zero by
 1272  * probing the data-out bit in the EEPROM control register just after
 1273  * having shifted in a bit.  If the bit is zero, we assume we've
 1274  * shifted enough address bits.  The data-out should be tri-stated
 1275  * before this, which should translate to a logical one.
 1276  */
 1277 static void
 1278 fxp_autosize_eeprom(struct fxp_softc *sc)
 1279 {
 1280 
 1281         /* guess maximum size of 256 words */
 1282         sc->eeprom_size = 8;
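              /*
               * eeprom_size is a count of address bits, so the 8 above means
               * up to 1 << 8 == 256 words; the autosizing read below shrinks
               * it once the dummy zero bit is detected during the address
               * shift.
               */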
 1283 
 1284         /* autosize */
 1285         (void) fxp_eeprom_getword(sc, 0, 1);
 1286 }
 1287 
 1288 static void
 1289 fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1290 {
 1291         int i;
 1292 
 1293         for (i = 0; i < words; i++)
 1294                 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
 1295 }
 1296 
 1297 static void
 1298 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
 1299 {
 1300         int i;
 1301 
 1302         for (i = 0; i < words; i++)
 1303                 fxp_eeprom_putword(sc, offset + i, data[i]);
 1304 }
 1305 
 1306 static void
 1307 fxp_load_eeprom(struct fxp_softc *sc)
 1308 {
 1309         int i;
 1310         uint16_t cksum;
 1311 
 1312         fxp_read_eeprom(sc, sc->eeprom, 0, 1 << sc->eeprom_size);
 1313         cksum = 0;
 1314         for (i = 0; i < (1 << sc->eeprom_size) - 1; i++)
 1315                 cksum += sc->eeprom[i];
 1316         cksum = 0xBABA - cksum;
 1317         if (cksum != sc->eeprom[(1 << sc->eeprom_size) - 1])
 1318                 device_printf(sc->dev,
 1319                     "EEPROM checksum mismatch! (0x%04x -> 0x%04x)\n",
 1320                     cksum, sc->eeprom[(1 << sc->eeprom_size) - 1]);
 1321 }
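
/*
 * Editor's aside (illustrative sketch, not part of if_fxp.c): the checksum
 * convention enforced by fxp_load_eeprom() above is that the last word of
 * the EEPROM equals 0xBABA minus the 16-bit sum of every preceding word.
 * The names `image' and `nwords' are hypothetical.
 */
#if 0
static int
eeprom_image_ok(const uint16_t *image, int nwords)
{
	uint16_t sum = 0;
	int i;

	/* Sum every word except the stored checksum itself. */
	for (i = 0; i < nwords - 1; i++)
		sum += image[i];
	return ((uint16_t)(0xBABA - sum) == image[nwords - 1]);
}
#endif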
 1322 
 1323 /*
 1324  * Grab the softc lock and call the real fxp_start_body() routine
 1325  */
 1326 static void
 1327 fxp_start(if_t ifp)
 1328 {
 1329         struct fxp_softc *sc = if_getsoftc(ifp);
 1330 
 1331         FXP_LOCK(sc);
 1332         fxp_start_body(ifp);
 1333         FXP_UNLOCK(sc);
 1334 }
 1335 
 1336 /*
 1337  * Start packet transmission on the interface.
 1338  * This routine must be called with the softc lock held, and is an
 1339  * internal entry point only.
 1340  */
 1341 static void
 1342 fxp_start_body(if_t ifp)
 1343 {
 1344         struct fxp_softc *sc = if_getsoftc(ifp);
 1345         struct mbuf *mb_head;
 1346         int txqueued;
 1347 
 1348         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1349 
 1350         if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1351             IFF_DRV_RUNNING)
 1352                 return;
 1353 
 1354         if (sc->tx_queued > FXP_NTXCB_HIWAT)
 1355                 fxp_txeof(sc);
 1356         /*
 1357          * We're finished if there is nothing more to add to the list or if
 1358          * we're all filled up with buffers to transmit.
 1359          * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
 1360          *       a NOP command when needed.
 1361          */
 1362         txqueued = 0;
 1363         while (!if_sendq_empty(ifp) && sc->tx_queued < FXP_NTXCB - 1) {
 1364 
 1365                 /*
 1366                  * Grab a packet to transmit.
 1367                  */
 1368                 mb_head = if_dequeue(ifp);
 1369                 if (mb_head == NULL)
 1370                         break;
 1371 
 1372                 if (fxp_encap(sc, &mb_head)) {
 1373                         if (mb_head == NULL)
 1374                                 break;
 1375                         if_sendq_prepend(ifp, mb_head);
 1376                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                              break;
 1377                 }
 1378                 txqueued++;
 1379                 /*
 1380                  * Pass packet to bpf if there is a listener.
 1381                  */
 1382                 if_bpfmtap(ifp, mb_head);
 1383         }
 1384 
 1385         /*
 1386          * We're finished. If we added to the list, issue a RESUME to get DMA
 1387          * going again if suspended.
 1388          */
 1389         if (txqueued > 0) {
 1390                 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1391                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1392                 fxp_scb_wait(sc);
 1393                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
 1394                 /*
 1395                  * Set a 5 second timer just in case we don't hear
 1396                  * from the card again.
 1397                  */
 1398                 sc->watchdog_timer = 5;
 1399         }
 1400 }
 1401 
 1402 static int
 1403 fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
 1404 {
 1405         struct mbuf *m;
 1406         struct fxp_tx *txp;
 1407         struct fxp_cb_tx *cbp;
 1408         struct tcphdr *tcp;
 1409         bus_dma_segment_t segs[FXP_NTXSEG];
 1410         int error, i, nseg, tcp_payload;
 1411 
 1412         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1413 
 1414         tcp_payload = 0;
 1415         tcp = NULL;
 1416         /*
 1417          * Get pointer to next available tx desc.
 1418          */
 1419         txp = sc->fxp_desc.tx_last->tx_next;
 1420 
 1421         /*
 1422          * A note in Appendix B of the Intel 8255x 10/100 Mbps
 1423          * Ethernet Controller Family Open Source Software
 1424          * Developer Manual says:
 1425          *   Using software parsing is only allowed with legal
 1426          *   TCP/IP or UDP/IP packets.
 1427          *   ...
 1428          *   For all other datagrams, hardware parsing must
 1429          *   be used.
 1430          * Software parsing appears to truncate ICMP and
 1431          * fragmented UDP packets that contain one to three
 1432          * bytes in the second (and final) mbuf of the packet.
 1433          */
 1434         if (sc->flags & FXP_FLAG_EXT_RFA)
 1435                 txp->tx_cb->ipcb_ip_activation_high =
 1436                     FXP_IPCB_HARDWAREPARSING_ENABLE;
 1437 
 1438         m = *m_head;
 1439         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
 1440                 /*
 1441                  * The 82550/82551 requires that the ethernet/IP/TCP
 1442                  * headers be contained in the first active transmit buffer.
 1443                  */
 1444                 struct ether_header *eh;
 1445                 struct ip *ip;
 1446                 uint32_t ip_off, poff;
 1447 
 1448                 if (M_WRITABLE(*m_head) == 0) {
 1449                         /* Get a writable copy. */
 1450                         m = m_dup(*m_head, M_NOWAIT);
 1451                         m_freem(*m_head);
 1452                         if (m == NULL) {
 1453                                 *m_head = NULL;
 1454                                 return (ENOBUFS);
 1455                         }
 1456                         *m_head = m;
 1457                 }
 1458                 ip_off = sizeof(struct ether_header);
 1459                 m = m_pullup(*m_head, ip_off);
 1460                 if (m == NULL) {
 1461                         *m_head = NULL;
 1462                         return (ENOBUFS);
 1463                 }
 1464                 eh = mtod(m, struct ether_header *);
 1465                 /* Check the existence of VLAN tag. */
 1466                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 1467                         ip_off = sizeof(struct ether_vlan_header);
 1468                         m = m_pullup(m, ip_off);
 1469                         if (m == NULL) {
 1470                                 *m_head = NULL;
 1471                                 return (ENOBUFS);
 1472                         }
 1473                 }
 1474                 m = m_pullup(m, ip_off + sizeof(struct ip));
 1475                 if (m == NULL) {
 1476                         *m_head = NULL;
 1477                         return (ENOBUFS);
 1478                 }
 1479                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1480                 poff = ip_off + (ip->ip_hl << 2);
 1481                 m = m_pullup(m, poff + sizeof(struct tcphdr));
 1482                 if (m == NULL) {
 1483                         *m_head = NULL;
 1484                         return (ENOBUFS);
 1485                 }
 1486                 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1487                 m = m_pullup(m, poff + (tcp->th_off << 2));
 1488                 if (m == NULL) {
 1489                         *m_head = NULL;
 1490                         return (ENOBUFS);
 1491                 }
 1492 
 1493                 /*
 1494                  * Since the 82550/82551 doesn't modify the IP length and
 1495                  * pseudo checksum in the first frame, the driver must compute them.
 1496                  */
 1497                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1498                 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1499                 ip->ip_sum = 0;
 1500                 ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) +
 1501                     (tcp->th_off << 2));
 1502                 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 1503                     htons(IPPROTO_TCP + (tcp->th_off << 2) +
 1504                     m->m_pkthdr.tso_segsz));
 1505                 /* Compute total TCP payload. */
 1506                 tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2);
 1507                 tcp_payload -= tcp->th_off << 2;
 1508                 *m_head = m;
 1509         } else if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) {
 1510                 /*
 1511                  * Deal with TCP/IP checksum offload. Note that
 1512                  * in order for TCP checksum offload to work,
 1513                  * the pseudo header checksum must have already
 1514                  * been computed and stored in the checksum field
 1515                  * in the TCP header. The stack should have
 1516                  * already done this for us.
 1517                  */
 1518                 txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
 1519                 if (m->m_pkthdr.csum_flags & CSUM_TCP)
 1520                         txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET;
 1521 
 1522 #ifdef FXP_IP_CSUM_WAR
 1523                 /*
 1524                  * XXX The 82550 chip appears to have trouble
 1525                  * dealing with IP header checksums in very small
 1526                  * datagrams, namely fragments from 1 to 3 bytes
 1527                  * in size. For example, say you want to transmit
 1528                  * a UDP packet of 1473 bytes. The packet will be
 1529                  * fragmented over two IP datagrams, the latter
 1530                  * containing only one byte of data. The 82550 will
 1531                  * botch the header checksum on the 1-byte fragment.
 1532                  * As long as the datagram contains 4 or more bytes
 1533                  * of data, you're ok.
 1534                  *
 1535                  * The following code attempts to work around this
 1536                  * problem: if the datagram is less than 38 bytes
 1537                  * in size (14 bytes ether header, 20 bytes IP header,
 1538                  * plus 4 bytes of data), we punt and compute the IP
 1539                  * header checksum by hand. This workaround doesn't
 1540                  * work very well, however, since it can be fooled
 1541                  * by things like VLAN tags and IP options that make
 1542                  * the header sizes/offsets vary.
 1543                  */
 1544 
 1545                 if (m->m_pkthdr.csum_flags & CSUM_IP) {
 1546                         if (m->m_pkthdr.len < 38) {
 1547                                 struct ip *ip;
 1548                                 m->m_data += ETHER_HDR_LEN;
 1549                                 ip = mtod(m, struct ip *);
 1550                                 ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
 1551                                 m->m_data -= ETHER_HDR_LEN;
 1552                                 m->m_pkthdr.csum_flags &= ~CSUM_IP;
 1553                         } else {
 1554                                 txp->tx_cb->ipcb_ip_activation_high =
 1555                                     FXP_IPCB_HARDWAREPARSING_ENABLE;
 1556                                 txp->tx_cb->ipcb_ip_schedule |=
 1557                                     FXP_IPCB_IP_CHECKSUM_ENABLE;
 1558                         }
 1559                 }
 1560 #endif
 1561         }
 1562 
 1563         error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head,
 1564             segs, &nseg, 0);
 1565         if (error == EFBIG) {
 1566                 m = m_collapse(*m_head, M_NOWAIT, sc->maxtxseg);
 1567                 if (m == NULL) {
 1568                         m_freem(*m_head);
 1569                         *m_head = NULL;
 1570                         return (ENOMEM);
 1571                 }
 1572                 *m_head = m;
 1573                 error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map,
 1574                     *m_head, segs, &nseg, 0);
 1575                 if (error != 0) {
 1576                         m_freem(*m_head);
 1577                         *m_head = NULL;
 1578                         return (ENOMEM);
 1579                 }
 1580         } else if (error != 0)
 1581                 return (error);
 1582         if (nseg == 0) {
 1583                 m_freem(*m_head);
 1584                 *m_head = NULL;
 1585                 return (EIO);
 1586         }
 1587 
 1588         KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));
 1589         bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
 1590 
 1591         cbp = txp->tx_cb;
 1592         for (i = 0; i < nseg; i++) {
 1593                 /*
 1594                  * If this is an 82550/82551, then we're using extended
 1595                  * TxCBs _and_ we're using checksum offload. This means
 1596                  * that the TxCB is really an IPCB. One major difference
 1597                  * between the two is that with plain extended TxCBs,
 1598                  * the bottom half of the TxCB contains two entries from
 1599                  * the TBD array, whereas IPCBs contain just one entry:
 1600                  * one entry (8 bytes) has been sacrificed for the TCP/IP
 1601                  * checksum offload control bits. So to make things work
 1602                  * right, we have to start filling in the TBD array
 1603                  * starting from a different place depending on whether
 1604                  * the chip is an 82550/82551 or not.
 1605                  */
 1606                 if (sc->flags & FXP_FLAG_EXT_RFA) {
 1607                         cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
 1608                         cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
 1609                 } else {
 1610                         cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
 1611                         cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
 1612                 }
 1613         }
 1614         if (sc->flags & FXP_FLAG_EXT_RFA) {
 1615                 /* Configure dynamic TBD for 82550/82551. */
 1616                 cbp->tbd_number = 0xFF;
 1617                 cbp->tbd[nseg].tb_size |= htole32(0x8000);
 1618         } else
 1619                 cbp->tbd_number = nseg;
 1620         /* Configure TSO. */
 1621         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
 1622                 cbp->tbdtso.tb_size = htole32(m->m_pkthdr.tso_segsz << 16);
 1623                 cbp->tbd[1].tb_size |= htole32(tcp_payload << 16);
 1624                 cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE |
 1625                     FXP_IPCB_IP_CHECKSUM_ENABLE |
 1626                     FXP_IPCB_TCP_PACKET |
 1627                     FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
 1628         }
 1629         /* Configure VLAN hardware tag insertion. */
 1630         if ((m->m_flags & M_VLANTAG) != 0) {
 1631                 cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag);
 1632                 txp->tx_cb->ipcb_ip_activation_high |=
 1633                     FXP_IPCB_INSERTVLAN_ENABLE;
 1634         }
 1635 
 1636         txp->tx_mbuf = m;
 1637         txp->tx_cb->cb_status = 0;
 1638         txp->tx_cb->byte_count = 0;
 1639         if (sc->tx_queued != FXP_CXINT_THRESH - 1)
 1640                 txp->tx_cb->cb_command =
 1641                     htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1642                     FXP_CB_COMMAND_S);
 1643         else
 1644                 txp->tx_cb->cb_command =
 1645                     htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
 1646                     FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
 1647         if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0)
 1648                 txp->tx_cb->tx_threshold = tx_threshold;
 1649 
 1650         /*
 1651          * Advance the end of list forward.
 1652          */
 1653         sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
 1654         sc->fxp_desc.tx_last = txp;
 1655 
 1656         /*
 1657          * Advance the beginning of the list forward if there are
 1658          * no other packets queued (when nothing is queued, tx_first
 1659          * sits on the last TxCB that was sent out).
 1660          */
 1661         if (sc->tx_queued == 0)
 1662                 sc->fxp_desc.tx_first = txp;
 1663 
 1664         sc->tx_queued++;
 1665 
 1666         return (0);
 1667 }
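
/*
 * Editor's aside (illustrative sketch, not part of if_fxp.c): for TSO
 * frames, fxp_encap() above seeds th_sum with a pseudo-header checksum
 * whose length field is not the full payload but the TCP header length
 * plus the MSS (tso_segsz), presumably so it applies to each segment the
 * chip emits.  The helper below simply names that expression.
 */
#if 0
static uint16_t
tso_pseudo_seed(struct ip *ip, struct tcphdr *tcp, uint16_t mss)
{
	/* Same expression as the in_pseudo() call in fxp_encap(). */
	return (in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htons(IPPROTO_TCP + (tcp->th_off << 2) + mss)));
}
#endif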
 1668 
 1669 #ifdef DEVICE_POLLING
 1670 static poll_handler_t fxp_poll;
 1671 
 1672 static int
 1673 fxp_poll(if_t ifp, enum poll_cmd cmd, int count)
 1674 {
 1675         struct fxp_softc *sc = if_getsoftc(ifp);
 1676         uint8_t statack;
 1677         int rx_npkts = 0;
 1678 
 1679         FXP_LOCK(sc);
 1680         if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
 1681                 FXP_UNLOCK(sc);
 1682                 return (rx_npkts);
 1683         }
 1684 
 1685         statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
 1686             FXP_SCB_STATACK_FR;
 1687         if (cmd == POLL_AND_CHECK_STATUS) {
 1688                 uint8_t tmp;
 1689 
 1690                 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
 1691                 if (tmp == 0xff || tmp == 0) {
 1692                         FXP_UNLOCK(sc);
 1693                         return (rx_npkts); /* nothing to do */
 1694                 }
 1695                 tmp &= ~statack;
 1696                 /* ack what we can */
 1697                 if (tmp != 0)
 1698                         CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
 1699                 statack |= tmp;
 1700         }
 1701         rx_npkts = fxp_intr_body(sc, ifp, statack, count);
 1702         FXP_UNLOCK(sc);
 1703         return (rx_npkts);
 1704 }
 1705 #endif /* DEVICE_POLLING */
 1706 
 1707 /*
 1708  * Process interface interrupts.
 1709  */
 1710 static void
 1711 fxp_intr(void *xsc)
 1712 {
 1713         struct fxp_softc *sc = xsc;
 1714         if_t ifp = sc->ifp;
 1715         uint8_t statack;
 1716 
 1717         FXP_LOCK(sc);
 1718         if (sc->suspended) {
 1719                 FXP_UNLOCK(sc);
 1720                 return;
 1721         }
 1722 
 1723 #ifdef DEVICE_POLLING
 1724         if (if_getcapenable(ifp) & IFCAP_POLLING) {
 1725                 FXP_UNLOCK(sc);
 1726                 return;
 1727         }
 1728 #endif
 1729         while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
 1730                 /*
 1731                  * It should not be possible to have all bits set; the
 1732                  * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
 1733                  * all bits are set, this may indicate that the card has
 1734                  * been physically ejected, so ignore it.
 1735                  */
 1736                 if (statack == 0xff) {
 1737                         FXP_UNLOCK(sc);
 1738                         return;
 1739                 }
 1740 
 1741                 /*
 1742                  * First ACK all the interrupts in this pass.
 1743                  */
 1744                 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
 1745                 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
 1746                         fxp_intr_body(sc, ifp, statack, -1);
 1747         }
 1748         FXP_UNLOCK(sc);
 1749 }
 1750 
 1751 static void
 1752 fxp_txeof(struct fxp_softc *sc)
 1753 {
 1754         if_t ifp;
 1755         struct fxp_tx *txp;
 1756 
 1757         ifp = sc->ifp;
 1758         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1759             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1760         for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
 1761             (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
 1762             txp = txp->tx_next) {
 1763                 if (txp->tx_mbuf != NULL) {
 1764                         bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
 1765                             BUS_DMASYNC_POSTWRITE);
 1766                         bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
 1767                         m_freem(txp->tx_mbuf);
 1768                         txp->tx_mbuf = NULL;
 1769                         /* clear this to reset csum offload bits */
 1770                         txp->tx_cb->tbd[0].tb_addr = 0;
 1771                 }
 1772                 sc->tx_queued--;
 1773                 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 1774         }
 1775         sc->fxp_desc.tx_first = txp;
 1776         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 1777             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1778         if (sc->tx_queued == 0)
 1779                 sc->watchdog_timer = 0;
 1780 }
 1781 
 1782 static void
 1783 fxp_rxcsum(struct fxp_softc *sc, if_t ifp, struct mbuf *m,
 1784     uint16_t status, int pos)
 1785 {
 1786         struct ether_header *eh;
 1787         struct ip *ip;
 1788         struct udphdr *uh;
 1789         int32_t hlen, len, pktlen, temp32;
 1790         uint16_t csum, *opts;
 1791 
 1792         if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
 1793                 if ((status & FXP_RFA_STATUS_PARSE) != 0) {
 1794                         if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
 1795                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1796                         if (status & FXP_RFDX_CS_IP_CSUM_VALID)
 1797                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1798                         if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
 1799                             (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
 1800                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 1801                                     CSUM_PSEUDO_HDR;
 1802                                 m->m_pkthdr.csum_data = 0xffff;
 1803                         }
 1804                 }
 1805                 return;
 1806         }
 1807 
 1808         pktlen = m->m_pkthdr.len;
 1809         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1810                 return;
 1811         eh = mtod(m, struct ether_header *);
 1812         if (eh->ether_type != htons(ETHERTYPE_IP))
 1813                 return;
 1814         ip = (struct ip *)(eh + 1);
 1815         if (ip->ip_v != IPVERSION)
 1816                 return;
 1817 
 1818         hlen = ip->ip_hl << 2;
 1819         pktlen -= sizeof(struct ether_header);
 1820         if (hlen < sizeof(struct ip))
 1821                 return;
 1822         if (ntohs(ip->ip_len) < hlen)
 1823                 return;
 1824         if (ntohs(ip->ip_len) != pktlen)
 1825                 return;
 1826         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1827                 return; /* can't handle fragmented packet */
 1828 
 1829         switch (ip->ip_p) {
 1830         case IPPROTO_TCP:
 1831                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1832                         return;
 1833                 break;
 1834         case IPPROTO_UDP:
 1835                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1836                         return;
 1837                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1838                 if (uh->uh_sum == 0)
 1839                         return; /* no checksum */
 1840                 break;
 1841         default:
 1842                 return;
 1843         }
 1844         /* Extract computed checksum. */
 1845         csum = be16dec(mtod(m, char *) + pos);
 1846         /* checksum fixup for IP options */
 1847         len = hlen - sizeof(struct ip);
 1848         if (len > 0) {
 1849                 opts = (uint16_t *)(ip + 1);
 1850                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 1851                         temp32 = csum - *opts;
 1852                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1853                         csum = temp32 & 65535;
 1854                 }
 1855         }
 1856         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1857         m->m_pkthdr.csum_data = csum;
 1858 }
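
/*
 * Editor's aside (illustrative sketch, not part of if_fxp.c): the option
 * fixup loop in fxp_rxcsum() above removes each IP option word from the
 * hardware checksum using one's-complement subtraction with an end-around
 * borrow; the same temp32 fold is written below as a standalone helper.
 */
#if 0
static uint16_t
csum_sub_word(uint16_t csum, uint16_t word)
{
	int32_t t = (int32_t)csum - word;

	t = (t >> 16) + (t & 0xffff);	/* fold the borrow back in */
	return ((uint16_t)t);
}
#endif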
 1859 
 1860 static int
 1861 fxp_intr_body(struct fxp_softc *sc, if_t ifp, uint8_t statack,
 1862     int count)
 1863 {
 1864         struct mbuf *m;
 1865         struct fxp_rx *rxp;
 1866         struct fxp_rfa *rfa;
 1867         int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
 1868         int rx_npkts;
 1869         uint16_t status;
 1870 
 1871         rx_npkts = 0;
 1872         FXP_LOCK_ASSERT(sc, MA_OWNED);
 1873 
 1874         if (rnr)
 1875                 sc->rnr++;
 1876 #ifdef DEVICE_POLLING
 1877         /* Pick up a deferred RNR condition if `count' ran out last time. */
 1878         if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
 1879                 sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
 1880                 rnr = 1;
 1881         }
 1882 #endif
 1883 
 1884         /*
 1885          * Free any finished transmit mbuf chains.
 1886          *
 1887          * Handle the CNA event like a CXTNO event. It used to
 1888          * be that this event (control unit not ready) was not
 1889          * encountered, but it is now with the SMPng modifications.
 1890          * The exact sequence of events that occur when the interface
 1891          * is brought up are different now, and if this event
 1892          * is brought up is different now, and if this event
 1893          * can stall for several seconds. The result is that no
 1894          * packets go out onto the wire for about 5 to 10 seconds
 1895          * after the interface is ifconfig'ed for the first time.
 1896          */
 1897         if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
 1898                 fxp_txeof(sc);
 1899 
 1900         /*
 1901          * Try to start more packets transmitting.
 1902          */
 1903         if (!if_sendq_empty(ifp))
 1904                 fxp_start_body(ifp);
 1905 
 1906         /*
 1907          * Just return if nothing happened on the receive side.
 1908          */
 1909         if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
 1910                 return (rx_npkts);
 1911 
 1912         /*
 1913          * Process receiver interrupts. If a no-resource (RNR)
 1914          * condition exists, get whatever packets we can and
 1915          * re-start the receiver.
 1916          *
 1917          * When using polling, we do not process the list to completion,
 1918          * so when we get an RNR interrupt we must defer the restart
 1919          * until we hit the last buffer with the C bit set.
 1920          * If we run out of cycles and rfa_headm has the C bit set,
 1921          * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
 1922          * that the info will be used in the subsequent polling cycle.
 1923          */
 1924         for (;;) {
 1925                 rxp = sc->fxp_desc.rx_head;
 1926                 m = rxp->rx_mbuf;
 1927                 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
 1928                     RFA_ALIGNMENT_FUDGE);
 1929                 bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 1930                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1931 
 1932 #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
 1933                 if (count >= 0 && count-- == 0) {
 1934                         if (rnr) {
 1935                                 /* Defer RNR processing until the next time. */
 1936                                 sc->flags |= FXP_FLAG_DEFERRED_RNR;
 1937                                 rnr = 0;
 1938                         }
 1939                         break;
 1940                 }
 1941 #endif /* DEVICE_POLLING */
 1942 
 1943                 status = le16toh(rfa->rfa_status);
 1944                 if ((status & FXP_RFA_STATUS_C) == 0)
 1945                         break;
 1946 
 1947                 if ((status & FXP_RFA_STATUS_RNR) != 0)
 1948                         rnr++;
 1949                 /*
 1950                  * Advance head forward.
 1951                  */
 1952                 sc->fxp_desc.rx_head = rxp->rx_next;
 1953 
 1954                 /*
 1955                  * Add a new buffer to the receive chain.
 1956                  * If this fails, the old buffer is recycled
 1957                  * instead.
 1958                  */
 1959                 if (fxp_new_rfabuf(sc, rxp) == 0) {
 1960                         int total_len;
 1961 
 1962                         /*
 1963                          * Fetch packet length (the top 2 bits of
 1964                          * actual_size are flags set by the controller
 1965                          * upon completion), and drop the packet in case
 1966                          * of bogus length or CRC errors.
 1967                          */
 1968                         total_len = le16toh(rfa->actual_size) & 0x3fff;
 1969                         if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
 1970                             (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
 1971                                 /* Adjust for appended checksum bytes. */
 1972                                 total_len -= 2;
 1973                         }
 1974                         if (total_len < (int)sizeof(struct ether_header) ||
 1975                             total_len > (MCLBYTES - RFA_ALIGNMENT_FUDGE -
 1976                             sc->rfa_size) ||
 1977                             status & (FXP_RFA_STATUS_CRC |
 1978                             FXP_RFA_STATUS_ALIGN | FXP_RFA_STATUS_OVERRUN)) {
 1979                                 m_freem(m);
 1980                                 fxp_add_rfabuf(sc, rxp);
 1981                                 continue;
 1982                         }
 1983 
 1984                         m->m_pkthdr.len = m->m_len = total_len;
 1985                         if_setrcvif(m, ifp);
 1986 
 1987                         /* Do IP checksum checking. */
 1988                         if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
 1989                                 fxp_rxcsum(sc, ifp, m, status, total_len);
 1990                         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
 1991                             (status & FXP_RFA_STATUS_VLAN) != 0) {
 1992                                 m->m_pkthdr.ether_vtag =
 1993                                     ntohs(rfa->rfax_vlan_id);
 1994                                 m->m_flags |= M_VLANTAG;
 1995                         }
 1996                         /*
 1997                          * Drop locks before calling if_input() since it
 1998                          * may re-enter fxp_start() in the netisr case.
 1999                          * This would result in a lock reversal.  Better
 2000                          * performance might be obtained by chaining all
 2001                          * packets received, dropping the lock, and then
 2002                          * calling if_input() on each one.
 2003                          */
 2004                         FXP_UNLOCK(sc);
 2005                         if_input(ifp, m);
 2006                         FXP_LOCK(sc);
 2007                         rx_npkts++;
 2008                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
 2009                                 return (rx_npkts);
 2010                 } else {
 2011                         /* Reuse RFA and loaded DMA map. */
 2012                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2013                         fxp_discard_rfabuf(sc, rxp);
 2014                 }
 2015                 fxp_add_rfabuf(sc, rxp);
 2016         }
 2017         if (rnr) {
 2018                 fxp_scb_wait(sc);
 2019                 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
 2020                     sc->fxp_desc.rx_head->rx_addr);
 2021                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 2022         }
 2023         return (rx_npkts);
 2024 }
 2025 
 2026 static void
 2027 fxp_update_stats(struct fxp_softc *sc)
 2028 {
 2029         if_t ifp = sc->ifp;
 2030         struct fxp_stats *sp = sc->fxp_stats;
 2031         struct fxp_hwstats *hsp;
 2032         uint32_t *status;
 2033 
 2034         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2035 
 2036         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2037             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2038         /* Update statistical counters. */
 2039         if (sc->revision >= FXP_REV_82559_A0)
 2040                 status = &sp->completion_status;
 2041         else if (sc->revision >= FXP_REV_82558_A4)
 2042                 status = (uint32_t *)&sp->tx_tco;
 2043         else
 2044                 status = &sp->tx_pause;
 2045         if (*status == htole32(FXP_STATS_DR_COMPLETE)) {
 2046                 hsp = &sc->fxp_hwstats;
 2047                 hsp->tx_good += le32toh(sp->tx_good);
 2048                 hsp->tx_maxcols += le32toh(sp->tx_maxcols);
 2049                 hsp->tx_latecols += le32toh(sp->tx_latecols);
 2050                 hsp->tx_underruns += le32toh(sp->tx_underruns);
 2051                 hsp->tx_lostcrs += le32toh(sp->tx_lostcrs);
 2052                 hsp->tx_deffered += le32toh(sp->tx_deffered);
 2053                 hsp->tx_single_collisions += le32toh(sp->tx_single_collisions);
 2054                 hsp->tx_multiple_collisions +=
 2055                     le32toh(sp->tx_multiple_collisions);
 2056                 hsp->tx_total_collisions += le32toh(sp->tx_total_collisions);
 2057                 hsp->rx_good += le32toh(sp->rx_good);
 2058                 hsp->rx_crc_errors += le32toh(sp->rx_crc_errors);
 2059                 hsp->rx_alignment_errors += le32toh(sp->rx_alignment_errors);
 2060                 hsp->rx_rnr_errors += le32toh(sp->rx_rnr_errors);
 2061                 hsp->rx_overrun_errors += le32toh(sp->rx_overrun_errors);
 2062                 hsp->rx_cdt_errors += le32toh(sp->rx_cdt_errors);
 2063                 hsp->rx_shortframes += le32toh(sp->rx_shortframes);
 2064                 hsp->tx_pause += le32toh(sp->tx_pause);
 2065                 hsp->rx_pause += le32toh(sp->rx_pause);
 2066                 hsp->rx_controls += le32toh(sp->rx_controls);
 2067                 hsp->tx_tco += le16toh(sp->tx_tco);
 2068                 hsp->rx_tco += le16toh(sp->rx_tco);
 2069 
 2070                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, le32toh(sp->tx_good));
 2071                 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
 2072                     le32toh(sp->tx_total_collisions));
 2073                 if (sp->rx_good) {
 2074                         if_inc_counter(ifp, IFCOUNTER_IPACKETS,
 2075                             le32toh(sp->rx_good));
 2076                         sc->rx_idle_secs = 0;
 2077                 } else if (sc->flags & FXP_FLAG_RXBUG) {
 2078                         /*
 2079                          * Receiver's been idle for another second.
 2080                          */
 2081                         sc->rx_idle_secs++;
 2082                 }
 2083                 if_inc_counter(ifp, IFCOUNTER_IERRORS,
 2084                     le32toh(sp->rx_crc_errors) +
 2085                     le32toh(sp->rx_alignment_errors) +
 2086                     le32toh(sp->rx_rnr_errors) +
 2087                     le32toh(sp->rx_overrun_errors));
 2088                 /*
 2089                  * If any transmit underruns occurred, bump up the transmit
 2090                  * threshold by another 512 bytes (64 * 8).
 2091                  */
 2092                 if (sp->tx_underruns) {
 2093                         if_inc_counter(ifp, IFCOUNTER_OERRORS,
 2094                             le32toh(sp->tx_underruns));
 2095                         if (tx_threshold < 192)
 2096                                 tx_threshold += 64;
 2097                 }
 2098                 *status = 0;
 2099                 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2100                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2101         }
 2102 }
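
/*
 * Editor's aside (not driver code): tx_threshold is written straight into
 * the TxCB and, per the "(64 * 8)" note above, is counted in 8-byte units,
 * so each underrun-driven `+= 64' raises the DMA-before-transmit threshold
 * by 512 bytes, and the `< 192' test caps it at 1536 bytes.
 */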
 2103 
 2104 /*
 2105  * Update packet in/out/collision statistics. The i82557 doesn't
 2106  * allow you to access these counters without doing a fairly
 2107  * expensive DMA to get _all_ of the statistics it maintains, so
 2108  * we do this operation here only once per second. The statistics
 2109  * counters in the kernel are updated from the previous dump-stats
 2110  * DMA and then a new dump-stats DMA is started. The on-chip
 2111  * counters are zeroed when the DMA completes. If we can't start
 2112  * the DMA immediately, we don't wait - we just prepare to read
 2113  * them again next time.
 2114  */
 2115 static void
 2116 fxp_tick(void *xsc)
 2117 {
 2118         struct fxp_softc *sc = xsc;
 2119         if_t ifp = sc->ifp;
 2120 
 2121         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2122 
 2123         /* Update statistical counters. */
 2124         fxp_update_stats(sc);
 2125 
 2126         /*
 2127          * Release any xmit buffers that have completed DMA. This isn't
 2128          * strictly necessary to do here, but it's advantageous for mbufs
 2129          * with external storage to be released in a timely manner rather
 2130          * than being deferred for a potentially long time. This limits
 2131          * the delay to a maximum of one second.
 2132          */
 2133         fxp_txeof(sc);
 2134 
 2135         /*
 2136          * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
 2137          * then assume the receiver has locked up and attempt to clear
 2138          * the condition by reprogramming the multicast filter. This is
 2139          * a work-around for a bug in the 82557 where the receiver locks
 2140          * up if it gets certain types of garbage in the synchronization
 2141          * bits prior to the packet header. This bug is supposed to only
 2142          * occur in 10Mbps mode, but has been seen to occur in 100Mbps
 2143          * mode as well (perhaps due to a 10/100 speed transition).
 2144          */
 2145         if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
 2146                 sc->rx_idle_secs = 0;
 2147                 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2148                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2149                         fxp_init_body(sc, 1);
 2150                 }
 2151                 return;
 2152         }
 2153         /*
 2154          * If there is no pending command, start another stats
 2155          * dump. Otherwise punt for now.
 2156          */
 2157         if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
 2158                 /*
 2159                  * Start another stats dump.
 2160                  */
 2161                 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
 2162         }
 2163         if (sc->miibus != NULL)
 2164                 mii_tick(device_get_softc(sc->miibus));
 2165 
 2166         /*
 2167          * Check that chip hasn't hung.
 2168          */
 2169         fxp_watchdog(sc);
 2170 
 2171         /*
 2172          * Schedule another timeout one second from now.
 2173          */
 2174         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 2175 }
 2176 
 2177 /*
 2178  * Stop the interface. Cancels the statistics updater and resets
 2179  * the interface.
 2180  */
 2181 static void
 2182 fxp_stop(struct fxp_softc *sc)
 2183 {
 2184         if_t ifp = sc->ifp;
 2185         struct fxp_tx *txp;
 2186         int i;
 2187 
 2188         if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
 2189         sc->watchdog_timer = 0;
 2190 
 2191         /*
 2192          * Cancel stats updater.
 2193          */
 2194         callout_stop(&sc->stat_ch);
 2195 
 2196         /*
 2197          * A selective reset preserves the PCI configuration, the configure
 2198          * and IA/multicast setup, and puts the RU and CU into the idle state.
 2199          */
 2200         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
 2201         DELAY(50);
 2202         /* Disable interrupts. */
 2203         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 2204 
 2205         fxp_update_stats(sc);
 2206 
 2207         /*
 2208          * Release any xmit buffers.
 2209          */
 2210         txp = sc->fxp_desc.tx_list;
 2211         for (i = 0; i < FXP_NTXCB; i++) {
 2212                 if (txp[i].tx_mbuf != NULL) {
 2213                         bus_dmamap_sync(sc->fxp_txmtag, txp[i].tx_map,
 2214                             BUS_DMASYNC_POSTWRITE);
 2215                         bus_dmamap_unload(sc->fxp_txmtag, txp[i].tx_map);
 2216                         m_freem(txp[i].tx_mbuf);
 2217                         txp[i].tx_mbuf = NULL;
 2218                         /* clear this to reset csum offload bits */
 2219                         txp[i].tx_cb->tbd[0].tb_addr = 0;
 2220                 }
 2221         }
 2222         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2223             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2224         sc->tx_queued = 0;
 2225 }
 2226 
 2227 /*
 2228  * Watchdog/transmit timeout handler. Called when a
 2229  * transmission is started on the interface, but no interrupt is
 2230  * received before the timeout. This usually indicates that the
 2231  * card has wedged for some reason.
 2232  */
 2233 static void
 2234 fxp_watchdog(struct fxp_softc *sc)
 2235 {
 2236         if_t ifp = sc->ifp;
 2237 
 2238         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2239 
 2240         if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
 2241                 return;
 2242 
 2243         device_printf(sc->dev, "device timeout\n");
 2244         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2245 
 2246         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2247         fxp_init_body(sc, 1);
 2248 }
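
/*
 * Editor's aside (not driver code): the watchdog is a plain countdown.
 * fxp_start_body() arms watchdog_timer at 5 seconds whenever it queues
 * work, fxp_txeof() clears it once the transmit ring drains, and fxp_tick()
 * calls fxp_watchdog() once per second; the `== 0 || --sc->watchdog_timer'
 * test above makes the reset path run only on the tick where an armed
 * timer finally reaches zero.
 */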
 2249 
 2250 /*
 2251  * Acquire locks and then call the real initialization function.  This
 2252  * is necessary because ether_ioctl() calls if_init() and this would
 2253  * result in mutex recursion if the mutex was held.
 2254  */
 2255 static void
 2256 fxp_init(void *xsc)
 2257 {
 2258         struct fxp_softc *sc = xsc;
 2259 
 2260         FXP_LOCK(sc);
 2261         fxp_init_body(sc, 1);
 2262         FXP_UNLOCK(sc);
 2263 }
 2264 
 2265 /*
 2266  * Perform device initialization. This routine must be called with the
 2267  * softc lock held.
 2268  */
 2269 static void
 2270 fxp_init_body(struct fxp_softc *sc, int setmedia)
 2271 {
 2272         if_t ifp = sc->ifp;
 2273         struct mii_data *mii;
 2274         struct fxp_cb_config *cbp;
 2275         struct fxp_cb_ias *cb_ias;
 2276         struct fxp_cb_tx *tcbp;
 2277         struct fxp_tx *txp;
 2278         int i, prm;
 2279 
 2280         FXP_LOCK_ASSERT(sc, MA_OWNED);
 2281 
 2282         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 2283                 return;
 2284 
 2285         /*
 2286          * Cancel any pending I/O
 2287          */
 2288         fxp_stop(sc);
 2289 
 2290         /*
 2291          * Issue software reset, which also unloads the microcode.
 2292          */
 2293         sc->flags &= ~FXP_FLAG_UCODE;
 2294         CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
 2295         DELAY(50);
 2296 
 2297         prm = (if_getflags(ifp) & IFF_PROMISC) ? 1 : 0;
 2298 
 2299         /*
 2300          * Initialize base of CBL and RFA memory. Loading with zero
 2301          * sets it up for regular linear addressing.
 2302          */
 2303         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
 2304         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
 2305 
 2306         fxp_scb_wait(sc);
 2307         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
 2308 
 2309         /*
 2310          * Initialize base of dump-stats buffer.
 2311          */
 2312         fxp_scb_wait(sc);
 2313         bzero(sc->fxp_stats, sizeof(struct fxp_stats));
 2314         bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
 2315             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2316         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
 2317         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
 2318 
 2319         /*
 2320          * Attempt to load microcode if requested.
 2321          * For ICH based controllers do not load microcode.
 2322          */
 2323         if (sc->ident->ich == 0) {
 2324                 if (if_getflags(ifp) & IFF_LINK0 &&
 2325                     (sc->flags & FXP_FLAG_UCODE) == 0)
 2326                         fxp_load_ucode(sc);
 2327         }
 2328 
 2329         /*
 2330          * Set IFF_ALLMULTI status. It's needed in configure action
 2331          * command.
 2332          */
 2333         fxp_mc_addrs(sc);
 2334 
 2335         /*
 2336          * We temporarily use memory that contains the TxCB list to
 2337          * construct the config CB. The TxCB list memory is rebuilt
 2338          * later.
 2339          */
 2340         cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
 2341 
 2342         /*
 2343          * This bcopy is kind of disgusting, but there are a bunch of
 2344          * must-be-zero and must-be-one bits in this structure and this is
 2345          * the easiest way to initialize them all to proper values.
 2346          */
 2347         bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
 2348 
 2349         cbp->cb_status =        0;
 2350         cbp->cb_command =       htole16(FXP_CB_COMMAND_CONFIG |
 2351             FXP_CB_COMMAND_EL);
 2352         cbp->link_addr =        0xffffffff;     /* (no) next command */
 2353         cbp->byte_count =       sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
 2354         cbp->rx_fifo_limit =    8;      /* rx fifo threshold (32 bytes) */
 2355         cbp->tx_fifo_limit =    0;      /* tx fifo threshold (0 bytes) */
 2356         cbp->adaptive_ifs =     0;      /* (no) adaptive interframe spacing */
 2357         cbp->mwi_enable =       sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
 2358         cbp->type_enable =      0;      /* actually reserved */
 2359         cbp->read_align_en =    sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
 2360         cbp->end_wr_on_cl =     sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
 2361         cbp->rx_dma_bytecount = 0;      /* (no) rx DMA max */
 2362         cbp->tx_dma_bytecount = 0;      /* (no) tx DMA max */
 2363         cbp->dma_mbce =         0;      /* (disable) dma max counters */
 2364         cbp->late_scb =         0;      /* (don't) defer SCB update */
 2365         cbp->direct_dma_dis =   1;      /* disable direct rcv dma mode */
 2366         cbp->tno_int_or_tco_en = 0;     /* (disable) tx not okay interrupt */
 2367         cbp->ci_int =           1;      /* interrupt on CU idle */
 2368         cbp->ext_txcb_dis =     sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
 2369         cbp->ext_stats_dis =    1;      /* disable extended counters */
 2370         cbp->keep_overrun_rx =  0;      /* don't pass overrun frames to host */
 2371         cbp->save_bf =          sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
 2372         cbp->disc_short_rx =    !prm;   /* discard short packets */
 2373         cbp->underrun_retry =   1;      /* retry mode (once) on DMA underrun */
 2374         cbp->two_frames =       0;      /* do not limit FIFO to 2 frames */
 2375         cbp->dyn_tbd =          sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2376         cbp->ext_rfa =          sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2377         cbp->mediatype =        sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
 2378         cbp->csma_dis =         0;      /* (don't) disable link */
 2379         cbp->tcp_udp_cksum =    ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
 2380             (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) ? 1 : 0;
 2381         cbp->vlan_tco =         0;      /* (don't) enable vlan wakeup */
 2382         cbp->link_wake_en =     0;      /* (don't) assert PME# on link change */
 2383         cbp->arp_wake_en =      0;      /* (don't) assert PME# on arp */
 2384         cbp->mc_wake_en =       0;      /* (don't) enable PME# on mcmatch */
 2385         cbp->nsai =             1;      /* (don't) disable source addr insert */
 2386         cbp->preamble_length =  2;      /* (7 byte) preamble */
 2387         cbp->loopback =         0;      /* (don't) loopback */
 2388         cbp->linear_priority =  0;      /* (normal CSMA/CD operation) */
 2389         cbp->linear_pri_mode =  0;      /* (wait after xmit only) */
 2390         cbp->interfrm_spacing = 6;      /* (96 bits of) interframe spacing */
 2391         cbp->promiscuous =      prm;    /* promiscuous mode */
 2392         cbp->bcast_disable =    0;      /* (don't) disable broadcasts */
 2393         cbp->wait_after_win =   0;      /* (don't) enable modified backoff alg*/
 2394         cbp->ignore_ul =        0;      /* consider U/L bit in IA matching */
 2395         cbp->crc16_en =         0;      /* (don't) enable crc-16 algorithm */
 2396         cbp->crscdt =           sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
 2397 
 2398         cbp->stripping =        !prm;   /* truncate rx packet to byte count */
 2399         cbp->padding =          1;      /* (do) pad short tx packets */
 2400         cbp->rcv_crc_xfer =     0;      /* (don't) xfer CRC to host */
 2401         cbp->long_rx_en =       sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
 2402         cbp->ia_wake_en =       0;      /* (don't) wake up on address match */
 2403         cbp->magic_pkt_dis =    sc->flags & FXP_FLAG_WOL ? 0 : 1;
 2404         cbp->force_fdx =        0;      /* (don't) force full duplex */
 2405         cbp->fdx_pin_en =       1;      /* (enable) FDX# pin */
 2406         cbp->multi_ia =         0;      /* (don't) accept multiple IAs */
 2407         cbp->mc_all =           if_getflags(ifp) & IFF_ALLMULTI ? 1 : prm;
 2408         cbp->gamla_rx =         sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
 2409         cbp->vlan_strip_en =    ((sc->flags & FXP_FLAG_EXT_RFA) != 0 &&
 2410             (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0;
 2411 
 2412         if (sc->revision == FXP_REV_82557) {
 2413                 /*
 2414                  * The 82557 has no hardware flow control, the values
 2415                  * below are the defaults for the chip.
 2416                  */
 2417                 cbp->fc_delay_lsb =     0;
 2418                 cbp->fc_delay_msb =     0x40;
 2419                 cbp->pri_fc_thresh =    3;
 2420                 cbp->tx_fc_dis =        0;
 2421                 cbp->rx_fc_restop =     0;
 2422                 cbp->rx_fc_restart =    0;
 2423                 cbp->fc_filter =        0;
 2424                 cbp->pri_fc_loc =       1;
 2425         } else {
 2426                 /* Set pause RX FIFO threshold to 1KB. */
 2427                 CSR_WRITE_1(sc, FXP_CSR_FC_THRESH, 1);
 2428                 /* Set pause time. */
 2429                 cbp->fc_delay_lsb =     0xff;
 2430                 cbp->fc_delay_msb =     0xff;
 2431                 cbp->pri_fc_thresh =    3;
 2432                 mii = device_get_softc(sc->miibus);
 2433                 if ((IFM_OPTIONS(mii->mii_media_active) &
 2434                     IFM_ETH_TXPAUSE) != 0)
 2435                         /* enable transmit FC */
 2436                         cbp->tx_fc_dis = 0;
 2437                 else
 2438                         /* disable transmit FC */
 2439                         cbp->tx_fc_dis = 1;
 2440                 if ((IFM_OPTIONS(mii->mii_media_active) &
 2441                     IFM_ETH_RXPAUSE) != 0) {
 2442                         /* enable FC restart/restop frames */
 2443                         cbp->rx_fc_restart = 1;
 2444                         cbp->rx_fc_restop = 1;
 2445                 } else {
 2446                         /* disable FC restart/restop frames */
 2447                         cbp->rx_fc_restart = 0;
 2448                         cbp->rx_fc_restop = 0;
 2449                 }
 2450                 cbp->fc_filter =        !prm;   /* drop FC frames to host */
 2451                 cbp->pri_fc_loc =       1;      /* FC pri location (byte31) */
 2452         }
 2453 
 2454         /* Enable 82558 and 82559 extended statistics functionality. */
 2455         if (sc->revision >= FXP_REV_82558_A4) {
 2456                 if (sc->revision >= FXP_REV_82559_A0) {
 2457                         /*
 2458                          * Extend configuration table size to 32
 2459                          * to include TCO configuration.
 2460                          */
 2461                         cbp->byte_count = 32;
 2462                         cbp->ext_stats_dis = 1;
 2463                         /* Enable TCO stats. */
 2464                         cbp->tno_int_or_tco_en = 1;
 2465                         cbp->gamla_rx = 1;
 2466                 } else
 2467                         cbp->ext_stats_dis = 0;
 2468         }
 2469 
 2470         /*
 2471          * Start the config command/DMA.
 2472          */
 2473         fxp_scb_wait(sc);
 2474         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2475             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2476         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2477         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2478         /* ...and wait for it to complete. */
 2479         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 2480 
 2481         /*
 2482          * Now initialize the station address. Temporarily use the TxCB
 2483          * memory area like we did above for the config CB.
 2484          */
 2485         cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
 2486         cb_ias->cb_status = 0;
 2487         cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
 2488         cb_ias->link_addr = 0xffffffff;
 2489         bcopy(if_getlladdr(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
 2490 
 2491         /*
 2492          * Start the IAS (Individual Address Setup) command/DMA.
 2493          */
 2494         fxp_scb_wait(sc);
 2495         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2496             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2497         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2498         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2499         /* ...and wait for it to complete. */
 2500         fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
 2501 
 2502         /*
 2503          * Initialize the multicast address list.
 2504          */
 2505         fxp_mc_setup(sc);
 2506 
 2507         /*
 2508          * Initialize transmit control block (TxCB) list.
 2509          */
 2510         txp = sc->fxp_desc.tx_list;
 2511         tcbp = sc->fxp_desc.cbl_list;
 2512         bzero(tcbp, FXP_TXCB_SZ);
 2513         for (i = 0; i < FXP_NTXCB; i++) {
 2514                 txp[i].tx_mbuf = NULL;
 2515                 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
 2516                 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
 2517                 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
 2518                     (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
 2519                 if (sc->flags & FXP_FLAG_EXT_TXCB)
 2520                         tcbp[i].tbd_array_addr =
 2521                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
 2522                 else
 2523                         tcbp[i].tbd_array_addr =
 2524                             htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
 2525                 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
 2526         }
 2527         /*
 2528          * Set the suspend flag on the first TxCB and start the control
 2529          * unit. It will execute the NOP and then suspend.
 2530          */
 2531         tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
 2532         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 2533             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2534         sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
 2535         sc->tx_queued = 1;
 2536 
 2537         fxp_scb_wait(sc);
 2538         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 2539         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 2540 
 2541         /*
 2542          * Initialize receiver buffer area - RFA.
 2543          */
 2544         fxp_scb_wait(sc);
 2545         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
 2546         fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
 2547 
 2548         if (sc->miibus != NULL && setmedia != 0)
 2549                 mii_mediachg(device_get_softc(sc->miibus));
 2550 
 2551         if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 2552 
 2553         /*
 2554          * Enable interrupts.
 2555          */
 2556 #ifdef DEVICE_POLLING
 2557         /*
 2558          * ... but only do that if we are not polling. And because (presumably)
 2559          * the default is interrupts on, we need to disable them explicitly!
 2560          */
 2561         if (if_getcapenable(ifp) & IFCAP_POLLING)
 2562                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
 2563         else
 2564 #endif /* DEVICE_POLLING */
 2565         CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 2566 
 2567         /*
 2568          * Start stats updater.
 2569          */
 2570         callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
 2571 }
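The config, IAS, multicast-setup, and microcode downloads in this file all use the same command-unit dispatch sequence: wait for the SCB command word to clear, sync the control block's DMA map, load the SCB general pointer, issue CU_START, then poll the control block's status word. The sketch below captures that pattern as a hypothetical helper; fxp_issue_cb() is not part of the driver, and the fxp_dma_wait() parameter list is assumed from the call sites above.

static void
fxp_issue_cb(struct fxp_softc *sc, volatile uint16_t *status,
    bus_dma_tag_t tag, bus_dmamap_t map, uint32_t cb_dma_addr)
{
        /* Wait for the SCB command word to be accepted and cleared. */
        fxp_scb_wait(sc);
        /* Flush the control block so the device sees our writes. */
        bus_dmamap_sync(tag, map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /* Point the command unit at the control block and start it. */
        CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, cb_dma_addr);
        fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
        /* Poll the completion bit in the control block's status word. */
        fxp_dma_wait(sc, status, tag, map);
}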
 2572 
 2573 static int
 2574 fxp_serial_ifmedia_upd(if_t ifp)
 2575 {
 2576 
 2577         return (0);
 2578 }
 2579 
 2580 static void
 2581 fxp_serial_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
 2582 {
 2583 
 2584         ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
 2585 }
 2586 
 2587 /*
 2588  * Change media according to request.
 2589  */
 2590 static int
 2591 fxp_ifmedia_upd(if_t ifp)
 2592 {
 2593         struct fxp_softc *sc = if_getsoftc(ifp);
 2594         struct mii_data *mii;
 2595         struct mii_softc        *miisc;
 2596 
 2597         mii = device_get_softc(sc->miibus);
 2598         FXP_LOCK(sc);
 2599         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 2600                 PHY_RESET(miisc);
 2601         mii_mediachg(mii);
 2602         FXP_UNLOCK(sc);
 2603         return (0);
 2604 }
 2605 
 2606 /*
 2607  * Notify the world which media we're using.
 2608  */
 2609 static void
 2610 fxp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
 2611 {
 2612         struct fxp_softc *sc = if_getsoftc(ifp);
 2613         struct mii_data *mii;
 2614 
 2615         mii = device_get_softc(sc->miibus);
 2616         FXP_LOCK(sc);
 2617         mii_pollstat(mii);
 2618         ifmr->ifm_active = mii->mii_media_active;
 2619         ifmr->ifm_status = mii->mii_media_status;
 2620         FXP_UNLOCK(sc);
 2621 }
 2622 
 2623 /*
 2624  * Add a buffer to the end of the RFA buffer list.
 2625  * Return 0 if successful, 1 for failure. A failure results in
 2626  * reusing the RFA buffer.
 2627  * The RFA struct is stuck at the beginning of mbuf cluster and the
 2628  * data pointer is fixed up to point just past it.
 2629  */
 2630 static int
 2631 fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2632 {
 2633         struct mbuf *m;
 2634         struct fxp_rfa *rfa;
 2635         bus_dmamap_t tmp_map;
 2636         int error;
 2637 
 2638         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 2639         if (m == NULL)
 2640                 return (ENOBUFS);
 2641 
 2642         /*
 2643          * Move the data pointer up so that the incoming data packet
 2644          * will be 32-bit aligned.
 2645          */
 2646         m->m_data += RFA_ALIGNMENT_FUDGE;
 2647 
 2648         /*
 2649          * Get a pointer to the base of the mbuf cluster and move
 2650          * data start past it.
 2651          */
 2652         rfa = mtod(m, struct fxp_rfa *);
 2653         m->m_data += sc->rfa_size;
 2654         rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
 2655 
 2656         rfa->rfa_status = 0;
 2657         rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
 2658         rfa->actual_size = 0;
 2659         m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
 2660             sc->rfa_size;
 2661 
 2662         /*
 2663          * Initialize the rest of the RFA.  Note that since the RFA
 2664          * is misaligned, we cannot store values directly.  We're thus
 2665          * using the le32enc() function which handles endianness and
 2666          * is also alignment-safe.
 2667          */
 2668         le32enc(&rfa->link_addr, 0xffffffff);
 2669         le32enc(&rfa->rbd_addr, 0xffffffff);
 2670 
 2671         /* Map the RFA into DMA memory. */
 2672         error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa,
 2673             MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
 2674             &rxp->rx_addr, BUS_DMA_NOWAIT);
 2675         if (error) {
 2676                 m_freem(m);
 2677                 return (error);
 2678         }
 2679 
 2680         if (rxp->rx_mbuf != NULL)
 2681                 bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
 2682         tmp_map = sc->spare_map;
 2683         sc->spare_map = rxp->rx_map;
 2684         rxp->rx_map = tmp_map;
 2685         rxp->rx_mbuf = m;
 2686 
 2687         bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 2688             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2689         return (0);
 2690 }
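The pointer arithmetic above (and repeated in fxp_discard_rfabuf() below) is easier to follow with a picture of how the RFA shares the mbuf cluster. The sketch is an editorial illustration, not part of the source:

/*
 * m_ext.ext_buf
 * |<- RFA_ALIGNMENT_FUDGE ->|<-- sc->rfa_size -->|<---- frame data ---->|
 * +-------------------------+--------------------+----------------------+
 * | alignment padding       | struct fxp_rfa     | received packet      |
 * +-------------------------+--------------------+----------------------+
 *
 * m->m_data is advanced past both the fudge and the RFA, so the packet
 * payload starts 32-bit aligned, and rfa->size is set to the space that
 * remains: MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE.
 */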
 2691 
 2692 static void
 2693 fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2694 {
 2695         struct fxp_rfa *p_rfa;
 2696         struct fxp_rx *p_rx;
 2697 
 2698         /*
 2699          * If there are other buffers already on the list, attach this
 2700          * one to the end by fixing up the tail to point to this one.
 2701          */
 2702         if (sc->fxp_desc.rx_head != NULL) {
 2703                 p_rx = sc->fxp_desc.rx_tail;
 2704                 p_rfa = (struct fxp_rfa *)
 2705                     (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
 2706                 p_rx->rx_next = rxp;
 2707                 le32enc(&p_rfa->link_addr, rxp->rx_addr);
 2708                 p_rfa->rfa_control = 0;
 2709                 bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map,
 2710                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2711         } else {
 2712                 rxp->rx_next = NULL;
 2713                 sc->fxp_desc.rx_head = rxp;
 2714         }
 2715         sc->fxp_desc.rx_tail = rxp;
 2716 }
 2717 
 2718 static void
 2719 fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
 2720 {
 2721         struct mbuf *m;
 2722         struct fxp_rfa *rfa;
 2723 
 2724         m = rxp->rx_mbuf;
 2725         m->m_data = m->m_ext.ext_buf;
 2726         /*
 2727          * Move the data pointer up so that the incoming data packet
 2728          * will be 32-bit aligned.
 2729          */
 2730         m->m_data += RFA_ALIGNMENT_FUDGE;
 2731 
 2732         /*
 2733          * Get a pointer to the base of the mbuf cluster and move
 2734          * data start past it.
 2735          */
 2736         rfa = mtod(m, struct fxp_rfa *);
 2737         m->m_data += sc->rfa_size;
 2738         rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
 2739 
 2740         rfa->rfa_status = 0;
 2741         rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
 2742         rfa->actual_size = 0;
 2743 
 2744         /*
 2745          * Initialize the rest of the RFA.  Note that since the RFA
 2746          * is misaligned, we cannot store values directly.  We're thus
 2747          * using the le32enc() function which handles endianness and
 2748          * is also alignment-safe.
 2749          */
 2750         le32enc(&rfa->link_addr, 0xffffffff);
 2751         le32enc(&rfa->rbd_addr, 0xffffffff);
 2752 
 2753         bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
 2754             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2755 }
 2756 
 2757 static int
 2758 fxp_miibus_readreg(device_t dev, int phy, int reg)
 2759 {
 2760         struct fxp_softc *sc = device_get_softc(dev);
 2761         int count = 10000;
 2762         int value;
 2763 
 2764         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2765             (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
 2766 
 2767         while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
 2768             && count--)
 2769                 DELAY(10);
 2770 
 2771         if (count <= 0)
 2772                 device_printf(dev, "fxp_miibus_readreg: timed out\n");
 2773 
 2774         return (value & 0xffff);
 2775 }
 2776 
 2777 static int
 2778 fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
 2779 {
 2780         struct fxp_softc *sc = device_get_softc(dev);
 2781         int count = 10000;
 2782 
 2783         CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
 2784             (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
 2785             (value & 0xffff));
 2786 
 2787         while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
 2788             count--)
 2789                 DELAY(10);
 2790 
 2791         if (count <= 0)
 2792                 device_printf(dev, "fxp_miibus_writereg: timed out\n");
 2793         return (0);
 2794 }
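Both MII access routines above assemble the same MDI control word. The layout below is inferred from the shifts and masks in the code, not quoted from the datasheet or fxpreg.h:

/*
 * MDI control word as built above:
 *
 *   bit  28       ready/completion flag, polled via 0x10000000
 *   bits 27..26   opcode: FXP_MDI_READ or FXP_MDI_WRITE   (op << 26)
 *   bits 25..21   PHY address                             (phy << 21)
 *   bits 20..16   PHY register number                     (reg << 16)
 *   bits 15..0    data word                               (value & 0xffff)
 *
 * The ready bit is polled up to 10000 times with DELAY(10), so each
 * routine gives the PHY roughly 100 ms before reporting a timeout.
 */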
 2795 
 2796 static void
 2797 fxp_miibus_statchg(device_t dev)
 2798 {
 2799         struct fxp_softc *sc;
 2800         struct mii_data *mii;
 2801         if_t ifp;
 2802 
 2803         sc = device_get_softc(dev);
 2804         mii = device_get_softc(sc->miibus);
 2805         ifp = sc->ifp;
 2806         if (mii == NULL || ifp == NULL ||
 2807             (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
 2808             (mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
 2809             (IFM_AVALID | IFM_ACTIVE))
 2810                 return;
 2811 
 2812         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T &&
 2813             sc->flags & FXP_FLAG_CU_RESUME_BUG)
 2814                 sc->cu_resume_bug = 1;
 2815         else
 2816                 sc->cu_resume_bug = 0;
 2817         /*
 2818          * Call fxp_init_body in order to adjust the flow control settings.
 2819          * Note that the 82557 doesn't support hardware flow control.
 2820          */
 2821         if (sc->revision == FXP_REV_82557)
 2822                 return;
 2823         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2824         fxp_init_body(sc, 0);
 2825 }
 2826 
 2827 static int
 2828 fxp_ioctl(if_t ifp, u_long command, caddr_t data)
 2829 {
 2830         struct fxp_softc *sc = if_getsoftc(ifp);
 2831         struct ifreq *ifr = (struct ifreq *)data;
 2832         struct mii_data *mii;
 2833         int flag, mask, error = 0, reinit;
 2834 
 2835         switch (command) {
 2836         case SIOCSIFFLAGS:
 2837                 FXP_LOCK(sc);
 2838                 /*
 2839                  * If interface is marked up and not running, then start it.
 2840                  * If it is marked down and running, stop it.
 2841                  * XXX If it's up then re-initialize it. This is so flags
 2842                  * such as IFF_PROMISC are handled.
 2843                  */
 2844                 if (if_getflags(ifp) & IFF_UP) {
 2845                         if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) &&
 2846                             ((if_getflags(ifp) ^ sc->if_flags) &
 2847                             (IFF_PROMISC | IFF_ALLMULTI | IFF_LINK0)) != 0) {
 2848                                 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2849                                 fxp_init_body(sc, 0);
 2850                         } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
 2851                                 fxp_init_body(sc, 1);
 2852                 } else {
 2853                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
 2854                                 fxp_stop(sc);
 2855                 }
 2856                 sc->if_flags = if_getflags(ifp);
 2857                 FXP_UNLOCK(sc);
 2858                 break;
 2859 
 2860         case SIOCADDMULTI:
 2861         case SIOCDELMULTI:
 2862                 FXP_LOCK(sc);
 2863                 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2864                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2865                         fxp_init_body(sc, 0);
 2866                 }
 2867                 FXP_UNLOCK(sc);
 2868                 break;
 2869 
 2870         case SIOCSIFMEDIA:
 2871         case SIOCGIFMEDIA:
 2872                 if (sc->miibus != NULL) {
 2873                         mii = device_get_softc(sc->miibus);
 2874                         error = ifmedia_ioctl(ifp, ifr,
 2875                             &mii->mii_media, command);
 2876                 } else {
 2877                         error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
 2878                 }
 2879                 break;
 2880 
 2881         case SIOCSIFCAP:
 2882                 reinit = 0;
 2883                 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
 2884 #ifdef DEVICE_POLLING
 2885                 if (mask & IFCAP_POLLING) {
 2886                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 2887                                 error = ether_poll_register(fxp_poll, ifp);
 2888                                 if (error)
 2889                                         return (error);
 2890                                 FXP_LOCK(sc);
 2891                                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
 2892                                     FXP_SCB_INTR_DISABLE);
 2893                                 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
 2894                                 FXP_UNLOCK(sc);
 2895                         } else {
 2896                                 error = ether_poll_deregister(ifp);
 2897                                 /* Enable interrupts in any case */
 2898                                 FXP_LOCK(sc);
 2899                                 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 2900                                 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
 2901                                 FXP_UNLOCK(sc);
 2902                         }
 2903                 }
 2904 #endif
 2905                 FXP_LOCK(sc);
 2906                 if ((mask & IFCAP_TXCSUM) != 0 &&
 2907                     (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
 2908                         if_togglecapenable(ifp, IFCAP_TXCSUM);
 2909                         if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
 2910                                 if_sethwassistbits(ifp, FXP_CSUM_FEATURES, 0);
 2911                         else
 2912                                 if_sethwassistbits(ifp, 0, FXP_CSUM_FEATURES);
 2913                 }
 2914                 if ((mask & IFCAP_RXCSUM) != 0 &&
 2915                     (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
 2916                         if_togglecapenable(ifp, IFCAP_RXCSUM);
 2917                         if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
 2918                                 reinit++;
 2919                 }
 2920                 if ((mask & IFCAP_TSO4) != 0 &&
 2921                     (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
 2922                         if_togglecapenable(ifp, IFCAP_TSO4);
 2923                         if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
 2924                                 if_sethwassistbits(ifp, CSUM_TSO, 0);
 2925                         else
 2926                                 if_sethwassistbits(ifp, 0, CSUM_TSO);
 2927                 }
 2928                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 2929                     (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
 2930                         if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
 2931                 if ((mask & IFCAP_VLAN_MTU) != 0 &&
 2932                     (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) != 0) {
 2933                         if_togglecapenable(ifp, IFCAP_VLAN_MTU);
 2934                         if (sc->revision != FXP_REV_82557)
 2935                                 flag = FXP_FLAG_LONG_PKT_EN;
 2936                         else /* a hack to get long frames on the old chip */
 2937                                 flag = FXP_FLAG_SAVE_BAD;
 2938                         sc->flags ^= flag;
 2939                         if (if_getflags(ifp) & IFF_UP)
 2940                                 reinit++;
 2941                 }
 2942                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 2943                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
 2944                         if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
 2945                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 2946                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
 2947                         if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
 2948                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 2949                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
 2950                         if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
 2951                         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
 2952                                 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO |
 2953                                     IFCAP_VLAN_HWCSUM);
 2954                         reinit++;
 2955                 }
 2956                 if (reinit > 0 &&
 2957                     (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2958                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2959                         fxp_init_body(sc, 0);
 2960                 }
 2961                 FXP_UNLOCK(sc);
 2962                 if_vlancap(ifp);
 2963                 break;
 2964 
 2965         default:
 2966                 error = ether_ioctl(ifp, command, data);
 2967         }
 2968         return (error);
 2969 }
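In the SIOCSIFCAP case above, "mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap" leaves set exactly those capability bits whose requested state differs from the current one, so each branch runs only for capabilities the caller actually asked to change. A worked example (editorial, values illustrative):

/*
 * Suppose capenable = IFCAP_TXCSUM | IFCAP_RXCSUM and the request is
 * reqcap = IFCAP_RXCSUM (turn TXCSUM off, leave RXCSUM alone):
 *
 *   mask = (IFCAP_TXCSUM | IFCAP_RXCSUM) ^ IFCAP_RXCSUM = IFCAP_TXCSUM
 *
 * Only the IFCAP_TXCSUM branch runs; if_togglecapenable() flips that
 * single bit and the hardware-assist flags are updated to match.
 */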
 2970 
 2971 static u_int
 2972 fxp_setup_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
 2973 {
 2974         struct fxp_softc *sc = arg;
 2975         struct fxp_cb_mcs *mcsp = sc->mcsp;
 2976 
 2977         if (mcsp->mc_cnt < MAXMCADDR)
 2978                 bcopy(LLADDR(sdl), mcsp->mc_addr[mcsp->mc_cnt * ETHER_ADDR_LEN],
 2979                     ETHER_ADDR_LEN);
 2980         mcsp->mc_cnt++;
 2981         return (1);
 2982 }
 2983 
 2984 /*
 2985  * Fill in the multicast address list and return number of entries.
 2986  */
 2987 static void
 2988 fxp_mc_addrs(struct fxp_softc *sc)
 2989 {
 2990         struct fxp_cb_mcs *mcsp = sc->mcsp;
 2991         if_t ifp = sc->ifp;
 2992 
 2993         if ((if_getflags(ifp) & IFF_ALLMULTI) == 0) {
 2994                 mcsp->mc_cnt = 0;
 2995                 if_foreach_llmaddr(sc->ifp, fxp_setup_maddr, sc);
 2996                 if (mcsp->mc_cnt >= MAXMCADDR) {
 2997                         if_setflagbits(ifp, IFF_ALLMULTI, 0);
 2998                         mcsp->mc_cnt = 0;
 2999                 }
 3000         }
 3001         mcsp->mc_cnt = htole16(mcsp->mc_cnt * ETHER_ADDR_LEN);
 3002 }
 3003 
 3004 /*
 3005  * Program the multicast filter.
 3006  *
 3007  * We have an artificial restriction that the multicast setup command
 3008  * must be the first command in the chain, so we take steps to ensure
 3009  * this. By requiring this, it allows us to keep up the performance of
 3010  * the pre-initialized command ring (esp. link pointers) by not actually
 3011  * inserting the mcsetup command in the ring - i.e. its link pointer
 3012  * points to the TxCB ring, but the mcsetup descriptor itself is not part
 3013  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 3014  * lead into the regular TxCB ring when it completes.
 3015  */
 3016 static void
 3017 fxp_mc_setup(struct fxp_softc *sc)
 3018 {
 3019         struct fxp_cb_mcs *mcsp;
 3020         int count;
 3021 
 3022         FXP_LOCK_ASSERT(sc, MA_OWNED);
 3023 
 3024         mcsp = sc->mcsp;
 3025         mcsp->cb_status = 0;
 3026         mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
 3027         mcsp->link_addr = 0xffffffff;
 3028         fxp_mc_addrs(sc);
 3029 
 3030         /*
 3031          * Wait until command unit is idle. This should never be the
 3032          * case when nothing is queued, but make sure anyway.
 3033          */
 3034         count = 100;
 3035         while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) !=
 3036             FXP_SCB_CUS_IDLE && --count)
 3037                 DELAY(10);
 3038         if (count == 0) {
 3039                 device_printf(sc->dev, "command queue timeout\n");
 3040                 return;
 3041         }
 3042 
 3043         /*
 3044          * Start the multicast setup command.
 3045          */
 3046         fxp_scb_wait(sc);
 3047         bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
 3048             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3049         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
 3050         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 3051         /* ...and wait for it to complete. */
 3052         fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
 3053 }
 3054 
 3055 static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
 3056 static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
 3057 static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
 3058 static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
 3059 static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
 3060 static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
 3061 static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;
 3062 
 3063 #define UCODE(x)        x, sizeof(x)/sizeof(uint32_t)
 3064 
 3065 static const struct ucode {
 3066         uint32_t        revision;
 3067         uint32_t        *ucode;
 3068         int             length;
 3069         u_short         int_delay_offset;
 3070         u_short         bundle_max_offset;
 3071 } ucode_table[] = {
 3072         { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
 3073         { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
 3074         { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
 3075             D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
 3076         { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
 3077             D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
 3078         { FXP_REV_82550, UCODE(fxp_ucode_d102),
 3079             D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
 3080         { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
 3081             D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
 3082         { FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
 3083             D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
 3084         { FXP_REV_82551_10, UCODE(fxp_ucode_d102e),
 3085             D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
 3086         { 0, NULL, 0, 0, 0 }
 3087 };
 3088 
 3089 static void
 3090 fxp_load_ucode(struct fxp_softc *sc)
 3091 {
 3092         const struct ucode *uc;
 3093         struct fxp_cb_ucode *cbp;
 3094         int i;
 3095 
 3096         if (sc->flags & FXP_FLAG_NO_UCODE)
 3097                 return;
 3098 
 3099         for (uc = ucode_table; uc->ucode != NULL; uc++)
 3100                 if (sc->revision == uc->revision)
 3101                         break;
 3102         if (uc->ucode == NULL)
 3103                 return;
 3104         cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
 3105         cbp->cb_status = 0;
 3106         cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
 3107         cbp->link_addr = 0xffffffff;            /* (no) next command */
 3108         for (i = 0; i < uc->length; i++)
 3109                 cbp->ucode[i] = htole32(uc->ucode[i]);
 3110         if (uc->int_delay_offset)
 3111                 *(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
 3112                     htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
 3113         if (uc->bundle_max_offset)
 3114                 *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
 3115                     htole16(sc->tunable_bundle_max);
 3116         /*
 3117          * Download the ucode to the chip.
 3118          */
 3119         fxp_scb_wait(sc);
 3120         bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
 3121             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3122         CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
 3123         fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
 3124         /* ...and wait for it to complete. */
 3125         fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
 3126         device_printf(sc->dev,
 3127             "Microcode loaded, int_delay: %d usec  bundle_max: %d\n",
 3128             sc->tunable_int_delay,
 3129             uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
 3130         sc->flags |= FXP_FLAG_UCODE;
 3131         bzero(cbp, FXP_TXCB_SZ);
 3132 }
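A worked example of the two patches above (editorial; 1000 usec is simply a value inside the 300..3000 range accepted by the int_delay sysctl handler below, not a stated default):

/*
 * With tunable_int_delay = 1000, the 16-bit field at dword offset
 * int_delay_offset is overwritten with htole16(1000 + 1000 / 2) = 1500,
 * i.e. the microcode is handed 1.5 times the configured delay, while
 * bundle_max_offset (when non-zero) receives tunable_bundle_max as-is.
 */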
 3133 
 3134 #define FXP_SYSCTL_STAT_ADD(c, h, n, p, d)      \
 3135         SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
 3136 
 3137 static void
 3138 fxp_sysctl_node(struct fxp_softc *sc)
 3139 {
 3140         struct sysctl_ctx_list *ctx;
 3141         struct sysctl_oid_list *child, *parent;
 3142         struct sysctl_oid *tree;
 3143         struct fxp_hwstats *hsp;
 3144 
 3145         ctx = device_get_sysctl_ctx(sc->dev);
 3146         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3147 
 3148         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_delay",
 3149             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 3150             &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
 3151             "FXP driver receive interrupt microcode bundling delay");
 3152         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "bundle_max",
 3153             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 3154             &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
 3155             "FXP driver receive interrupt microcode bundle size limit");
 3156         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
 3157             "FXP RNR events");
 3158 
 3159         /*
 3160          * Pull in device tunables.
 3161          */
 3162         sc->tunable_int_delay = TUNABLE_INT_DELAY;
 3163         sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
 3164         (void) resource_int_value(device_get_name(sc->dev),
 3165             device_get_unit(sc->dev), "int_delay", &sc->tunable_int_delay);
 3166         (void) resource_int_value(device_get_name(sc->dev),
 3167             device_get_unit(sc->dev), "bundle_max", &sc->tunable_bundle_max);
 3168         sc->rnr = 0;
 3169 
 3170         hsp = &sc->fxp_hwstats;
 3171         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
 3172             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "FXP statistics");
 3173         parent = SYSCTL_CHILDREN(tree);
 3174 
 3175         /* Rx MAC statistics. */
 3176         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
 3177             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
 3178         child = SYSCTL_CHILDREN(tree);
 3179         FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
 3180             &hsp->rx_good, "Good frames");
 3181         FXP_SYSCTL_STAT_ADD(ctx, child, "crc_errors",
 3182             &hsp->rx_crc_errors, "CRC errors");
 3183         FXP_SYSCTL_STAT_ADD(ctx, child, "alignment_errors",
 3184             &hsp->rx_alignment_errors, "Alignment errors");
 3185         FXP_SYSCTL_STAT_ADD(ctx, child, "rnr_errors",
 3186             &hsp->rx_rnr_errors, "RNR errors");
 3187         FXP_SYSCTL_STAT_ADD(ctx, child, "overrun_errors",
 3188             &hsp->rx_overrun_errors, "Overrun errors");
 3189         FXP_SYSCTL_STAT_ADD(ctx, child, "cdt_errors",
 3190             &hsp->rx_cdt_errors, "Collision detect errors");
 3191         FXP_SYSCTL_STAT_ADD(ctx, child, "shortframes",
 3192             &hsp->rx_shortframes, "Short frame errors");
 3193         if (sc->revision >= FXP_REV_82558_A4) {
 3194                 FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
 3195                     &hsp->rx_pause, "Pause frames");
 3196                 FXP_SYSCTL_STAT_ADD(ctx, child, "controls",
 3197                     &hsp->rx_controls, "Unsupported control frames");
 3198         }
 3199         if (sc->revision >= FXP_REV_82559_A0)
 3200                 FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
 3201                     &hsp->rx_tco, "TCO frames");
 3202 
 3203         /* Tx MAC statistics. */
 3204         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
 3205             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
 3206         child = SYSCTL_CHILDREN(tree);
 3207         FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
 3208             &hsp->tx_good, "Good frames");
 3209         FXP_SYSCTL_STAT_ADD(ctx, child, "maxcols",
 3210             &hsp->tx_maxcols, "Maximum collision errors");
 3211         FXP_SYSCTL_STAT_ADD(ctx, child, "latecols",
 3212             &hsp->tx_latecols, "Late collision errors");
 3213         FXP_SYSCTL_STAT_ADD(ctx, child, "underruns",
 3214             &hsp->tx_underruns, "Underrun errors");
 3215         FXP_SYSCTL_STAT_ADD(ctx, child, "lostcrs",
 3216             &hsp->tx_lostcrs, "Lost carrier sense");
 3217         FXP_SYSCTL_STAT_ADD(ctx, child, "deffered",
 3218             &hsp->tx_deffered, "Deferred");
 3219         FXP_SYSCTL_STAT_ADD(ctx, child, "single_collisions",
 3220             &hsp->tx_single_collisions, "Single collisions");
 3221         FXP_SYSCTL_STAT_ADD(ctx, child, "multiple_collisions",
 3222             &hsp->tx_multiple_collisions, "Multiple collisions");
 3223         FXP_SYSCTL_STAT_ADD(ctx, child, "total_collisions",
 3224             &hsp->tx_total_collisions, "Total collisions");
 3225         if (sc->revision >= FXP_REV_82558_A4)
 3226                 FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
 3227                     &hsp->tx_pause, "Pause frames");
 3228         if (sc->revision >= FXP_REV_82559_A0)
 3229                 FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
 3230                     &hsp->tx_tco, "TCO frames");
 3231 }
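As a usage sketch, the counters registered above (via SYSCTL_ADD_UINT through the FXP_SYSCTL_STAT_ADD macro) can be read from userland with sysctlbyname(3). The OID path below assumes the conventional dev.<driver>.<unit> prefix created under device_get_sysctl_tree() and a unit number of 0; it is illustrative, not taken from the source.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        unsigned int good;              /* counters are registered as unsigned ints */
        size_t len = sizeof(good);

        /* "dev.fxp.0...." assumes unit 0; adjust for the actual device unit. */
        if (sysctlbyname("dev.fxp.0.stats.rx.good_frames", &good, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("good RX frames: %u\n", good);
        return (0);
}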
 3232 
 3233 #undef FXP_SYSCTL_STAT_ADD
 3234 
 3235 static int
 3236 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3237 {
 3238         int error, value;
 3239 
 3240         value = *(int *)arg1;
 3241         error = sysctl_handle_int(oidp, &value, 0, req);
 3242         if (error || !req->newptr)
 3243                 return (error);
 3244         if (value < low || value > high)
 3245                 return (EINVAL);
 3246         *(int *)arg1 = value;
 3247         return (0);
 3248 }
 3249 
 3250 /*
 3251  * Interrupt delay is expressed in microseconds, a multiplier is used
 3252  * to convert this to the appropriate clock ticks before using.
 3253  */
 3254 static int
 3255 sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
 3256 {
 3257 
 3258         return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
 3259 }
 3260 
 3261 static int
 3262 sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
 3263 {
 3264 
 3265         return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
 3266 }
