FreeBSD/Linux Kernel Cross Reference
sys/dev/nfe/if_nfe.c


/*      $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/dev/nfe/if_nfe.c 230714 2012-01-29 01:22:48Z marius $");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(struct ifnet *);
static void nfe_start_locked(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...)        do {                            \
        if (nfedebug)                                           \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#define DPRINTFN(sc, n, ...)    do {                            \
        if (nfedebug >= (n))                                    \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif
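
/*
 * Editor's note on the macros above: nfedebug is a compile-time debug
 * knob.  When the driver is built with NFE_DEBUG defined, DPRINTF()
 * prints once nfedebug is nonzero, while DPRINTFN(sc, n, ...) prints
 * only when nfedebug >= n.  (How NFE_DEBUG gets defined at build time,
 * e.g. via CFLAGS, is an assumption and not shown in this file.)
 */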

#define NFE_LOCK(_sc)           mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)         mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
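
/*
 * Illustrative usage (not part of the original source): TUNABLE_INT
 * reads these knobs from the kernel environment, so they can be set
 * from /boot/loader.conf, e.g.:
 *
 *      hw.nfe.msi_disable="1"
 *      hw.nfe.msix_disable="1"
 *      hw.nfe.jumbo_disable="1"
 *
 * Any nonzero value disables the corresponding feature.
 */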

static device_method_t nfe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         nfe_probe),
        DEVMETHOD(device_attach,        nfe_attach),
        DEVMETHOD(device_detach,        nfe_detach),
        DEVMETHOD(device_suspend,       nfe_suspend),
        DEVMETHOD(device_resume,        nfe_resume),
        DEVMETHOD(device_shutdown,      nfe_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       nfe_miibus_readreg),
        DEVMETHOD(miibus_writereg,      nfe_miibus_writereg),
        DEVMETHOD(miibus_statchg,       nfe_miibus_statchg),

        DEVMETHOD_END
};

static driver_t nfe_driver = {
        "nfe",
        nfe_methods,
        sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
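
/*
 * Editor's note: the DRIVER_MODULE() declarations above hook nfe under
 * pci(4) and hang miibus(4) off nfe for PHY management.  When built as
 * a module, the driver can typically be loaded with "kldload if_nfe"
 * (module name assumed from the usual if_*.ko convention).
 */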

static struct nfe_type nfe_devs[] = {
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
            "NVIDIA nForce MCP Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
            "NVIDIA nForce2 MCP2 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
            "NVIDIA nForce2 400 MCP4 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
            "NVIDIA nForce2 400 MCP5 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
            "NVIDIA nForce3 MCP3 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
            "NVIDIA nForce3 250 MCP6 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
            "NVIDIA nForce3 MCP7 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
            "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
            "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP10 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP11 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
            "NVIDIA nForce 430 MCP12 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
            "NVIDIA nForce 430 MCP13 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {0, 0, NULL}
};

/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
        struct nfe_type *t;

        t = nfe_devs;
        /* Check for matching PCI device IDs */
        while (t->name != NULL) {
                if ((pci_get_vendor(dev) == t->vid_id) &&
                    (pci_get_device(dev) == t->dev_id)) {
                        device_set_desc(dev, t->name);
                        return (BUS_PROBE_DEFAULT);
                }
                t++;
        }

        return (ENXIO);
}
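
/*
 * Illustrative note: the vendor/device ID pairs matched above can be
 * cross-checked on a running system with pciconf(8), e.g. "pciconf -lv",
 * which lists the vendor and device IDs of each attached PCI device.
 */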

static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
        int rid;

        rid = PCIR_BAR(2);
        sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);
        if (sc->nfe_msix_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX table resource\n");
                return;
        }
        rid = PCIR_BAR(3);
        sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (sc->nfe_msix_pba_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX PBA resource\n");
                bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
                return;
        }

        if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
                if (count == NFE_MSI_MESSAGES) {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "Using %d MSIX messages\n", count);
                        sc->nfe_msix = 1;
                } else {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "couldn't allocate MSIX\n");
                        pci_release_msi(sc->nfe_dev);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(3), sc->nfe_msix_pba_res);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(2), sc->nfe_msix_res);
                        sc->nfe_msix_pba_res = NULL;
                        sc->nfe_msix_res = NULL;
                }
        }
}

static int
nfe_attach(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;
        bus_addr_t dma_addr_max;
        int error = 0, i, msic, reg, rid;

        sc = device_get_softc(dev);
        sc->nfe_dev = dev;

        mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

        pci_enable_busmaster(dev);

        rid = PCIR_BAR(0);
        sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->nfe_res[0] == NULL) {
                device_printf(dev, "couldn't map memory resources\n");
                mtx_destroy(&sc->nfe_mtx);
                return (ENXIO);
        }

        if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
                uint16_t v, width;

                v = pci_read_config(dev, reg + 0x08, 2);
                /*
                 * Change max. read request size to 4096
                 * (MRRS field encoding 5 = 4096 bytes).
                 */
                v &= ~(7 << 12);
                v |= (5 << 12);
                pci_write_config(dev, reg + 0x08, v, 2);

                v = pci_read_config(dev, reg + 0x0c, 2);
                /* link capability */
                v = (v >> 4) & 0x0f;
                width = pci_read_config(dev, reg + 0x12, 2);
                /* negotiated link width */
                width = (width >> 4) & 0x3f;
                if (v != width)
                        device_printf(sc->nfe_dev,
                            "warning, negotiated width of link(x%d) != "
                            "max. width of link(x%d)\n", width, v);
        }

        if (nfe_can_use_msix(sc) == 0) {
                device_printf(sc->nfe_dev,
                    "MSI/MSI-X capability black-listed, will use INTx\n");
                msix_disable = 1;
                msi_disable = 1;
        }

        /* Allocate interrupt */
        if (msix_disable == 0 || msi_disable == 0) {
                if (msix_disable == 0 &&
                    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
                        nfe_alloc_msix(sc, msic);
                if (msi_disable == 0 && sc->nfe_msix == 0 &&
                    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
                    pci_alloc_msi(dev, &msic) == 0) {
                        if (msic == NFE_MSI_MESSAGES) {
                                if (bootverbose)
                                        device_printf(dev,
                                            "Using %d MSI messages\n", msic);
                                sc->nfe_msi = 1;
                        } else
                                pci_release_msi(dev);
                }
        }

        if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
                rid = 0;
                sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (sc->nfe_irq[0] == NULL) {
                        device_printf(dev, "couldn't allocate IRQ resources\n");
                        error = ENXIO;
                        goto fail;
                }
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                            SYS_RES_IRQ, &rid, RF_ACTIVE);
                        if (sc->nfe_irq[i] == NULL) {
                                device_printf(dev,
                                    "couldn't allocate IRQ resources for "
                                    "message %d\n", rid);
                                error = ENXIO;
                                goto fail;
                        }
                }
                /* Map interrupts to vector 0. */
                if (sc->nfe_msix != 0) {
                        NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
                } else if (sc->nfe_msi != 0) {
                        NFE_WRITE(sc, NFE_MSI_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSI_MAP1, 0);
                }
        }

        /* Set IRQ status/mask register. */
        sc->nfe_irq_status = NFE_IRQ_STATUS;
        sc->nfe_irq_mask = NFE_IRQ_MASK;
        sc->nfe_intrs = NFE_IRQ_WANTED;
        sc->nfe_nointrs = 0;
        if (sc->nfe_msix != 0) {
                sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
                sc->nfe_nointrs = NFE_IRQ_WANTED;
        } else if (sc->nfe_msi != 0) {
                sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
                sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
        }

        sc->nfe_devid = pci_get_device(dev);
        sc->nfe_revid = pci_get_revid(dev);
        sc->nfe_flags = 0;

        switch (sc->nfe_devid) {
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
                break;
        case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
                break;
        case PCI_PRODUCT_NVIDIA_CK804_LAN1:
        case PCI_PRODUCT_NVIDIA_CK804_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_MIB_V1;
                break;
        case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
                break;

        case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
                    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
                break;
        case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
                /* XXX flow control */
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
                    NFE_CORRECT_MACADDR | NFE_MIB_V3;
                break;
        case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
                /* XXX flow control */
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
                break;
        case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
                    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
                    NFE_MIB_V2;
                break;
        }

        nfe_power(sc);
        /* Check for reversed ethernet address */
        if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
                sc->nfe_flags |= NFE_CORRECT_MACADDR;
        nfe_get_macaddr(sc, sc->eaddr);
        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         */
        dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
        if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
                dma_addr_max = NFE_DMA_MAXADDR;
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->nfe_dev),       /* parent */
            1, 0,                               /* alignment, boundary */
            dma_addr_max,                       /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT, 0,         /* maxsize, nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->nfe_parent_tag);
        if (error)
                goto fail;

        ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }

        /*
         * Allocate Tx and Rx rings.
         */
        if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
                goto fail;

        if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
                goto fail;

        nfe_alloc_jrx_ring(sc, &sc->jrxq);
        /* Create sysctl node. */
        nfe_sysctl_node(sc);

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = nfe_ioctl;
        ifp->if_start = nfe_start;
        ifp->if_hwassist = 0;
        ifp->if_capabilities = 0;
        ifp->if_init = nfe_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
        ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
        IFQ_SET_READY(&ifp->if_snd);

        if (sc->nfe_flags & NFE_HW_CSUM) {
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
                ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
        }
        ifp->if_capenable = ifp->if_capabilities;

        sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
        /* VLAN capability setup. */
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
                ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
                if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
                        ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
                            IFCAP_VLAN_HWTSO;
        }

        if (pci_find_extcap(dev, PCIY_PMG, &reg) == 0)
                ifp->if_capabilities |= IFCAP_WOL_MAGIC;
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Tell the upper layer(s) we support long frames.
         * Must appear after the call to ether_ifattach() because
         * ether_ifattach() sets ifi_hdrlen to the default value.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif
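
        /*
         * Illustrative note: IFCAP_POLLING is only advertised when the
         * kernel is built with "options DEVICE_POLLING"; polling can
         * then be toggled per interface at run time, e.g.
         * "ifconfig nfe0 polling" (see polling(4)).
         */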

        /* Do MII setup */
        error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
            nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
            MIIF_DOPAUSE | MIIF_FORCEPAUSE);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }
        ether_ifattach(ifp, sc->eaddr);

        TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
        sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->nfe_tq);
        taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->nfe_dev));
        error = 0;
        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                error = bus_setup_intr(dev, sc->nfe_irq[0],
                    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                    &sc->nfe_intrhand[0]);
        } else {
                for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                        error = bus_setup_intr(dev, sc->nfe_irq[i],
                            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                            &sc->nfe_intrhand[i]);
                        if (error != 0)
                                break;
                }
        }
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                nfe_detach(dev);

        return (error);
}


static int
nfe_detach(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int i, rid;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
        ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
        if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif
        if (device_is_attached(dev)) {
                NFE_LOCK(sc);
                nfe_stop(ifp);
                ifp->if_flags &= ~IFF_UP;
                NFE_UNLOCK(sc);
                callout_drain(&sc->nfe_stat_ch);
                ether_ifdetach(ifp);
        }

        if (ifp) {
                /* restore ethernet address */
                if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
                        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                                eaddr[i] = sc->eaddr[5 - i];
                        }
                } else
                        bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
                nfe_set_macaddr(sc, eaddr);
                if_free(ifp);
        }
        if (sc->nfe_miibus)
                device_delete_child(dev, sc->nfe_miibus);
        bus_generic_detach(dev);
        if (sc->nfe_tq != NULL) {
                taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
        }

        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                if (sc->nfe_intrhand[i] != NULL) {
                        bus_teardown_intr(dev, sc->nfe_irq[i],
                            sc->nfe_intrhand[i]);
                        sc->nfe_intrhand[i] = NULL;
                }
        }

        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                if (sc->nfe_irq[0] != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, 0,
                            sc->nfe_irq[0]);
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        if (sc->nfe_irq[i] != NULL) {
                                bus_release_resource(dev, SYS_RES_IRQ, rid,
                                    sc->nfe_irq[i]);
                                sc->nfe_irq[i] = NULL;
                        }
                }
                pci_release_msi(dev);
        }
        if (sc->nfe_msix_pba_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
                    sc->nfe_msix_pba_res);
                sc->nfe_msix_pba_res = NULL;
        }
        if (sc->nfe_msix_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
        }
        if (sc->nfe_res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
                    sc->nfe_res[0]);
                sc->nfe_res[0] = NULL;
        }

        nfe_free_tx_ring(sc, &sc->txq);
        nfe_free_rx_ring(sc, &sc->rxq);
        nfe_free_jrx_ring(sc, &sc->jrxq);

        if (sc->nfe_parent_tag) {
                bus_dma_tag_destroy(sc->nfe_parent_tag);
                sc->nfe_parent_tag = NULL;
        }

        mtx_destroy(&sc->nfe_mtx);

        return (0);
}


static int
nfe_suspend(device_t dev)
{
        struct nfe_softc *sc;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        nfe_stop(sc->nfe_ifp);
        nfe_set_wol(sc);
        sc->nfe_suspended = 1;
        NFE_UNLOCK(sc);

        return (0);
}


static int
nfe_resume(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        nfe_power(sc);
        ifp = sc->nfe_ifp;
        if (ifp->if_flags & IFF_UP)
                nfe_init_locked(sc);
        sc->nfe_suspended = 0;
        NFE_UNLOCK(sc);

        return (0);
}


static int
nfe_can_use_msix(struct nfe_softc *sc)
{
        static struct msix_blacklist {
                char    *maker;
                char    *product;
        } msix_blacklists[] = {
                { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
        };

        struct msix_blacklist *mblp;
        char *maker, *product;
        int count, n, use_msix;

        /*
         * Search the baseboard manufacturer and product name table
         * to see if this system has a known MSI/MSI-X issue.
         */
        maker = getenv("smbios.planar.maker");
        product = getenv("smbios.planar.product");
        use_msix = 1;
        if (maker != NULL && product != NULL) {
                count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
                mblp = msix_blacklists;
                for (n = 0; n < count; n++) {
                        if (strcmp(maker, mblp->maker) == 0 &&
                            strcmp(product, mblp->product) == 0) {
                                use_msix = 0;
                                break;
                        }
                        mblp++;
                }
        }
        if (maker != NULL)
                freeenv(maker);
        if (product != NULL)
                freeenv(product);

        return (use_msix);
}
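
/*
 * Illustrative note: the smbios.planar.* strings consulted above are
 * kernel environment variables populated by the loader; on a running
 * system they can be inspected with kenv(1), e.g.
 * "kenv smbios.planar.maker".
 */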


/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
        uint32_t pwr;

        if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
                return;
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
        NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
        DELAY(100);
        NFE_WRITE(sc, NFE_MAC_RESET, 0);
        DELAY(100);
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
        pwr = NFE_READ(sc, NFE_PWR2_CTL);
        pwr &= ~NFE_PWR2_WAKEUP_MASK;
        if (sc->nfe_revid >= 0xa3 &&
            (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
                pwr |= NFE_PWR2_REVA3;
        NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}


static void
nfe_miibus_statchg(device_t dev)
{
        struct nfe_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;
        uint32_t rxctl, txctl;

        sc = device_get_softc(dev);

        mii = device_get_softc(sc->nfe_miibus);
        ifp = sc->nfe_ifp;

        sc->nfe_link = 0;
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                case IFM_1000_T:
                        sc->nfe_link = 1;
                        break;
                default:
                        break;
                }
        }

        nfe_mac_config(sc, mii);
        txctl = NFE_READ(sc, NFE_TX_CTL);
        rxctl = NFE_READ(sc, NFE_RX_CTL);
        if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                txctl |= NFE_TX_START;
                rxctl |= NFE_RX_START;
        } else {
                txctl &= ~NFE_TX_START;
                rxctl &= ~NFE_RX_START;
        }
        NFE_WRITE(sc, NFE_TX_CTL, txctl);
        NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}


static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
        uint32_t link, misc, phy, seed;
        uint32_t val;

        NFE_LOCK_ASSERT(sc);

        phy = NFE_READ(sc, NFE_PHY_IFACE);
        phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

        seed = NFE_READ(sc, NFE_RNDSEED);
        seed &= ~NFE_SEED_MASK;

        misc = NFE_MISC1_MAGIC;
        link = NFE_MEDIA_SET;

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
                phy  |= NFE_PHY_HDX;    /* half-duplex */
                misc |= NFE_MISC1_HDX;
        }

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_T:        /* full-duplex only */
                link |= NFE_MEDIA_1000T;
                seed |= NFE_SEED_1000T;
                phy  |= NFE_PHY_1000T;
                break;
        case IFM_100_TX:
                link |= NFE_MEDIA_100TX;
                seed |= NFE_SEED_100TX;
                phy  |= NFE_PHY_100TX;
                break;
        case IFM_10_T:
                link |= NFE_MEDIA_10T;
                seed |= NFE_SEED_10T;
                break;
        }

        if ((phy & 0x10000000) != 0) {
                if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
                        val = NFE_R1_MAGIC_1000;
                else
                        val = NFE_R1_MAGIC_10_100;
        } else
                val = NFE_R1_MAGIC_DEFAULT;
        NFE_WRITE(sc, NFE_SETUP_R1, val);

        NFE_WRITE(sc, NFE_RNDSEED, seed);       /* XXX: gigabit NICs only? */

        NFE_WRITE(sc, NFE_PHY_IFACE, phy);
        NFE_WRITE(sc, NFE_MISC1, misc);
        NFE_WRITE(sc, NFE_LINKSPEED, link);

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                /* It seems all hardware supports Rx pause frames. */
                val = NFE_READ(sc, NFE_RXFILTER);
                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_RXPAUSE) != 0)
                        val |= NFE_PFF_RX_PAUSE;
                else
                        val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        val = NFE_READ(sc, NFE_MISC1);
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_TXPAUSE) != 0) {
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_ENABLE);
                                val |= NFE_MISC1_TX_PAUSE;
                        } else {
                                val &= ~NFE_MISC1_TX_PAUSE;
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_DISABLE);
                        }
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        } else {
                /* disable rx/tx pause frames */
                val = NFE_READ(sc, NFE_RXFILTER);
                val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                            NFE_TX_PAUSE_FRAME_DISABLE);
                        val = NFE_READ(sc, NFE_MISC1);
                        val &= ~NFE_MISC1_TX_PAUSE;
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        }
}


static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t val;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
        if (ntries == NFE_TIMEOUT) {
                DPRINTFN(sc, 2, "timeout waiting for PHY\n");
                return 0;
        }

        if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
                DPRINTFN(sc, 2, "could not read PHY\n");
                return 0;
        }

        val = NFE_READ(sc, NFE_PHY_DATA);
        if (val != 0xffffffff && val != 0)
                sc->mii_phyaddr = phy;

        DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

        return (val);
}


static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t ctl;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_DATA, val);
        ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
        NFE_WRITE(sc, NFE_PHY_CTL, ctl);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
#ifdef NFE_DEBUG
        if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
                device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
        return (0);
}

struct nfe_dmamap_arg {
        bus_addr_t nfe_busaddr;
};
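
/*
 * nfe_dma_map_segs() (declared above, defined later in this file) is
 * passed to bus_dmamap_load() as the callback that records the single
 * DMA segment's bus address into struct nfe_dmamap_arg.  A minimal
 * sketch of such a callback, assuming one segment and no deferred
 * loading, looks like:
 *
 *      static void
 *      nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg,
 *          int error)
 *      {
 *              struct nfe_dmamap_arg *ctx;
 *
 *              if (error != 0)
 *                      return;
 *              ctx = arg;
 *              ctx->nfe_busaddr = segs[0].ds_addr;
 *      }
 */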

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        int i, error, descsize;

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->desc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->desc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->cur = ring->next = 0;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_RX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
            NFE_RX_RING_COUNT * descsize,       /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->rx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
                goto fail;
        }

        /* allocate memory for the descriptors */
        error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create desc DMA map\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->desc64 = desc;
        else
                ring->desc32 = desc;

        /* map descriptors into the device-visible address space */
        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
            NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not load desc DMA map\n");
                goto fail;
        }
        ring->physaddr = ctx.nfe_busaddr;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            1, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES, 1,                /* maxsize, nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &ring->rx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
                goto fail;
        }

        error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create Rx DMA spare map\n");
                goto fail;
        }

        /*
         * Pre-allocate Rx buffers and populate Rx ring.
         */
        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                data = &sc->rxq.data[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->rx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create Rx DMA map\n");
                        goto fail;
                }
        }

fail:
        return (error);
}


static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        int i, error, descsize;

        if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
                return;
        if (jumbo_disable != 0) {
                device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
                sc->nfe_jumbo_disable = 1;
                return;
        }

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->jdesc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->jdesc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->jcur = ring->jnext = 0;

        /* Create DMA tag for jumbo Rx ring. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
            1,                                  /* nsegments */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo ring DMA tag\n");
                goto fail;
        }

        /* Create DMA tag for jumbo Rx buffers. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MJUM9BYTES,                         /* maxsize */
            1,                                  /* nsegments */
            MJUM9BYTES,                         /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx buffer DMA tag\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
        error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not allocate DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->jdesc64 = desc;
        else
                ring->jdesc32 = desc;

        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
            NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not load DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        ring->jphysaddr = ctx.nfe_busaddr;

        /* Create DMA maps for jumbo Rx buffers. */
        error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx DMA spare map\n");
                goto fail;
        }

        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
                data = &sc->jrxq.jdata[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->jrx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create jumbo Rx DMA map\n");
                        goto fail;
                }
        }

        return;

fail:
        /*
         * Running without jumbo frame support is OK for most cases,
         * so don't fail if the DMA tag/map for jumbo frames cannot
         * be created.
         */
 1277         nfe_free_jrx_ring(sc, ring);
 1278         device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
 1279             "resource shortage\n");
 1280         sc->nfe_jumbo_disable = 1;
 1281 }
 1282 
 1283 
 1284 static int
 1285 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
 1286 {
 1287         void *desc;
 1288         size_t descsize;
 1289         int i;
 1290 
 1291         ring->cur = ring->next = 0;
 1292         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1293                 desc = ring->desc64;
 1294                 descsize = sizeof (struct nfe_desc64);
 1295         } else {
 1296                 desc = ring->desc32;
 1297                 descsize = sizeof (struct nfe_desc32);
 1298         }
 1299         bzero(desc, descsize * NFE_RX_RING_COUNT);
 1300         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
 1301                 if (nfe_newbuf(sc, i) != 0)
 1302                         return (ENOBUFS);
 1303         }
 1304 
 1305         bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
 1306             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1307 
 1308         return (0);
 1309 }
 1310 
 1311 
 1312 static int
 1313 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
 1314 {
 1315         void *desc;
 1316         size_t descsize;
 1317         int i;
 1318 
 1319         ring->jcur = ring->jnext = 0;
 1320         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1321                 desc = ring->jdesc64;
 1322                 descsize = sizeof (struct nfe_desc64);
 1323         } else {
 1324                 desc = ring->jdesc32;
 1325                 descsize = sizeof (struct nfe_desc32);
 1326         }
 1327         bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
 1328         for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
 1329                 if (nfe_jnewbuf(sc, i) != 0)
 1330                         return (ENOBUFS);
 1331         }
 1332 
 1333         bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
 1334             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1335 
 1336         return (0);
 1337 }
 1338 
 1339 
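      /*
       * Tear down the standard Rx ring in the reverse order of allocation:
       * per-slot DMA maps and mbufs first, then the spare map and the data
       * tag, and finally the descriptor map, memory and tag.
       */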
 1340 static void
 1341 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
 1342 {
 1343         struct nfe_rx_data *data;
 1344         void *desc;
 1345         int i, descsize;
 1346 
 1347         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1348                 desc = ring->desc64;
 1349                 descsize = sizeof (struct nfe_desc64);
 1350         } else {
 1351                 desc = ring->desc32;
 1352                 descsize = sizeof (struct nfe_desc32);
 1353         }
 1354 
 1355         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
 1356                 data = &ring->data[i];
 1357                 if (data->rx_data_map != NULL) {
 1358                         bus_dmamap_destroy(ring->rx_data_tag,
 1359                             data->rx_data_map);
 1360                         data->rx_data_map = NULL;
 1361                 }
 1362                 if (data->m != NULL) {
 1363                         m_freem(data->m);
 1364                         data->m = NULL;
 1365                 }
 1366         }
 1367         if (ring->rx_data_tag != NULL) {
 1368                 if (ring->rx_spare_map != NULL) {
 1369                         bus_dmamap_destroy(ring->rx_data_tag,
 1370                             ring->rx_spare_map);
 1371                         ring->rx_spare_map = NULL;
 1372                 }
 1373                 bus_dma_tag_destroy(ring->rx_data_tag);
 1374                 ring->rx_data_tag = NULL;
 1375         }
 1376 
 1377         if (desc != NULL) {
 1378                 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
 1379                 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
 1380                 ring->desc64 = NULL;
 1381                 ring->desc32 = NULL;
 1382                 ring->rx_desc_map = NULL;
 1383         }
 1384         if (ring->rx_desc_tag != NULL) {
 1385                 bus_dma_tag_destroy(ring->rx_desc_tag);
 1386                 ring->rx_desc_tag = NULL;
 1387         }
 1388 }
 1389 
 1390 
 1391 static void
 1392 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
 1393 {
 1394         struct nfe_rx_data *data;
 1395         void *desc;
 1396         int i, descsize;
 1397 
 1398         if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
 1399                 return;
 1400 
 1401         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1402                 desc = ring->jdesc64;
 1403                 descsize = sizeof (struct nfe_desc64);
 1404         } else {
 1405                 desc = ring->jdesc32;
 1406                 descsize = sizeof (struct nfe_desc32);
 1407         }
 1408 
 1409         for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
 1410                 data = &ring->jdata[i];
 1411                 if (data->rx_data_map != NULL) {
 1412                         bus_dmamap_destroy(ring->jrx_data_tag,
 1413                             data->rx_data_map);
 1414                         data->rx_data_map = NULL;
 1415                 }
 1416                 if (data->m != NULL) {
 1417                         m_freem(data->m);
 1418                         data->m = NULL;
 1419                 }
 1420         }
 1421         if (ring->jrx_data_tag != NULL) {
 1422                 if (ring->jrx_spare_map != NULL) {
 1423                         bus_dmamap_destroy(ring->jrx_data_tag,
 1424                             ring->jrx_spare_map);
 1425                         ring->jrx_spare_map = NULL;
 1426                 }
 1427                 bus_dma_tag_destroy(ring->jrx_data_tag);
 1428                 ring->jrx_data_tag = NULL;
 1429         }
 1430 
 1431         if (desc != NULL) {
 1432                 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
 1433                 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
 1434                 ring->jdesc64 = NULL;
 1435                 ring->jdesc32 = NULL;
 1436                 ring->jrx_desc_map = NULL;
 1437         }
 1438 
 1439         if (ring->jrx_desc_tag != NULL) {
 1440                 bus_dma_tag_destroy(ring->jrx_desc_tag);
 1441                 ring->jrx_desc_tag = NULL;
 1442         }
 1443 }
 1444 
 1445 
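      /*
       * The Tx path uses two DMA tags: one for the descriptor ring, which
       * must be a single physically contiguous segment, and one for mbuf
       * data, which may scatter across up to NFE_MAX_SCATTER segments and
       * is sized for TSO.
       */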
 1446 static int
 1447 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1448 {
 1449         struct nfe_dmamap_arg ctx;
 1450         int i, error;
 1451         void *desc;
 1452         int descsize;
 1453 
 1454         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1455                 desc = ring->desc64;
 1456                 descsize = sizeof (struct nfe_desc64);
 1457         } else {
 1458                 desc = ring->desc32;
 1459                 descsize = sizeof (struct nfe_desc32);
 1460         }
 1461 
 1462         ring->queued = 0;
 1463         ring->cur = ring->next = 0;
 1464 
 1465         error = bus_dma_tag_create(sc->nfe_parent_tag,
 1466             NFE_RING_ALIGN, 0,                  /* alignment, boundary */
 1467             BUS_SPACE_MAXADDR,                  /* lowaddr */
 1468             BUS_SPACE_MAXADDR,                  /* highaddr */
 1469             NULL, NULL,                         /* filter, filterarg */
 1470             NFE_TX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
 1471             NFE_TX_RING_COUNT * descsize,       /* maxsegsize */
 1472             0,                                  /* flags */
 1473             NULL, NULL,                         /* lockfunc, lockarg */
 1474             &ring->tx_desc_tag);
 1475         if (error != 0) {
 1476                 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
 1477                 goto fail;
 1478         }
 1479 
 1480         error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
 1481             BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
 1482         if (error != 0) {
 1483                 device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
 1484                 goto fail;
 1485         }
 1486         if (sc->nfe_flags & NFE_40BIT_ADDR)
 1487                 ring->desc64 = desc;
 1488         else
 1489                 ring->desc32 = desc;
 1490 
 1491         ctx.nfe_busaddr = 0;
 1492         error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
 1493             NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
 1494         if (error != 0) {
 1495                 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
 1496                 goto fail;
 1497         }
 1498         ring->physaddr = ctx.nfe_busaddr;
 1499 
 1500         error = bus_dma_tag_create(sc->nfe_parent_tag,
 1501             1, 0,
 1502             BUS_SPACE_MAXADDR,
 1503             BUS_SPACE_MAXADDR,
 1504             NULL, NULL,
 1505             NFE_TSO_MAXSIZE,
 1506             NFE_MAX_SCATTER,
 1507             NFE_TSO_MAXSGSIZE,
 1508             0,
 1509             NULL, NULL,
 1510             &ring->tx_data_tag);
 1511         if (error != 0) {
 1512                 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
 1513                 goto fail;
 1514         }
 1515 
 1516         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
 1517                 error = bus_dmamap_create(ring->tx_data_tag, 0,
 1518                     &ring->data[i].tx_data_map);
 1519                 if (error != 0) {
 1520                         device_printf(sc->nfe_dev,
 1521                             "could not create Tx DMA map\n");
 1522                         goto fail;
 1523                 }
 1524         }
 1525 
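              /* The success path also falls through here with error == 0. */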
 1526 fail:
 1527         return (error);
 1528 }
 1529 
 1530 
 1531 static void
 1532 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1533 {
 1534         void *desc;
 1535         size_t descsize;
 1536 
 1537         sc->nfe_force_tx = 0;
 1538         ring->queued = 0;
 1539         ring->cur = ring->next = 0;
 1540         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1541                 desc = ring->desc64;
 1542                 descsize = sizeof (struct nfe_desc64);
 1543         } else {
 1544                 desc = ring->desc32;
 1545                 descsize = sizeof (struct nfe_desc32);
 1546         }
 1547         bzero(desc, descsize * NFE_TX_RING_COUNT);
 1548 
 1549         bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
 1550             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1551 }
 1552 
 1553 
 1554 static void
 1555 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1556 {
 1557         struct nfe_tx_data *data;
 1558         void *desc;
 1559         int i, descsize;
 1560 
 1561         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1562                 desc = ring->desc64;
 1563                 descsize = sizeof (struct nfe_desc64);
 1564         } else {
 1565                 desc = ring->desc32;
 1566                 descsize = sizeof (struct nfe_desc32);
 1567         }
 1568 
 1569         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
 1570                 data = &ring->data[i];
 1571 
 1572                 if (data->m != NULL) {
 1573                         bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
 1574                             BUS_DMASYNC_POSTWRITE);
 1575                         bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
 1576                         m_freem(data->m);
 1577                         data->m = NULL;
 1578                 }
 1579                 if (data->tx_data_map != NULL) {
 1580                         bus_dmamap_destroy(ring->tx_data_tag,
 1581                             data->tx_data_map);
 1582                         data->tx_data_map = NULL;
 1583                 }
 1584         }
 1585 
 1586         if (ring->tx_data_tag != NULL) {
 1587                 bus_dma_tag_destroy(ring->tx_data_tag);
 1588                 ring->tx_data_tag = NULL;
 1589         }
 1590 
 1591         if (desc != NULL) {
 1592                 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
 1593                     BUS_DMASYNC_POSTWRITE);
 1594                 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
 1595                 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
 1596                 ring->desc64 = NULL;
 1597                 ring->desc32 = NULL;
 1598                 ring->tx_desc_map = NULL;
 1599                 bus_dma_tag_destroy(ring->tx_desc_tag);
 1600                 ring->tx_desc_tag = NULL;
 1601         }
 1602 }
 1603 
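      /*
       * With DEVICE_POLLING compiled in and polling enabled on the
       * interface, nfe_poll() replaces the interrupt path: it runs the same
       * Rx/Tx completion handlers from the polling loop and only consults
       * the interrupt status register for POLL_AND_CHECK_STATUS commands.
       */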
 1604 #ifdef DEVICE_POLLING
 1605 static poll_handler_t nfe_poll;
 1606 
 1607 
 1608 static int
 1609 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1610 {
 1611         struct nfe_softc *sc = ifp->if_softc;
 1612         uint32_t r;
 1613         int rx_npkts = 0;
 1614 
 1615         NFE_LOCK(sc);
 1616 
 1617         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1618                 NFE_UNLOCK(sc);
 1619                 return (rx_npkts);
 1620         }
 1621 
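              /*
               * The nfe_[j]rxeof() return value is an error code; the
               * packet count comes back through the rx_npkts pointer.
               */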
 1622         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
 1623                 nfe_jrxeof(sc, count, &rx_npkts);
 1624         else
 1625                 nfe_rxeof(sc, count, &rx_npkts);
 1626         nfe_txeof(sc);
 1627         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1628                 nfe_start_locked(ifp);
 1629 
 1630         if (cmd == POLL_AND_CHECK_STATUS) {
 1631                 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
 1632                         NFE_UNLOCK(sc);
 1633                         return (rx_npkts);
 1634                 }
 1635                 NFE_WRITE(sc, sc->nfe_irq_status, r);
 1636 
 1637                 if (r & NFE_IRQ_LINK) {
 1638                         NFE_READ(sc, NFE_PHY_STATUS);
 1639                         NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
 1640                         DPRINTF(sc, "link state changed\n");
 1641                 }
 1642         }
 1643         NFE_UNLOCK(sc);
 1644         return (rx_npkts);
 1645 }
 1646 #endif /* DEVICE_POLLING */
 1647 
 1648 static void
 1649 nfe_set_intr(struct nfe_softc *sc)
 1650 {
 1651 
 1652         if (sc->nfe_msi != 0)
 1653                 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
 1654 }
 1655 
 1656 
 1657 /* In MSI-X mode, a write to the interrupt mask registers behaves as XOR. */
 1658 static __inline void
 1659 nfe_enable_intr(struct nfe_softc *sc)
 1660 {
 1661 
 1662         if (sc->nfe_msix != 0) {
 1663                 /* XXX Should have a better way to enable interrupts! */
 1664                 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
 1665                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
 1666         } else
 1667                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
 1668 }
 1669 
 1670 
 1671 static __inline void
 1672 nfe_disable_intr(struct nfe_softc *sc)
 1673 {
 1674 
 1675         if (sc->nfe_msix != 0) {
 1676                 /* XXX Should have a better way to disable interrupts! */
 1677                 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
 1678                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
 1679         } else
 1680                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
 1681 }
 1682 
 1683 
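      /*
       * Interface ioctl handler.  MTU changes and most capability toggles
       * take effect by marking the interface down and re-running the init
       * routine; pure Rx filter changes only require nfe_setmulti().
       */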
 1684 static int
 1685 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1686 {
 1687         struct nfe_softc *sc;
 1688         struct ifreq *ifr;
 1689         struct mii_data *mii;
 1690         int error, init, mask;
 1691 
 1692         sc = ifp->if_softc;
 1693         ifr = (struct ifreq *) data;
 1694         error = 0;
 1695         init = 0;
 1696         switch (cmd) {
 1697         case SIOCSIFMTU:
 1698                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
 1699                         error = EINVAL;
 1700                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1701                         if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
 1702                             (sc->nfe_jumbo_disable != 0)) &&
 1703                             ifr->ifr_mtu > ETHERMTU)
 1704                                 error = EINVAL;
 1705                         else {
 1706                                 NFE_LOCK(sc);
 1707                                 ifp->if_mtu = ifr->ifr_mtu;
 1708                                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1709                                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1710                                         nfe_init_locked(sc);
 1711                                 }
 1712                                 NFE_UNLOCK(sc);
 1713                         }
 1714                 }
 1715                 break;
 1716         case SIOCSIFFLAGS:
 1717                 NFE_LOCK(sc);
 1718                 if (ifp->if_flags & IFF_UP) {
 1719                         /*
 1720                          * If only the PROMISC or ALLMULTI flag changes, then
 1721                          * don't do a full re-init of the chip, just update
 1722                          * the Rx filter.
 1723                          */
 1724                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
 1725                             ((ifp->if_flags ^ sc->nfe_if_flags) &
 1726                              (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1727                                 nfe_setmulti(sc);
 1728                         else
 1729                                 nfe_init_locked(sc);
 1730                 } else {
 1731                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1732                                 nfe_stop(ifp);
 1733                 }
 1734                 sc->nfe_if_flags = ifp->if_flags;
 1735                 NFE_UNLOCK(sc);
 1736                 error = 0;
 1737                 break;
 1738         case SIOCADDMULTI:
 1739         case SIOCDELMULTI:
 1740                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1741                         NFE_LOCK(sc);
 1742                         nfe_setmulti(sc);
 1743                         NFE_UNLOCK(sc);
 1744                         error = 0;
 1745                 }
 1746                 break;
 1747         case SIOCSIFMEDIA:
 1748         case SIOCGIFMEDIA:
 1749                 mii = device_get_softc(sc->nfe_miibus);
 1750                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1751                 break;
 1752         case SIOCSIFCAP:
 1753                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1754 #ifdef DEVICE_POLLING
 1755                 if ((mask & IFCAP_POLLING) != 0) {
 1756                         if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
 1757                                 error = ether_poll_register(nfe_poll, ifp);
 1758                                 if (error)
 1759                                         break;
 1760                                 NFE_LOCK(sc);
 1761                                 nfe_disable_intr(sc);
 1762                                 ifp->if_capenable |= IFCAP_POLLING;
 1763                                 NFE_UNLOCK(sc);
 1764                         } else {
 1765                                 error = ether_poll_deregister(ifp);
 1766                                 /* Enable interrupts even in the error case. */
 1767                                 NFE_LOCK(sc);
 1768                                 nfe_enable_intr(sc);
 1769                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1770                                 NFE_UNLOCK(sc);
 1771                         }
 1772                 }
 1773 #endif /* DEVICE_POLLING */
 1774                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 1775                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 1776                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1777                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1778                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 1779                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1780                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1781                                 ifp->if_hwassist |= NFE_CSUM_FEATURES;
 1782                         else
 1783                                 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
 1784                 }
 1785                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1786                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
 1787                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1788                         init++;
 1789                 }
 1790                 if ((mask & IFCAP_TSO4) != 0 &&
 1791                     (ifp->if_capabilities & IFCAP_TSO4) != 0) {
 1792                         ifp->if_capenable ^= IFCAP_TSO4;
 1793                         if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
 1794                                 ifp->if_hwassist |= CSUM_TSO;
 1795                         else
 1796                                 ifp->if_hwassist &= ~CSUM_TSO;
 1797                 }
 1798                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 1799                     (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
 1800                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 1801                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1802                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
 1803                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1804                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
 1805                                 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
 1806                         init++;
 1807                 }
 1808                 /*
 1809                  * XXX
 1810                  * It seems that VLAN stripping requires Rx checksum offload.
 1811                  * Unfortunately FreeBSD has no way to disable only Rx-side
 1812                  * VLAN stripping, so when Rx checksum offload is known to be
 1813                  * disabled, turn the entire hardware VLAN assist off.
 1814                  */
 1815                 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
 1816                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 1817                                 init++;
 1818                         ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
 1819                             IFCAP_VLAN_HWTSO);
 1820                 }
 1821                 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1822                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1823                         nfe_init(sc);
 1824                 }
 1825                 VLAN_CAPABILITIES(ifp);
 1826                 break;
 1827         default:
 1828                 error = ether_ioctl(ifp, cmd, data);
 1829                 break;
 1830         }
 1831 
 1832         return (error);
 1833 }
 1834 
 1835 
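      /*
       * Fast interrupt filter: it runs without the softc lock, so it only
       * checks whether the interrupt is ours, masks further interrupts and
       * defers the real work to nfe_int_task() on the taskqueue.
       */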
 1836 static int
 1837 nfe_intr(void *arg)
 1838 {
 1839         struct nfe_softc *sc;
 1840         uint32_t status;
 1841 
 1842         sc = (struct nfe_softc *)arg;
 1843 
 1844         status = NFE_READ(sc, sc->nfe_irq_status);
 1845         if (status == 0 || status == 0xffffffff)
 1846                 return (FILTER_STRAY);
 1847         nfe_disable_intr(sc);
 1848         taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
 1849 
 1850         return (FILTER_HANDLED);
 1851 }
 1852 
 1853 
 1854 static void
 1855 nfe_int_task(void *arg, int pending)
 1856 {
 1857         struct nfe_softc *sc = arg;
 1858         struct ifnet *ifp = sc->nfe_ifp;
 1859         uint32_t r;
 1860         int domore;
 1861 
 1862         NFE_LOCK(sc);
 1863 
 1864         if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
 1865                 nfe_enable_intr(sc);
 1866                 NFE_UNLOCK(sc);
 1867                 return; /* not for us */
 1868         }
 1869         NFE_WRITE(sc, sc->nfe_irq_status, r);
 1870 
 1871         DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
 1872 
 1873 #ifdef DEVICE_POLLING
 1874         if (ifp->if_capenable & IFCAP_POLLING) {
 1875                 NFE_UNLOCK(sc);
 1876                 return;
 1877         }
 1878 #endif
 1879 
 1880         if (r & NFE_IRQ_LINK) {
 1881                 NFE_READ(sc, NFE_PHY_STATUS);
 1882                 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
 1883                 DPRINTF(sc, "link state changed\n");
 1884         }
 1885 
 1886         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1887                 NFE_UNLOCK(sc);
 1888                 nfe_disable_intr(sc);
 1889                 return;
 1890         }
 1891 
 1892         domore = 0;
 1893         /* check Rx ring */
 1894         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
 1895                 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
 1896         else
 1897                 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
 1898         /* check Tx ring */
 1899         nfe_txeof(sc);
 1900 
 1901         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1902                 nfe_start_locked(ifp);
 1903 
 1904         NFE_UNLOCK(sc);
 1905 
 1906         if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
 1907                 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
 1908                 return;
 1909         }
 1910 
 1911         /* Reenable interrupts. */
 1912         nfe_enable_intr(sc);
 1913 }
 1914 
 1915 
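      /*
       * Return a descriptor to the hardware without replacing its mbuf;
       * used when an Rx error or an mbuf shortage forces a frame to be
       * dropped in place.
       */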
 1916 static __inline void
 1917 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
 1918 {
 1919         struct nfe_desc32 *desc32;
 1920         struct nfe_desc64 *desc64;
 1921         struct nfe_rx_data *data;
 1922         struct mbuf *m;
 1923 
 1924         data = &sc->rxq.data[idx];
 1925         m = data->m;
 1926 
 1927         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1928                 desc64 = &sc->rxq.desc64[idx];
 1929                 /* A VLAN tag may have overwritten physaddr[1]; restore it. */
 1930                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
 1931                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
 1932                 desc64->length = htole16(m->m_len);
 1933                 desc64->flags = htole16(NFE_RX_READY);
 1934         } else {
 1935                 desc32 = &sc->rxq.desc32[idx];
 1936                 desc32->length = htole16(m->m_len);
 1937                 desc32->flags = htole16(NFE_RX_READY);
 1938         }
 1939 }
 1940 
 1941 
 1942 static __inline void
 1943 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
 1944 {
 1945         struct nfe_desc32 *desc32;
 1946         struct nfe_desc64 *desc64;
 1947         struct nfe_rx_data *data;
 1948         struct mbuf *m;
 1949 
 1950         data = &sc->jrxq.jdata[idx];
 1951         m = data->m;
 1952 
 1953         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1954                 desc64 = &sc->jrxq.jdesc64[idx];
 1955                 /* A VLAN tag may have overwritten physaddr[1]; restore it. */
 1956                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
 1957                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
 1958                 desc64->length = htole16(m->m_len);
 1959                 desc64->flags = htole16(NFE_RX_READY);
 1960         } else {
 1961                 desc32 = &sc->jrxq.jdesc32[idx];
 1962                 desc32->length = htole16(m->m_len);
 1963                 desc32->flags = htole16(NFE_RX_READY);
 1964         }
 1965 }
 1966 
 1967 
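      /*
       * Attach a fresh mbuf cluster to Rx slot idx.  The new mbuf is loaded
       * through the spare DMA map first, so the old buffer stays intact if
       * allocation or loading fails; on success the slot's map and the
       * spare map are simply swapped.
       */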
 1968 static int
 1969 nfe_newbuf(struct nfe_softc *sc, int idx)
 1970 {
 1971         struct nfe_rx_data *data;
 1972         struct nfe_desc32 *desc32;
 1973         struct nfe_desc64 *desc64;
 1974         struct mbuf *m;
 1975         bus_dma_segment_t segs[1];
 1976         bus_dmamap_t map;
 1977         int nsegs;
 1978 
 1979         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1980         if (m == NULL)
 1981                 return (ENOBUFS);
 1982 
 1983         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1984         m_adj(m, ETHER_ALIGN);
 1985 
 1986         if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
 1987             m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
 1988                 m_freem(m);
 1989                 return (ENOBUFS);
 1990         }
 1991         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1992 
 1993         data = &sc->rxq.data[idx];
 1994         if (data->m != NULL) {
 1995                 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
 1996                     BUS_DMASYNC_POSTREAD);
 1997                 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
 1998         }
 1999         map = data->rx_data_map;
 2000         data->rx_data_map = sc->rxq.rx_spare_map;
 2001         sc->rxq.rx_spare_map = map;
 2002         bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
 2003             BUS_DMASYNC_PREREAD);
 2004         data->paddr = segs[0].ds_addr;
 2005         data->m = m;
 2006         /* update mapping address in h/w descriptor */
 2007         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2008                 desc64 = &sc->rxq.desc64[idx];
 2009                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
 2010                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2011                 desc64->length = htole16(segs[0].ds_len);
 2012                 desc64->flags = htole16(NFE_RX_READY);
 2013         } else {
 2014                 desc32 = &sc->rxq.desc32[idx];
 2015                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2016                 desc32->length = htole16(segs[0].ds_len);
 2017                 desc32->flags = htole16(NFE_RX_READY);
 2018         }
 2019 
 2020         return (0);
 2021 }
 2022 
 2023 
 2024 static int
 2025 nfe_jnewbuf(struct nfe_softc *sc, int idx)
 2026 {
 2027         struct nfe_rx_data *data;
 2028         struct nfe_desc32 *desc32;
 2029         struct nfe_desc64 *desc64;
 2030         struct mbuf *m;
 2031         bus_dma_segment_t segs[1];
 2032         bus_dmamap_t map;
 2033         int nsegs;
 2034 
 2035         m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 2036         if (m == NULL)
 2037                 return (ENOBUFS);
 2038         if ((m->m_flags & M_EXT) == 0) {
 2039                 m_freem(m);
 2040                 return (ENOBUFS);
 2041         }
 2042         m->m_pkthdr.len = m->m_len = MJUM9BYTES;
 2043         m_adj(m, ETHER_ALIGN);
 2044 
 2045         if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
 2046             sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
 2047                 m_freem(m);
 2048                 return (ENOBUFS);
 2049         }
 2050         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 2051 
 2052         data = &sc->jrxq.jdata[idx];
 2053         if (data->m != NULL) {
 2054                 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
 2055                     BUS_DMASYNC_POSTREAD);
 2056                 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
 2057         }
 2058         map = data->rx_data_map;
 2059         data->rx_data_map = sc->jrxq.jrx_spare_map;
 2060         sc->jrxq.jrx_spare_map = map;
 2061         bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
 2062             BUS_DMASYNC_PREREAD);
 2063         data->paddr = segs[0].ds_addr;
 2064         data->m = m;
 2065         /* update mapping address in h/w descriptor */
 2066         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2067                 desc64 = &sc->jrxq.jdesc64[idx];
 2068                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
 2069                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2070                 desc64->length = htole16(segs[0].ds_len);
 2071                 desc64->flags = htole16(NFE_RX_READY);
 2072         } else {
 2073                 desc32 = &sc->jrxq.jdesc32[idx];
 2074                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2075                 desc32->length = htole16(segs[0].ds_len);
 2076                 desc32->flags = htole16(NFE_RX_READY);
 2077         }
 2078 
 2079         return (0);
 2080 }
 2081 
 2082 
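      /*
       * Rx completion: walk the ring from rxq.cur until a descriptor that
       * is still owned by the hardware (NFE_RX_READY set) is found,
       * validate each frame using the V1 or V2 status bits and pass
       * completed mbufs up with the softc lock dropped around if_input().
       */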
 2083 static int
 2084 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
 2085 {
 2086         struct ifnet *ifp = sc->nfe_ifp;
 2087         struct nfe_desc32 *desc32;
 2088         struct nfe_desc64 *desc64;
 2089         struct nfe_rx_data *data;
 2090         struct mbuf *m;
 2091         uint16_t flags;
 2092         int len, prog, rx_npkts;
 2093         uint32_t vtag = 0;
 2094 
 2095         rx_npkts = 0;
 2096         NFE_LOCK_ASSERT(sc);
 2097 
 2098         bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
 2099             BUS_DMASYNC_POSTREAD);
 2100 
 2101         for (prog = 0;; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
 2102                 if (count <= 0)
 2103                         break;
 2104                 count--;
 2105 
 2106                 data = &sc->rxq.data[sc->rxq.cur];
 2107 
 2108                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2109                         desc64 = &sc->rxq.desc64[sc->rxq.cur];
 2110                         vtag = le32toh(desc64->physaddr[1]);
 2111                         flags = le16toh(desc64->flags);
 2112                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
 2113                 } else {
 2114                         desc32 = &sc->rxq.desc32[sc->rxq.cur];
 2115                         flags = le16toh(desc32->flags);
 2116                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
 2117                 }
 2118 
 2119                 if (flags & NFE_RX_READY)
 2120                         break;
 2121                 prog++;
 2122                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2123                         if (!(flags & NFE_RX_VALID_V1)) {
 2124                                 ifp->if_ierrors++;
 2125                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
 2126                                 continue;
 2127                         }
 2128                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
 2129                                 flags &= ~NFE_RX_ERROR;
 2130                                 len--;  /* fix buffer length */
 2131                         }
 2132                 } else {
 2133                         if (!(flags & NFE_RX_VALID_V2)) {
 2134                                 ifp->if_ierrors++;
 2135                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
 2136                                 continue;
 2137                         }
 2138 
 2139                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
 2140                                 flags &= ~NFE_RX_ERROR;
 2141                                 len--;  /* fix buffer length */
 2142                         }
 2143                 }
 2144 
 2145                 if (flags & NFE_RX_ERROR) {
 2146                         ifp->if_ierrors++;
 2147                         nfe_discard_rxbuf(sc, sc->rxq.cur);
 2148                         continue;
 2149                 }
 2150 
 2151                 m = data->m;
 2152                 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
 2153                         ifp->if_iqdrops++;
 2154                         nfe_discard_rxbuf(sc, sc->rxq.cur);
 2155                         continue;
 2156                 }
 2157 
 2158                 if ((vtag & NFE_RX_VTAG) != 0 &&
 2159                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 2160                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
 2161                         m->m_flags |= M_VLANTAG;
 2162                 }
 2163 
 2164                 m->m_pkthdr.len = m->m_len = len;
 2165                 m->m_pkthdr.rcvif = ifp;
 2166 
 2167                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 2168                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
 2169                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2170                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2171                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
 2172                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
 2173                                         m->m_pkthdr.csum_flags |=
 2174                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2175                                         m->m_pkthdr.csum_data = 0xffff;
 2176                                 }
 2177                         }
 2178                 }
 2179 
 2180                 ifp->if_ipackets++;
 2181 
 2182                 NFE_UNLOCK(sc);
 2183                 (*ifp->if_input)(ifp, m);
 2184                 NFE_LOCK(sc);
 2185                 rx_npkts++;
 2186         }
 2187 
 2188         if (prog > 0)
 2189                 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
 2190                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2191 
 2192         if (rx_npktsp != NULL)
 2193                 *rx_npktsp = rx_npkts;
 2194         return (count > 0 ? 0 : EAGAIN);
 2195 }
 2196 
 2197 
 2198 static int
 2199 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
 2200 {
 2201         struct ifnet *ifp = sc->nfe_ifp;
 2202         struct nfe_desc32 *desc32;
 2203         struct nfe_desc64 *desc64;
 2204         struct nfe_rx_data *data;
 2205         struct mbuf *m;
 2206         uint16_t flags;
 2207         int len, prog, rx_npkts;
 2208         uint32_t vtag = 0;
 2209 
 2210         rx_npkts = 0;
 2211         NFE_LOCK_ASSERT(sc);
 2212 
 2213         bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
 2214             BUS_DMASYNC_POSTREAD);
 2215 
 2216         for (prog = 0;; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
 2217             vtag = 0) {
 2218                 if (count <= 0)
 2219                         break;
 2220                 count--;
 2221 
 2222                 data = &sc->jrxq.jdata[sc->jrxq.jcur];
 2223 
 2224                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2225                         desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
 2226                         vtag = le32toh(desc64->physaddr[1]);
 2227                         flags = le16toh(desc64->flags);
 2228                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
 2229                 } else {
 2230                         desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
 2231                         flags = le16toh(desc32->flags);
 2232                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
 2233                 }
 2234 
 2235                 if (flags & NFE_RX_READY)
 2236                         break;
 2237                 prog++;
 2238                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2239                         if (!(flags & NFE_RX_VALID_V1)) {
 2240                                 ifp->if_ierrors++;
 2241                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2242                                 continue;
 2243                         }
 2244                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
 2245                                 flags &= ~NFE_RX_ERROR;
 2246                                 len--;  /* fix buffer length */
 2247                         }
 2248                 } else {
 2249                         if (!(flags & NFE_RX_VALID_V2)) {
 2250                                 ifp->if_ierrors++;
 2251                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2252                                 continue;
 2253                         }
 2254 
 2255                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
 2256                                 flags &= ~NFE_RX_ERROR;
 2257                                 len--;  /* fix buffer length */
 2258                         }
 2259                 }
 2260 
 2261                 if (flags & NFE_RX_ERROR) {
 2262                         ifp->if_ierrors++;
 2263                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2264                         continue;
 2265                 }
 2266 
 2267                 m = data->m;
 2268                 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
 2269                         ifp->if_iqdrops++;
 2270                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2271                         continue;
 2272                 }
 2273 
 2274                 if ((vtag & NFE_RX_VTAG) != 0 &&
 2275                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 2276                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
 2277                         m->m_flags |= M_VLANTAG;
 2278                 }
 2279 
 2280                 m->m_pkthdr.len = m->m_len = len;
 2281                 m->m_pkthdr.rcvif = ifp;
 2282 
 2283                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 2284                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
 2285                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2286                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2287                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
 2288                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
 2289                                         m->m_pkthdr.csum_flags |=
 2290                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2291                                         m->m_pkthdr.csum_data = 0xffff;
 2292                                 }
 2293                         }
 2294                 }
 2295 
 2296                 ifp->if_ipackets++;
 2297 
 2298                 NFE_UNLOCK(sc);
 2299                 (*ifp->if_input)(ifp, m);
 2300                 NFE_LOCK(sc);
 2301                 rx_npkts++;
 2302         }
 2303 
 2304         if (prog > 0)
 2305                 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
 2306                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2307 
 2308         if (rx_npktsp != NULL)
 2309                 *rx_npktsp = rx_npkts;
 2310         return (count > 0 ? 0 : EAGAIN);
 2311 }
 2312 
 2313 
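      /*
       * Tx completion: walk from txq.next to txq.cur, stopping at the first
       * descriptor the hardware still owns (NFE_TX_VALID set).  The mbuf is
       * unmapped and freed only at the chain's last fragment.
       */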
 2314 static void
 2315 nfe_txeof(struct nfe_softc *sc)
 2316 {
 2317         struct ifnet *ifp = sc->nfe_ifp;
 2318         struct nfe_desc32 *desc32;
 2319         struct nfe_desc64 *desc64;
 2320         struct nfe_tx_data *data = NULL;
 2321         uint16_t flags;
 2322         int cons, prog;
 2323 
 2324         NFE_LOCK_ASSERT(sc);
 2325 
 2326         bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
 2327             BUS_DMASYNC_POSTREAD);
 2328 
 2329         prog = 0;
 2330         for (cons = sc->txq.next; cons != sc->txq.cur;
 2331             NFE_INC(cons, NFE_TX_RING_COUNT)) {
 2332                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2333                         desc64 = &sc->txq.desc64[cons];
 2334                         flags = le16toh(desc64->flags);
 2335                 } else {
 2336                         desc32 = &sc->txq.desc32[cons];
 2337                         flags = le16toh(desc32->flags);
 2338                 }
 2339 
 2340                 if (flags & NFE_TX_VALID)
 2341                         break;
 2342 
 2343                 prog++;
 2344                 sc->txq.queued--;
 2345                 data = &sc->txq.data[cons];
 2346 
 2347                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2348                         if ((flags & NFE_TX_LASTFRAG_V1) == 0)
 2349                                 continue;
 2350                         if ((flags & NFE_TX_ERROR_V1) != 0) {
 2351                                 device_printf(sc->nfe_dev,
 2352                                     "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
 2353 
 2354                                 ifp->if_oerrors++;
 2355                         } else
 2356                                 ifp->if_opackets++;
 2357                 } else {
 2358                         if ((flags & NFE_TX_LASTFRAG_V2) == 0)
 2359                                 continue;
 2360                         if ((flags & NFE_TX_ERROR_V2) != 0) {
 2361                                 device_printf(sc->nfe_dev,
 2362                                     "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
 2363                                 ifp->if_oerrors++;
 2364                         } else
 2365                                 ifp->if_opackets++;
 2366                 }
 2367 
 2368                 /* last fragment of the mbuf chain transmitted */
 2369                 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
 2370                 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
 2371                     BUS_DMASYNC_POSTWRITE);
 2372                 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
 2373                 m_freem(data->m);
 2374                 data->m = NULL;
 2375         }
 2376 
 2377         if (prog > 0) {
 2378                 sc->nfe_force_tx = 0;
 2379                 sc->txq.next = cons;
 2380                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2381                 if (sc->txq.queued == 0)
 2382                         sc->nfe_watchdog_timer = 0;
 2383         }
 2384 }
 2385 
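      /*
       * Map an outgoing mbuf chain onto Tx descriptors.  Chains with too
       * many fragments are m_collapse()d down to NFE_MAX_SCATTER segments.
       * NFE_TX_VALID is set on the first descriptor only after the rest of
       * the chain is built, so the hardware never sees a partial chain; the
       * DMA map and mbuf are stored at the last slot, where nfe_txeof()
       * releases them.
       */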
 2386 static int
 2387 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
 2388 {
 2389         struct nfe_desc32 *desc32 = NULL;
 2390         struct nfe_desc64 *desc64 = NULL;
 2391         bus_dmamap_t map;
 2392         bus_dma_segment_t segs[NFE_MAX_SCATTER];
 2393         int error, i, nsegs, prod, si;
 2394         uint32_t tso_segsz;
 2395         uint16_t cflags, flags;
 2396         struct mbuf *m;
 2397 
 2398         prod = si = sc->txq.cur;
 2399         map = sc->txq.data[prod].tx_data_map;
 2400 
 2401         error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
 2402             &nsegs, BUS_DMA_NOWAIT);
 2403         if (error == EFBIG) {
 2404                 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
 2405                 if (m == NULL) {
 2406                         m_freem(*m_head);
 2407                         *m_head = NULL;
 2408                         return (ENOBUFS);
 2409                 }
 2410                 *m_head = m;
 2411                 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
 2412                     *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
 2413                 if (error != 0) {
 2414                         m_freem(*m_head);
 2415                         *m_head = NULL;
 2416                         return (ENOBUFS);
 2417                 }
 2418         } else if (error != 0)
 2419                 return (error);
 2420         if (nsegs == 0) {
 2421                 m_freem(*m_head);
 2422                 *m_head = NULL;
 2423                 return (EIO);
 2424         }
 2425 
 2426         if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
 2427                 bus_dmamap_unload(sc->txq.tx_data_tag, map);
 2428                 return (ENOBUFS);
 2429         }
 2430 
 2431         m = *m_head;
 2432         cflags = flags = 0;
 2433         tso_segsz = 0;
 2434         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2435                 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
 2436                     NFE_TX_TSO_SHIFT;
 2437                 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
 2438                 cflags |= NFE_TX_TSO;
 2439         } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
 2440                 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
 2441                         cflags |= NFE_TX_IP_CSUM;
 2442                 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
 2443                         cflags |= NFE_TX_TCP_UDP_CSUM;
 2444                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2445                         cflags |= NFE_TX_TCP_UDP_CSUM;
 2446         }
 2447 
 2448         for (i = 0; i < nsegs; i++) {
 2449                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2450                         desc64 = &sc->txq.desc64[prod];
 2451                         desc64->physaddr[0] =
 2452                             htole32(NFE_ADDR_HI(segs[i].ds_addr));
 2453                         desc64->physaddr[1] =
 2454                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
 2455                         desc64->vtag = 0;
 2456                         desc64->length = htole16(segs[i].ds_len - 1);
 2457                         desc64->flags = htole16(flags);
 2458                 } else {
 2459                         desc32 = &sc->txq.desc32[prod];
 2460                         desc32->physaddr =
 2461                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
 2462                         desc32->length = htole16(segs[i].ds_len - 1);
 2463                         desc32->flags = htole16(flags);
 2464                 }
 2465 
 2466                 /*
 2467                  * Setting of the valid bit in the first descriptor is
 2468                  * deferred until the whole chain is fully setup.
 2469                  */
 2470                 flags |= NFE_TX_VALID;
 2471 
 2472                 sc->txq.queued++;
 2473                 NFE_INC(prod, NFE_TX_RING_COUNT);
 2474         }
 2475 
 2476         /*
 2477          * The whole mbuf chain has been DMA mapped; fix the last and first
 2478          * descriptors.  Csum flags, vtag and TSO belong to the first one.
 2479          */
 2480         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2481                 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
 2482                 desc64 = &sc->txq.desc64[si];
 2483                 if ((m->m_flags & M_VLANTAG) != 0)
 2484                         desc64->vtag = htole32(NFE_TX_VTAG |
 2485                             m->m_pkthdr.ether_vtag);
 2486                 if (tso_segsz != 0) {
 2487                         /*
 2488                          * XXX
 2489                          * The following indicates the descriptor element
 2490                          * is a 32bit quantity.
 2491                          */
 2492                         desc64->length |= htole16((uint16_t)tso_segsz);
 2493                         desc64->flags |= htole16(tso_segsz >> 16);
 2494                 }
 2495                 /*
 2496                  * finally, set the valid/checksum/TSO bit in the first
 2497                  * descriptor.
 2498                  */
 2499                 desc64->flags |= htole16(NFE_TX_VALID | cflags);
 2500         } else {
 2501                 if (sc->nfe_flags & NFE_JUMBO_SUP)
 2502                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
 2503                 else
 2504                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
 2505                 desc32 = &sc->txq.desc32[si];
 2506                 if (tso_segsz != 0) {
 2507                         /*
 2508                          * XXX
 2509                          * The following indicates the descriptor element
 2510                          * is a 32bit quantity.
 2511                          */
 2512                         desc32->length |= htole16((uint16_t)tso_segsz);
 2513                         desc32->flags |= htole16(tso_segsz >> 16);
 2514                 }
 2515                 /*
 2516                  * finally, set the valid/checksum/TSO bit in the first
 2517                  * descriptor.
 2518                  */
 2519                 desc32->flags |= htole16(NFE_TX_VALID | cflags);
 2520         }
 2521 
 2522         sc->txq.cur = prod;
 2523         prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
 2524         sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
 2525         sc->txq.data[prod].tx_data_map = map;
 2526         sc->txq.data[prod].m = m;
 2527 
 2528         bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
 2529 
 2530         return (0);
 2531 }
 2532 
 2533 
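      /*
       * Program the Rx filter.  Instead of a hash table, this hardware
       * appears to match multicast traffic against a single address/mask
       * pair: addr accumulates the bits set in every joined address, and
       * mask ends up with the bits that agree across all of them, so the
       * filter accepts a superset of the joined groups.
       */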
 2534 static void
 2535 nfe_setmulti(struct nfe_softc *sc)
 2536 {
 2537         struct ifnet *ifp = sc->nfe_ifp;
 2538         struct ifmultiaddr *ifma;
 2539         int i;
 2540         uint32_t filter;
 2541         uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
 2542         uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
 2543                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 2544         };
 2545 
 2546         NFE_LOCK_ASSERT(sc);
 2547 
 2548         if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
 2549                 bzero(addr, ETHER_ADDR_LEN);
 2550                 bzero(mask, ETHER_ADDR_LEN);
 2551                 goto done;
 2552         }
 2553 
 2554         bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
 2555         bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
 2556 
 2557         if_maddr_rlock(ifp);
 2558         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2559                 u_char *addrp;
 2560 
 2561                 if (ifma->ifma_addr->sa_family != AF_LINK)
 2562                         continue;
 2563 
 2564                 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
 2565                 for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2566                         u_int8_t mcaddr = addrp[i];
 2567                         addr[i] &= mcaddr;
 2568                         mask[i] &= ~mcaddr;
 2569                 }
 2570         }
 2571         if_maddr_runlock(ifp);
 2572 
 2573         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2574                 mask[i] |= addr[i];
 2575         }
 2576 
 2577 done:
 2578         addr[0] |= 0x01;        /* make sure multicast bit is set */
 2579 
 2580         NFE_WRITE(sc, NFE_MULTIADDR_HI,
 2581             addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
 2582         NFE_WRITE(sc, NFE_MULTIADDR_LO,
 2583             addr[5] <<  8 | addr[4]);
 2584         NFE_WRITE(sc, NFE_MULTIMASK_HI,
 2585             mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
 2586         NFE_WRITE(sc, NFE_MULTIMASK_LO,
 2587             mask[5] <<  8 | mask[4]);
 2588 
 2589         filter = NFE_READ(sc, NFE_RXFILTER);
 2590         filter &= NFE_PFF_RX_PAUSE;
 2591         filter |= NFE_RXFILTER_MAGIC;
 2592         filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
 2593         NFE_WRITE(sc, NFE_RXFILTER, filter);
 2594 }
 2595 
 2596 
 2597 static void
 2598 nfe_start(struct ifnet *ifp)
 2599 {
 2600         struct nfe_softc *sc = ifp->if_softc;
 2601 
 2602         NFE_LOCK(sc);
 2603         nfe_start_locked(ifp);
 2604         NFE_UNLOCK(sc);
 2605 }
 2606 
 2607 static void
 2608 nfe_start_locked(struct ifnet *ifp)
 2609 {
 2610         struct nfe_softc *sc = ifp->if_softc;
 2611         struct mbuf *m0;
 2612         int enq;
 2613 
 2614         NFE_LOCK_ASSERT(sc);
 2615 
 2616         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2617             IFF_DRV_RUNNING || sc->nfe_link == 0)
 2618                 return;
 2619 
 2620         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
 2621                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
 2622                 if (m0 == NULL)
 2623                         break;
 2624 
 2625                 if (nfe_encap(sc, &m0) != 0) {
 2626                         if (m0 == NULL)
 2627                                 break;
 2628                         IFQ_DRV_PREPEND(&ifp->if_snd, m0);
 2629                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2630                         break;
 2631                 }
 2632                 enq++;
 2633                 ETHER_BPF_MTAP(ifp, m0);
 2634         }
 2635 
 2636         if (enq > 0) {
 2637                 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
 2638                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2639 
 2640                 /* kick Tx */
 2641                 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
 2642 
 2643                 /*
 2644                  * Set a timeout in case the chip goes out to lunch.
 2645                  */
 2646                 sc->nfe_watchdog_timer = 5;
 2647         }
 2648 }
 2649 
 2650 
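      /*
       * Watchdog with staged recovery: first reclaim completed descriptors
       * in case a Tx completion interrupt was lost, then re-issue the Tx
       * kick up to three times in case the start command was lost, and only
       * then reset the interface.
       */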
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupts. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nfe_start_locked(ifp);
		return;
	}
	/* Check if we've lost a start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If lost start Tx commands turn out to be the usual
		 * cause of watchdog timeouts, this kick should move
		 * into nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);
}

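/*
 * if_init entry point, invoked e.g. when the interface is brought up.
 * Another lock-then-delegate wrapper around nfe_init_locked().
 */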
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}

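/*
 * Bring the hardware up: reset and (re)initialize the Tx ring and either
 * the standard or the jumbo Rx ring depending on MTU, program the MAC
 * address and ring base addresses, configure checksum/VLAN offload and
 * interrupt moderation, start the Rx/Tx engines, and kick off media
 * selection plus the 1 Hz housekeeping callout.
 */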
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wake up */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	/* Disable WOL. */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Clear hardware stats. */
	nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_set_intr(sc);
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

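/*
 * Quiesce the chip: stop the Tx/Rx engines and the callout, mask
 * interrupts, and release every mbuf still owned by the standard Rx,
 * jumbo Rx and Tx rings, harvesting the MIB counters one last time.
 */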
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* Free Rx and Tx mbufs still in the rings. */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}
	/* Update hardware stats. */
	nfe_stats_update(sc);
}

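/*
 * ifmedia handlers; both simply delegate to the MII layer under the
 * softc lock.
 */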
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}


static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NFE_UNLOCK(sc);
}

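/*
 * 1 Hz housekeeping callout: poll the PHY, accumulate hardware
 * statistics, run the Tx watchdog and re-arm itself.
 */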
void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_stats_update(sc);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}


static int
nfe_shutdown(device_t dev)
{

	return (nfe_suspend(dev));
}

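/*
 * Read the station address from the chip.  On chips flagged with
 * NFE_CORRECT_MACADDR the registers hold the address in the same order
 * nfe_set_macaddr() writes it; on older chips the factory-programmed
 * order is reversed, which the first branch below unwinds byte by byte.
 * As a worked example for the layout nfe_set_macaddr() uses, the address
 * 00:11:22:33:44:55 is written as NFE_MACADDR_LO = 0x5544 and
 * NFE_MACADDR_HI = 0x33221100.
 */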
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >>  8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >>  8) & 0xff;
		addr[0] = (val & 0xff);
	}
}


static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}


/*
 * Map a single buffer address.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}

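/*
 * Generic range-checked sysctl handler: export the current value and
 * accept a new one only if it lies within [low, high].
 * sysctl_hw_nfe_proc_limit() instantiates it for the Rx processing
 * limit, so (for unit 0) it can be tuned at runtime with e.g.
 *
 *	sysctl dev.nfe.0.process_limit=64
 *
 * where 64 is merely an illustrative value between NFE_PROC_MIN and
 * NFE_PROC_MAX.
 */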
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}


static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}


#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

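/*
 * Build the per-device sysctl tree.  "process_limit" also honors a
 * device.hints/loader tunable via resource_int_value(), clamped to the
 * valid range.  The "stats" subtree is only created for chips that
 * expose a hardware MIB (V1/V2/V3); the helper macros above keep the
 * per-counter registrations compact.
 */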
static void
nfe_sysctl_node(struct nfe_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nfe_hw_stats *stats;
	int error;

	stats = &sc->nfe_stats;
	ctx = device_get_sysctl_ctx(sc->nfe_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
	SYSCTL_ADD_PROC(ctx, child,
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
	    "max number of Rx events to process");

	sc->nfe_process_limit = NFE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->nfe_dev),
	    device_get_unit(sc->nfe_dev), "process_limit",
	    &sc->nfe_process_limit);
	if (error == 0) {
		if (sc->nfe_process_limit < NFE_PROC_MIN ||
		    sc->nfe_process_limit > NFE_PROC_MAX) {
			device_printf(sc->nfe_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", NFE_PROC_DEFAULT);
			sc->nfe_process_limit = NFE_PROC_DEFAULT;
		}
	}

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "NFE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
	    &stats->rx_frame_errors, "Framing Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
	    &stats->rx_extra_bytes, "Extra Bytes");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->rx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Runts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
	    &stats->rx_jumbos, "Jumbos");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
	    &stats->rx_fifo_overuns, "FIFO Overruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
	    &stats->rx_crc_errors, "CRC Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
	    &stats->rx_fae, "Frame Alignment Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
	    &stats->rx_len_errors, "Length Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
	    &stats->rx_unicast, "Unicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
	    &stats->rx_multicast, "Multicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
	    &stats->rx_broadcast, "Broadcast Frames");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
		    &stats->rx_octets, "Octets");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->rx_pause, "Pause Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
		    &stats->rx_drops, "Dropped Frames");
	}

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_octets, "Octets");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
	    &stats->tx_zero_rexmits, "Zero Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
	    &stats->tx_one_rexmits, "One Retransmit");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
	    &stats->tx_multi_rexmits, "Multiple Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->tx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
	    &stats->tx_fifo_underuns, "FIFO Underruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
	    &stats->tx_carrier_losts, "Carrier Losses");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
	    &stats->tx_excess_deferals, "Excess Deferrals");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
	    &stats->tx_retry_errors, "Retry Errors");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
		    &stats->tx_deferals, "Deferrals");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
		    &stats->tx_frames, "Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->tx_pause, "Pause Frames");
	}
	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
		    &stats->tx_unicast, "Unicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
		    &stats->tx_multicast, "Multicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
		    &stats->tx_broadcast, "Broadcast Frames");
	}
}

#undef NFE_SYSCTL_STAT_ADD32
#undef NFE_SYSCTL_STAT_ADD64

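/*
 * Clear the hardware MIB block.  The counters are read-to-clear, so a
 * dummy read of each 32-bit register (plus the V3-only Tx counters)
 * resets them; the values read here are deliberately discarded.
 */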
static void
nfe_stats_clear(struct nfe_softc *sc)
{
	int i, mib_cnt;

	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
		mib_cnt = NFE_NUM_MIB_STATV1;
	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
		mib_cnt = NFE_NUM_MIB_STATV2;
	else
		return;

	for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
		NFE_READ(sc, NFE_TX_OCTET + i);

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_READ(sc, NFE_TX_UNICAST);
		NFE_READ(sc, NFE_TX_MULTICAST);
		NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

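/*
 * Accumulate the hardware MIB counters into the softc-resident copy.
 * Since reading a counter also clears it (see nfe_stats_clear()), each
 * register value is a delta since the previous poll and can simply be
 * added.
 */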
static void
nfe_stats_update(struct nfe_softc *sc)
{
	struct nfe_hw_stats *stats;

	NFE_LOCK_ASSERT(sc);

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	stats = &sc->nfe_stats;
	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);

	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
	}

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

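/*
 * Drop a gigabit link down to 10/100 before suspend: restart
 * autonegotiation advertising 10/100 only and poll until the slower
 * link comes up.  Used by nfe_set_wol(), presumably because magic
 * packet reception in the powered-down state is only reliable at
 * 10/100 speeds; if no link can be established, a 100baseTX
 * full-duplex MAC configuration is forced as a last resort.
 */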
static void
nfe_set_linkspeed(struct nfe_softc *sc)
{
	struct mii_softc *miisc;
	struct mii_data *mii;
	int aneg, i, phyno;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	miisc = LIST_FIRST(&mii->mii_phys);
	phyno = miisc->mii_phy;
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		mii_phy_reset(miisc);
	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until nfe(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					nfe_mac_config(sc, mii);
					return;
				default:
					break;
				}
			}
			NFE_UNLOCK(sc);
			pause("nfelnk", hz);
			NFE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->nfe_dev,
			    "establishing a link failed, WOL may not work!\n");
	}
	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	nfe_mac_config(sc, mii);
}

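/*
 * Arm Wake On LAN before suspend: program the WOL control register for
 * magic packets if enabled, downshift the link via nfe_set_linkspeed(),
 * keep the Rx engine running (with the ring address cleared), and set
 * PME/PME-enable in the PCI power-management capability so the chip can
 * wake the system.
 */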
static void
nfe_set_wol(struct nfe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t wolctl;
	int pmc;
	uint16_t pmstat;

	NFE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
		return;
	ifp = sc->nfe_ifp;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		wolctl = NFE_WOL_MAGIC;
	else
		wolctl = 0;
	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		nfe_set_linkspeed(sc);
		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
			NFE_WRITE(sc, NFE_PWR2_CTL,
			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
		/* Enable Rx. */
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
		    NFE_RX_START);
	}
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
