FreeBSD/Linux Kernel Cross Reference
sys/dev/nfe/if_nfe.c

/*      $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static int  nfe_detect_msik9(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(if_t, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(if_t);
static void nfe_start_locked(if_t);
static void nfe_watchdog(if_t);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(if_t);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(if_t);
static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...)        do {                            \
        if (nfedebug)                                           \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#define DPRINTFN(sc, n, ...)    do {                            \
        if (nfedebug >= (n))                                    \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif

#define NFE_LOCK(_sc)           mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)         mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
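/*
 * These are standard loader tunables read at boot; for example, MSI
 * and MSI-X can be disabled from /boot/loader.conf with:
 *
 *      hw.nfe.msi_disable="1"
 *      hw.nfe.msix_disable="1"
 */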

static device_method_t nfe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         nfe_probe),
        DEVMETHOD(device_attach,        nfe_attach),
        DEVMETHOD(device_detach,        nfe_detach),
        DEVMETHOD(device_suspend,       nfe_suspend),
        DEVMETHOD(device_resume,        nfe_resume),
        DEVMETHOD(device_shutdown,      nfe_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       nfe_miibus_readreg),
        DEVMETHOD(miibus_writereg,      nfe_miibus_writereg),
        DEVMETHOD(miibus_statchg,       nfe_miibus_statchg),

        DEVMETHOD_END
};

static driver_t nfe_driver = {
        "nfe",
        nfe_methods,
        sizeof(struct nfe_softc)
};

DRIVER_MODULE(nfe, pci, nfe_driver, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, 0, 0);

static struct nfe_type nfe_devs[] = {
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
            "NVIDIA nForce MCP Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
            "NVIDIA nForce2 MCP2 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
            "NVIDIA nForce2 400 MCP4 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
            "NVIDIA nForce2 400 MCP5 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
            "NVIDIA nForce3 MCP3 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
            "NVIDIA nForce3 250 MCP6 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
            "NVIDIA nForce3 MCP7 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
            "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
            "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP10 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP11 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
            "NVIDIA nForce 430 MCP12 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
            "NVIDIA nForce 430 MCP13 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
            "NVIDIA nForce MCP73 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
            "NVIDIA nForce MCP77 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
            "NVIDIA nForce MCP79 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
            "NVIDIA nForce MCP89 Networking Adapter"},
        {0, 0, NULL}
};

/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
        struct nfe_type *t;

        t = nfe_devs;
        /* Check for matching PCI device IDs */
        while (t->name != NULL) {
                if ((pci_get_vendor(dev) == t->vid_id) &&
                    (pci_get_device(dev) == t->dev_id)) {
                        device_set_desc(dev, t->name);
                        return (BUS_PROBE_DEFAULT);
                }
                t++;
        }

        return (ENXIO);
}

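/*
 * Try to set up MSI-X: map the MSI-X table (BAR(2)) and pending bit
 * array (BAR(3)) and allocate the message vectors.  On failure all
 * resources are released again and the caller falls back to MSI or
 * a shared INTx interrupt.
 */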
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
        int rid;

        rid = PCIR_BAR(2);
        sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);
        if (sc->nfe_msix_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX table resource\n");
                return;
        }
        rid = PCIR_BAR(3);
        sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (sc->nfe_msix_pba_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX PBA resource\n");
                bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
                return;
        }

        if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
                if (count == NFE_MSI_MESSAGES) {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "Using %d MSIX messages\n", count);
                        sc->nfe_msix = 1;
                } else {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "couldn't allocate MSIX\n");
                        pci_release_msi(sc->nfe_dev);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(3), sc->nfe_msix_pba_res);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(2), sc->nfe_msix_res);
                        sc->nfe_msix_pba_res = NULL;
                        sc->nfe_msix_res = NULL;
                }
        }
}

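/*
 * Detect the MSI K9N6PGM2-V2 (MS-7309) board from the SMBIOS base
 * board strings.  When it is found, nfe_attach() probes the MII bus
 * at PHY address 0 only instead of MII_PHY_ANY, a quirk for the
 * MCP61 PHY wiring on that board.
 */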
static int
nfe_detect_msik9(struct nfe_softc *sc)
{
        static const char *maker = "MSI";
        static const char *product = "K9N6PGM2-V2 (MS-7309)";
        char *m, *p;
        int found;

        found = 0;
        m = kern_getenv("smbios.planar.maker");
        p = kern_getenv("smbios.planar.product");
        if (m != NULL && p != NULL) {
                if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
                        found = 1;
        }
        if (m != NULL)
                freeenv(m);
        if (p != NULL)
                freeenv(p);

        return (found);
}

static int
nfe_attach(device_t dev)
{
        struct nfe_softc *sc;
        if_t ifp;
        bus_addr_t dma_addr_max;
        int error = 0, i, msic, phyloc, reg, rid;

        sc = device_get_softc(dev);
        sc->nfe_dev = dev;

        mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

        pci_enable_busmaster(dev);

        rid = PCIR_BAR(0);
        sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->nfe_res[0] == NULL) {
                device_printf(dev, "couldn't map memory resources\n");
                mtx_destroy(&sc->nfe_mtx);
                return (ENXIO);
        }

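        /*
         * For PCI Express devices, raise the maximum read request size
         * to 4096 bytes (encoding 5 in bits 14:12 of the device control
         * register at offset 0x08) and warn when the negotiated link
         * width (link status register) is smaller than the maximum
         * width the device advertises (link capability register).
         */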
        if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
                uint16_t v, width;

                v = pci_read_config(dev, reg + 0x08, 2);
                /* Change max. read request size to 4096. */
                v &= ~(7 << 12);
                v |= (5 << 12);
                pci_write_config(dev, reg + 0x08, v, 2);

                v = pci_read_config(dev, reg + 0x0c, 2);
                /* link capability */
                v = (v >> 4) & 0x0f;
                width = pci_read_config(dev, reg + 0x12, 2);
                /* negotiated link width */
                width = (width >> 4) & 0x3f;
                if (v != width)
                        device_printf(sc->nfe_dev,
                            "warning, negotiated width of link(x%d) != "
                            "max. width of link(x%d)\n", width, v);
        }

        if (nfe_can_use_msix(sc) == 0) {
                device_printf(sc->nfe_dev,
                    "MSI/MSI-X capability black-listed, will use INTx\n");
                msix_disable = 1;
                msi_disable = 1;
        }

        /* Allocate interrupt */
        if (msix_disable == 0 || msi_disable == 0) {
                if (msix_disable == 0 &&
                    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
                        nfe_alloc_msix(sc, msic);
                if (msi_disable == 0 && sc->nfe_msix == 0 &&
                    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
                    pci_alloc_msi(dev, &msic) == 0) {
                        if (msic == NFE_MSI_MESSAGES) {
                                if (bootverbose)
                                        device_printf(dev,
                                            "Using %d MSI messages\n", msic);
                                sc->nfe_msi = 1;
                        } else
                                pci_release_msi(dev);
                }
        }

        if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
                rid = 0;
                sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (sc->nfe_irq[0] == NULL) {
                        device_printf(dev, "couldn't allocate IRQ resources\n");
                        error = ENXIO;
                        goto fail;
                }
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                            SYS_RES_IRQ, &rid, RF_ACTIVE);
                        if (sc->nfe_irq[i] == NULL) {
                                device_printf(dev,
                                    "couldn't allocate IRQ resources for "
                                    "message %d\n", rid);
                                error = ENXIO;
                                goto fail;
                        }
                }
                /* Map interrupts to vector 0. */
                if (sc->nfe_msix != 0) {
                        NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
                } else if (sc->nfe_msi != 0) {
                        NFE_WRITE(sc, NFE_MSI_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSI_MAP1, 0);
                }
        }

        /* Set IRQ status/mask register. */
        sc->nfe_irq_status = NFE_IRQ_STATUS;
        sc->nfe_irq_mask = NFE_IRQ_MASK;
        sc->nfe_intrs = NFE_IRQ_WANTED;
        sc->nfe_nointrs = 0;
        if (sc->nfe_msix != 0) {
                sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
                sc->nfe_nointrs = NFE_IRQ_WANTED;
        } else if (sc->nfe_msi != 0) {
                sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
                sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
        }

        sc->nfe_devid = pci_get_device(dev);
        sc->nfe_revid = pci_get_revid(dev);
        sc->nfe_flags = 0;

        switch (sc->nfe_devid) {
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
                break;
        case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
                break;
        case PCI_PRODUCT_NVIDIA_CK804_LAN1:
        case PCI_PRODUCT_NVIDIA_CK804_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_MIB_V1;
                break;
        case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
                break;
        case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
                    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
                break;
        case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
                /* XXX flow control */
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
                    NFE_CORRECT_MACADDR | NFE_MIB_V3;
                break;
        case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP89_LAN:
                /* XXX flow control */
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
                break;
        case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
                    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
                    NFE_MIB_V2;
                break;
        }

        nfe_power(sc);
        /* Check for reversed ethernet address */
        if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
                sc->nfe_flags |= NFE_CORRECT_MACADDR;
        nfe_get_macaddr(sc, sc->eaddr);
        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         */
        dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
        if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
                dma_addr_max = NFE_DMA_MAXADDR;
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->nfe_dev),       /* parent */
            1, 0,                               /* alignment, boundary */
            dma_addr_max,                       /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT, 0,         /* maxsize, nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->nfe_parent_tag);
        if (error)
                goto fail;

        ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_gethandle()\n");
                error = ENOSPC;
                goto fail;
        }

        /*
         * Allocate Tx and Rx rings.
         */
        if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
                goto fail;

        if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
                goto fail;

        nfe_alloc_jrx_ring(sc, &sc->jrxq);
        /* Create sysctl node. */
        nfe_sysctl_node(sc);

        if_setsoftc(ifp, sc);
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        if_setioctlfn(ifp, nfe_ioctl);
        if_setstartfn(ifp, nfe_start);
        if_sethwassist(ifp, 0);
        if_setcapabilities(ifp, 0);
        if_setinitfn(ifp, nfe_init);
        if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
        if_setsendqready(ifp);

        if (sc->nfe_flags & NFE_HW_CSUM) {
                if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
                if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
        }
        if_setcapenable(ifp, if_getcapabilities(ifp));

        sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
        /* VLAN capability setup. */
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
        if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
                if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
                if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
                        if_setcapabilitiesbit(ifp,
                            (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
        }

        if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
                if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
        if_setcapenable(ifp, if_getcapabilities(ifp));

        /*
         * Tell the upper layer(s) we support long frames.
         * Must appear after the call to ether_ifattach() because
         * ether_ifattach() sets ifi_hdrlen to the default value.
         */
        if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

#ifdef DEVICE_POLLING
        if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

        /* Do MII setup */
        phyloc = MII_PHY_ANY;
        if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
                if (nfe_detect_msik9(sc) != 0)
                        phyloc = 0;
        }
        error = mii_attach(dev, &sc->nfe_miibus, ifp,
            (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
            BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }
        ether_ifattach(ifp, sc->eaddr);

        NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
        sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->nfe_tq);
        taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->nfe_dev));
        error = 0;
        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                error = bus_setup_intr(dev, sc->nfe_irq[0],
                    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                    &sc->nfe_intrhand[0]);
        } else {
                for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                        error = bus_setup_intr(dev, sc->nfe_irq[i],
                            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                            &sc->nfe_intrhand[i]);
                        if (error != 0)
                                break;
                }
        }
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                nfe_detach(dev);

        return (error);
}

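/*
 * Tear down everything nfe_attach() set up.  Note that the station
 * address is written back to the chip in its original byte order
 * (reversed on chips without NFE_CORRECT_MACADDR) before the
 * interface is freed, presumably so that firmware or other drivers
 * that expect the factory ordering still read a valid address.
 */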
static int
nfe_detach(device_t dev)
{
        struct nfe_softc *sc;
        if_t ifp;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int i, rid;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
        ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
        if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif
        if (device_is_attached(dev)) {
                NFE_LOCK(sc);
                nfe_stop(ifp);
                if_setflagbits(ifp, 0, IFF_UP);
                NFE_UNLOCK(sc);
                callout_drain(&sc->nfe_stat_ch);
                ether_ifdetach(ifp);
        }

        if (ifp) {
                /* restore ethernet address */
                if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
                        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                                eaddr[i] = sc->eaddr[5 - i];
                        }
                } else
                        bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
                nfe_set_macaddr(sc, eaddr);
                if_free(ifp);
        }
        if (sc->nfe_miibus)
                device_delete_child(dev, sc->nfe_miibus);
        bus_generic_detach(dev);
        if (sc->nfe_tq != NULL) {
                taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
        }

        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                if (sc->nfe_intrhand[i] != NULL) {
                        bus_teardown_intr(dev, sc->nfe_irq[i],
                            sc->nfe_intrhand[i]);
                        sc->nfe_intrhand[i] = NULL;
                }
        }

        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                if (sc->nfe_irq[0] != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, 0,
                            sc->nfe_irq[0]);
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        if (sc->nfe_irq[i] != NULL) {
                                bus_release_resource(dev, SYS_RES_IRQ, rid,
                                    sc->nfe_irq[i]);
                                sc->nfe_irq[i] = NULL;
                        }
                }
                pci_release_msi(dev);
        }
        if (sc->nfe_msix_pba_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
                    sc->nfe_msix_pba_res);
                sc->nfe_msix_pba_res = NULL;
        }
        if (sc->nfe_msix_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
        }
        if (sc->nfe_res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
                    sc->nfe_res[0]);
                sc->nfe_res[0] = NULL;
        }

        nfe_free_tx_ring(sc, &sc->txq);
        nfe_free_rx_ring(sc, &sc->rxq);
        nfe_free_jrx_ring(sc, &sc->jrxq);

        if (sc->nfe_parent_tag) {
                bus_dma_tag_destroy(sc->nfe_parent_tag);
                sc->nfe_parent_tag = NULL;
        }

        mtx_destroy(&sc->nfe_mtx);

        return (0);
}

static int
nfe_suspend(device_t dev)
{
        struct nfe_softc *sc;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        nfe_stop(sc->nfe_ifp);
        nfe_set_wol(sc);
        sc->nfe_suspended = 1;
        NFE_UNLOCK(sc);

        return (0);
}

static int
nfe_resume(device_t dev)
{
        struct nfe_softc *sc;
        if_t ifp;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        nfe_power(sc);
        ifp = sc->nfe_ifp;
        if (if_getflags(ifp) & IFF_UP)
                nfe_init_locked(sc);
        sc->nfe_suspended = 0;
        NFE_UNLOCK(sc);

        return (0);
}

static int
nfe_can_use_msix(struct nfe_softc *sc)
{
        static struct msix_blacklist {
                char    *maker;
                char    *product;
        } msix_blacklists[] = {
                { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
        };

        struct msix_blacklist *mblp;
        char *maker, *product;
        int count, n, use_msix;

        /*
         * Search the base board manufacturer and product name table
         * to see if this system has a known MSI/MSI-X issue.
         */
        maker = kern_getenv("smbios.planar.maker");
        product = kern_getenv("smbios.planar.product");
        use_msix = 1;
        if (maker != NULL && product != NULL) {
                count = nitems(msix_blacklists);
                mblp = msix_blacklists;
                for (n = 0; n < count; n++) {
                        if (strcmp(maker, mblp->maker) == 0 &&
                            strcmp(product, mblp->product) == 0) {
                                use_msix = 0;
                                break;
                        }
                        mblp++;
                }
        }
        if (maker != NULL)
                freeenv(maker);
        if (product != NULL)
                freeenv(product);

        return (use_msix);
}

/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
        uint32_t pwr;

        if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
                return;
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
        NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
        DELAY(100);
        NFE_WRITE(sc, NFE_MAC_RESET, 0);
        DELAY(100);
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
        pwr = NFE_READ(sc, NFE_PWR2_CTL);
        pwr &= ~NFE_PWR2_WAKEUP_MASK;
        if (sc->nfe_revid >= 0xa3 &&
            (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
                pwr |= NFE_PWR2_REVA3;
        NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}

static void
nfe_miibus_statchg(device_t dev)
{
        struct nfe_softc *sc;
        struct mii_data *mii;
        if_t ifp;
        uint32_t rxctl, txctl;

        sc = device_get_softc(dev);

        mii = device_get_softc(sc->nfe_miibus);
        ifp = sc->nfe_ifp;

        sc->nfe_link = 0;
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                case IFM_1000_T:
                        sc->nfe_link = 1;
                        break;
                default:
                        break;
                }
        }

        nfe_mac_config(sc, mii);
        txctl = NFE_READ(sc, NFE_TX_CTL);
        rxctl = NFE_READ(sc, NFE_RX_CTL);
        if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                txctl |= NFE_TX_START;
                rxctl |= NFE_RX_START;
        } else {
                txctl &= ~NFE_TX_START;
                rxctl &= ~NFE_RX_START;
        }
        NFE_WRITE(sc, NFE_TX_CTL, txctl);
        NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}

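/*
 * Program the MAC to match the media settings negotiated by the
 * PHY: duplex, link speed, the random seed register and, on chips
 * that support it, Rx/Tx pause frame handling.
 */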
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
        uint32_t link, misc, phy, seed;
        uint32_t val;

        NFE_LOCK_ASSERT(sc);

        phy = NFE_READ(sc, NFE_PHY_IFACE);
        phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

        seed = NFE_READ(sc, NFE_RNDSEED);
        seed &= ~NFE_SEED_MASK;

        misc = NFE_MISC1_MAGIC;
        link = NFE_MEDIA_SET;

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
                phy  |= NFE_PHY_HDX;    /* half-duplex */
                misc |= NFE_MISC1_HDX;
        }

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_T:        /* full-duplex only */
                link |= NFE_MEDIA_1000T;
                seed |= NFE_SEED_1000T;
                phy  |= NFE_PHY_1000T;
                break;
        case IFM_100_TX:
                link |= NFE_MEDIA_100TX;
                seed |= NFE_SEED_100TX;
                phy  |= NFE_PHY_100TX;
                break;
        case IFM_10_T:
                link |= NFE_MEDIA_10T;
                seed |= NFE_SEED_10T;
                break;
        }

        if ((phy & 0x10000000) != 0) {
                if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
                        val = NFE_R1_MAGIC_1000;
                else
                        val = NFE_R1_MAGIC_10_100;
        } else
                val = NFE_R1_MAGIC_DEFAULT;
        NFE_WRITE(sc, NFE_SETUP_R1, val);

        NFE_WRITE(sc, NFE_RNDSEED, seed);       /* XXX: gigabit NICs only? */

        NFE_WRITE(sc, NFE_PHY_IFACE, phy);
        NFE_WRITE(sc, NFE_MISC1, misc);
        NFE_WRITE(sc, NFE_LINKSPEED, link);

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                /* It seems all hardware supports Rx pause frames. */
                val = NFE_READ(sc, NFE_RXFILTER);
                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_RXPAUSE) != 0)
                        val |= NFE_PFF_RX_PAUSE;
                else
                        val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        val = NFE_READ(sc, NFE_MISC1);
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_TXPAUSE) != 0) {
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_ENABLE);
                                val |= NFE_MISC1_TX_PAUSE;
                        } else {
                                val &= ~NFE_MISC1_TX_PAUSE;
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_DISABLE);
                        }
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        } else {
                /* disable rx/tx pause frames */
                val = NFE_READ(sc, NFE_RXFILTER);
                val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                            NFE_TX_PAUSE_FRAME_DISABLE);
                        val = NFE_READ(sc, NFE_MISC1);
                        val &= ~NFE_MISC1_TX_PAUSE;
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        }
}

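/*
 * Read a PHY register through the MAC's MII interface: clear stale
 * status, issue the read command, then poll NFE_PHY_CTL until the
 * busy bit clears (up to NFE_TIMEOUT polls of 100us each).
 */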
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t val;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
        if (ntries == NFE_TIMEOUT) {
                DPRINTFN(sc, 2, "timeout waiting for PHY\n");
                return 0;
        }

        if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
                DPRINTFN(sc, 2, "could not read PHY\n");
                return 0;
        }

        val = NFE_READ(sc, NFE_PHY_DATA);
        if (val != 0xffffffff && val != 0)
                sc->mii_phyaddr = phy;

        DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

        return (val);
}

static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t ctl;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_DATA, val);
        ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
        NFE_WRITE(sc, NFE_PHY_CTL, ctl);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
#ifdef NFE_DEBUG
        if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
                device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
        return (0);
}

struct nfe_dmamap_arg {
        bus_addr_t nfe_busaddr;
};

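/*
 * Allocate the standard Rx ring: one NFE_RING_ALIGN-aligned DMA
 * segment holding NFE_RX_RING_COUNT descriptors (64-bit descriptors
 * on NFE_40BIT_ADDR chips, 32-bit otherwise), plus one DMA map per
 * Rx buffer and a spare map used when replacing an mbuf.
 */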
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        int i, error, descsize;

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->desc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->desc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->cur = ring->next = 0;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_RX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
            NFE_RX_RING_COUNT * descsize,       /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->rx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
                goto fail;
        }

        /* allocate memory for the descriptors */
        error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create desc DMA map\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->desc64 = desc;
        else
                ring->desc32 = desc;

        /* map desc to device visible address space */
        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
            NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not load desc DMA map\n");
                goto fail;
        }
        ring->physaddr = ctx.nfe_busaddr;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            1, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES, 1,                /* maxsize, nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &ring->rx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
                goto fail;
        }

        error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create Rx DMA spare map\n");
                goto fail;
        }

        /*
         * Pre-allocate Rx buffers and populate Rx ring.
         */
        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                data = &sc->rxq.data[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->rx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create Rx DMA map\n");
                        goto fail;
                }
        }

fail:
        return (error);
}

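/*
 * Allocate the optional jumbo Rx ring.  Unlike the standard ring,
 * failure here is not fatal: the ring is torn down again and jumbo
 * frame support is simply disabled.
 */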
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        int i, error, descsize;

        if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
                return;
        if (jumbo_disable != 0) {
                device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
                sc->nfe_jumbo_disable = 1;
                return;
        }

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->jdesc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->jdesc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->jcur = ring->jnext = 0;

        /* Create DMA tag for jumbo Rx ring. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
            1,                                  /* nsegments */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo ring DMA tag\n");
                goto fail;
        }

        /* Create DMA tag for jumbo Rx buffers. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MJUM9BYTES,                         /* maxsize */
            1,                                  /* nsegments */
            MJUM9BYTES,                         /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx buffer DMA tag\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
        error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not allocate DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->jdesc64 = desc;
        else
                ring->jdesc32 = desc;

        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
            NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not load DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        ring->jphysaddr = ctx.nfe_busaddr;

        /* Create DMA maps for jumbo Rx buffers. */
        error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx DMA spare map\n");
                goto fail;
        }

        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
                data = &sc->jrxq.jdata[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->jrx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create jumbo Rx DMA map\n");
                        goto fail;
                }
        }

        return;

fail:
 1294         /*
  1295          * Running without jumbo frame support is fine in most cases,
  1296          * so do not fail when DMA tag/map creation for jumbo frames fails.
 1297          */
 1298         nfe_free_jrx_ring(sc, ring);
 1299         device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
 1300             "resource shortage\n");
 1301         sc->nfe_jumbo_disable = 1;
 1302 }
 1303 
 1304 static int
 1305 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
 1306 {
 1307         void *desc;
 1308         size_t descsize;
 1309         int i;
 1310 
 1311         ring->cur = ring->next = 0;
 1312         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1313                 desc = ring->desc64;
 1314                 descsize = sizeof (struct nfe_desc64);
 1315         } else {
 1316                 desc = ring->desc32;
 1317                 descsize = sizeof (struct nfe_desc32);
 1318         }
 1319         bzero(desc, descsize * NFE_RX_RING_COUNT);
 1320         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
 1321                 if (nfe_newbuf(sc, i) != 0)
 1322                         return (ENOBUFS);
 1323         }
 1324 
 1325         bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
 1326             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1327 
 1328         return (0);
 1329 }
 1330 
 1331 static int
 1332 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
 1333 {
 1334         void *desc;
 1335         size_t descsize;
 1336         int i;
 1337 
 1338         ring->jcur = ring->jnext = 0;
 1339         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1340                 desc = ring->jdesc64;
 1341                 descsize = sizeof (struct nfe_desc64);
 1342         } else {
 1343                 desc = ring->jdesc32;
 1344                 descsize = sizeof (struct nfe_desc32);
 1345         }
 1346         bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
 1347         for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
 1348                 if (nfe_jnewbuf(sc, i) != 0)
 1349                         return (ENOBUFS);
 1350         }
 1351 
 1352         bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
 1353             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1354 
 1355         return (0);
 1356 }
 1357 
 1358 static void
 1359 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
 1360 {
 1361         struct nfe_rx_data *data;
 1362         void *desc;
 1363         int i;
 1364 
 1365         if (sc->nfe_flags & NFE_40BIT_ADDR)
 1366                 desc = ring->desc64;
 1367         else
 1368                 desc = ring->desc32;
 1369 
 1370         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
 1371                 data = &ring->data[i];
 1372                 if (data->rx_data_map != NULL) {
 1373                         bus_dmamap_destroy(ring->rx_data_tag,
 1374                             data->rx_data_map);
 1375                         data->rx_data_map = NULL;
 1376                 }
 1377                 if (data->m != NULL) {
 1378                         m_freem(data->m);
 1379                         data->m = NULL;
 1380                 }
 1381         }
 1382         if (ring->rx_data_tag != NULL) {
 1383                 if (ring->rx_spare_map != NULL) {
 1384                         bus_dmamap_destroy(ring->rx_data_tag,
 1385                             ring->rx_spare_map);
 1386                         ring->rx_spare_map = NULL;
 1387                 }
 1388                 bus_dma_tag_destroy(ring->rx_data_tag);
 1389                 ring->rx_data_tag = NULL;
 1390         }
 1391 
 1392         if (desc != NULL) {
 1393                 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
 1394                 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
 1395                 ring->desc64 = NULL;
 1396                 ring->desc32 = NULL;
 1397         }
 1398         if (ring->rx_desc_tag != NULL) {
 1399                 bus_dma_tag_destroy(ring->rx_desc_tag);
 1400                 ring->rx_desc_tag = NULL;
 1401         }
 1402 }
 1403 
 1404 static void
 1405 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
 1406 {
 1407         struct nfe_rx_data *data;
 1408         void *desc;
 1409         int i;
 1410 
 1411         if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
 1412                 return;
 1413 
 1414         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1415                 desc = ring->jdesc64;
 1416         } else {
 1417                 desc = ring->jdesc32;
 1418         }
 1419 
 1420         for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
 1421                 data = &ring->jdata[i];
 1422                 if (data->rx_data_map != NULL) {
 1423                         bus_dmamap_destroy(ring->jrx_data_tag,
 1424                             data->rx_data_map);
 1425                         data->rx_data_map = NULL;
 1426                 }
 1427                 if (data->m != NULL) {
 1428                         m_freem(data->m);
 1429                         data->m = NULL;
 1430                 }
 1431         }
 1432         if (ring->jrx_data_tag != NULL) {
 1433                 if (ring->jrx_spare_map != NULL) {
 1434                         bus_dmamap_destroy(ring->jrx_data_tag,
 1435                             ring->jrx_spare_map);
 1436                         ring->jrx_spare_map = NULL;
 1437                 }
 1438                 bus_dma_tag_destroy(ring->jrx_data_tag);
 1439                 ring->jrx_data_tag = NULL;
 1440         }
 1441 
 1442         if (desc != NULL) {
 1443                 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
 1444                 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
 1445                 ring->jdesc64 = NULL;
 1446                 ring->jdesc32 = NULL;
 1447         }
 1448 
 1449         if (ring->jrx_desc_tag != NULL) {
 1450                 bus_dma_tag_destroy(ring->jrx_desc_tag);
 1451                 ring->jrx_desc_tag = NULL;
 1452         }
 1453 }
 1454 
 1455 static int
 1456 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1457 {
 1458         struct nfe_dmamap_arg ctx;
 1459         int i, error;
 1460         void *desc;
  1461         size_t descsize;
 1462 
 1463         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1464                 desc = ring->desc64;
 1465                 descsize = sizeof (struct nfe_desc64);
 1466         } else {
 1467                 desc = ring->desc32;
 1468                 descsize = sizeof (struct nfe_desc32);
 1469         }
 1470 
 1471         ring->queued = 0;
 1472         ring->cur = ring->next = 0;
 1473 
 1474         error = bus_dma_tag_create(sc->nfe_parent_tag,
 1475             NFE_RING_ALIGN, 0,                  /* alignment, boundary */
 1476             BUS_SPACE_MAXADDR,                  /* lowaddr */
 1477             BUS_SPACE_MAXADDR,                  /* highaddr */
 1478             NULL, NULL,                         /* filter, filterarg */
 1479             NFE_TX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
 1480             NFE_TX_RING_COUNT * descsize,       /* maxsegsize */
 1481             0,                                  /* flags */
 1482             NULL, NULL,                         /* lockfunc, lockarg */
 1483             &ring->tx_desc_tag);
 1484         if (error != 0) {
 1485                 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
 1486                 goto fail;
 1487         }
 1488 
 1489         error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
 1490             BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
 1491         if (error != 0) {
  1492                 device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
 1493                 goto fail;
 1494         }
 1495         if (sc->nfe_flags & NFE_40BIT_ADDR)
 1496                 ring->desc64 = desc;
 1497         else
 1498                 ring->desc32 = desc;
 1499 
 1500         ctx.nfe_busaddr = 0;
 1501         error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
 1502             NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
 1503         if (error != 0) {
 1504                 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
 1505                 goto fail;
 1506         }
 1507         ring->physaddr = ctx.nfe_busaddr;
 1508 
 1509         error = bus_dma_tag_create(sc->nfe_parent_tag,
 1510             1, 0,
 1511             BUS_SPACE_MAXADDR,
 1512             BUS_SPACE_MAXADDR,
 1513             NULL, NULL,
 1514             NFE_TSO_MAXSIZE,
 1515             NFE_MAX_SCATTER,
 1516             NFE_TSO_MAXSGSIZE,
 1517             0,
 1518             NULL, NULL,
 1519             &ring->tx_data_tag);
 1520         if (error != 0) {
 1521                 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
 1522                 goto fail;
 1523         }
 1524 
 1525         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
 1526                 error = bus_dmamap_create(ring->tx_data_tag, 0,
 1527                     &ring->data[i].tx_data_map);
 1528                 if (error != 0) {
 1529                         device_printf(sc->nfe_dev,
 1530                             "could not create Tx DMA map\n");
 1531                         goto fail;
 1532                 }
 1533         }
 1534 
 1535 fail:
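               /* NB: reached on success as well, with error still zero. */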
 1536         return (error);
 1537 }
 1538 
 1539 static void
 1540 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1541 {
 1542         void *desc;
 1543         size_t descsize;
 1544 
 1545         sc->nfe_force_tx = 0;
 1546         ring->queued = 0;
 1547         ring->cur = ring->next = 0;
 1548         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1549                 desc = ring->desc64;
 1550                 descsize = sizeof (struct nfe_desc64);
 1551         } else {
 1552                 desc = ring->desc32;
 1553                 descsize = sizeof (struct nfe_desc32);
 1554         }
 1555         bzero(desc, descsize * NFE_TX_RING_COUNT);
 1556 
 1557         bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
 1558             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1559 }
 1560 
 1561 static void
 1562 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
 1563 {
 1564         struct nfe_tx_data *data;
 1565         void *desc;
 1566         int i;
 1567 
 1568         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1569                 desc = ring->desc64;
 1570         } else {
 1571                 desc = ring->desc32;
 1572         }
 1573 
 1574         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
 1575                 data = &ring->data[i];
 1576 
 1577                 if (data->m != NULL) {
 1578                         bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
 1579                             BUS_DMASYNC_POSTWRITE);
 1580                         bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
 1581                         m_freem(data->m);
 1582                         data->m = NULL;
 1583                 }
 1584                 if (data->tx_data_map != NULL) {
 1585                         bus_dmamap_destroy(ring->tx_data_tag,
 1586                             data->tx_data_map);
 1587                         data->tx_data_map = NULL;
 1588                 }
 1589         }
 1590 
 1591         if (ring->tx_data_tag != NULL) {
 1592                 bus_dma_tag_destroy(ring->tx_data_tag);
 1593                 ring->tx_data_tag = NULL;
 1594         }
 1595 
 1596         if (desc != NULL) {
 1597                 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
 1598                     BUS_DMASYNC_POSTWRITE);
 1599                 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
 1600                 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
 1601                 ring->desc64 = NULL;
 1602                 ring->desc32 = NULL;
 1603                 bus_dma_tag_destroy(ring->tx_desc_tag);
 1604                 ring->tx_desc_tag = NULL;
 1605         }
 1606 }
 1607 
 1608 #ifdef DEVICE_POLLING
 1609 static poll_handler_t nfe_poll;
 1610 
 1611 static int
 1612 nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
 1613 {
 1614         struct nfe_softc *sc = if_getsoftc(ifp);
 1615         uint32_t r;
 1616         int rx_npkts = 0;
 1617 
 1618         NFE_LOCK(sc);
 1619 
 1620         if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
 1621                 NFE_UNLOCK(sc);
 1622                 return (rx_npkts);
 1623         }
 1624 
 1625         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
  1626                 nfe_jrxeof(sc, count, &rx_npkts);
 1627         else
  1628                 nfe_rxeof(sc, count, &rx_npkts);
 1629         nfe_txeof(sc);
 1630         if (!if_sendq_empty(ifp))
 1631                 nfe_start_locked(ifp);
 1632 
 1633         if (cmd == POLL_AND_CHECK_STATUS) {
 1634                 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
 1635                         NFE_UNLOCK(sc);
 1636                         return (rx_npkts);
 1637                 }
 1638                 NFE_WRITE(sc, sc->nfe_irq_status, r);
 1639 
 1640                 if (r & NFE_IRQ_LINK) {
 1641                         NFE_READ(sc, NFE_PHY_STATUS);
 1642                         NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
 1643                         DPRINTF(sc, "link state changed\n");
 1644                 }
 1645         }
 1646         NFE_UNLOCK(sc);
 1647         return (rx_npkts);
 1648 }
 1649 #endif /* DEVICE_POLLING */
 1650 
 1651 static void
 1652 nfe_set_intr(struct nfe_softc *sc)
 1653 {
 1654 
 1655         if (sc->nfe_msi != 0)
 1656                 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
 1657 }
 1658 
  1659 /* In MSI-X mode, a write to the mask registers behaves as XOR. */
 1660 static __inline void
 1661 nfe_enable_intr(struct nfe_softc *sc)
 1662 {
 1663 
 1664         if (sc->nfe_msix != 0) {
 1665                 /* XXX Should have a better way to enable interrupts! */
 1666                 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
 1667                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
 1668         } else
 1669                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
 1670 }
 1671 
 1672 static __inline void
 1673 nfe_disable_intr(struct nfe_softc *sc)
 1674 {
 1675 
 1676         if (sc->nfe_msix != 0) {
 1677                 /* XXX Should have a better way to disable interrupts! */
 1678                 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
 1679                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
 1680         } else
 1681                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
 1682 }
 1683 
 1684 static int
 1685 nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
 1686 {
 1687         struct nfe_softc *sc;
 1688         struct ifreq *ifr;
 1689         struct mii_data *mii;
 1690         int error, init, mask;
 1691 
 1692         sc = if_getsoftc(ifp);
 1693         ifr = (struct ifreq *) data;
 1694         error = 0;
 1695         init = 0;
 1696         switch (cmd) {
 1697         case SIOCSIFMTU:
 1698                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
 1699                         error = EINVAL;
 1700                 else if (if_getmtu(ifp) != ifr->ifr_mtu) {
 1701                         if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
 1702                             (sc->nfe_jumbo_disable != 0)) &&
 1703                             ifr->ifr_mtu > ETHERMTU)
 1704                                 error = EINVAL;
 1705                         else {
 1706                                 NFE_LOCK(sc);
 1707                                 if_setmtu(ifp, ifr->ifr_mtu);
 1708                                 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 1709                                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 1710                                         nfe_init_locked(sc);
 1711                                 }
 1712                                 NFE_UNLOCK(sc);
 1713                         }
 1714                 }
 1715                 break;
 1716         case SIOCSIFFLAGS:
 1717                 NFE_LOCK(sc);
 1718                 if (if_getflags(ifp) & IFF_UP) {
 1719                         /*
 1720                          * If only the PROMISC or ALLMULTI flag changes, then
 1721                          * don't do a full re-init of the chip, just update
 1722                          * the Rx filter.
 1723                          */
 1724                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
 1725                             ((if_getflags(ifp) ^ sc->nfe_if_flags) &
 1726                              (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1727                                 nfe_setmulti(sc);
 1728                         else
 1729                                 nfe_init_locked(sc);
 1730                 } else {
 1731                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 1732                                 nfe_stop(ifp);
 1733                 }
 1734                 sc->nfe_if_flags = if_getflags(ifp);
 1735                 NFE_UNLOCK(sc);
 1736                 error = 0;
 1737                 break;
 1738         case SIOCADDMULTI:
 1739         case SIOCDELMULTI:
 1740                 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 1741                         NFE_LOCK(sc);
 1742                         nfe_setmulti(sc);
 1743                         NFE_UNLOCK(sc);
 1744                         error = 0;
 1745                 }
 1746                 break;
 1747         case SIOCSIFMEDIA:
 1748         case SIOCGIFMEDIA:
 1749                 mii = device_get_softc(sc->nfe_miibus);
 1750                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1751                 break;
 1752         case SIOCSIFCAP:
 1753                 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
 1754 #ifdef DEVICE_POLLING
 1755                 if ((mask & IFCAP_POLLING) != 0) {
 1756                         if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
 1757                                 error = ether_poll_register(nfe_poll, ifp);
 1758                                 if (error)
 1759                                         break;
 1760                                 NFE_LOCK(sc);
 1761                                 nfe_disable_intr(sc);
 1762                                 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
 1763                                 NFE_UNLOCK(sc);
 1764                         } else {
 1765                                 error = ether_poll_deregister(ifp);
  1766                                 /* Re-enable interrupts even if deregistration failed. */
 1767                                 NFE_LOCK(sc);
 1768                                 nfe_enable_intr(sc);
 1769                                 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
 1770                                 NFE_UNLOCK(sc);
 1771                         }
 1772                 }
 1773 #endif /* DEVICE_POLLING */
 1774                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 1775                     (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
 1776                         if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
 1777                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1778                     (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
 1779                         if_togglecapenable(ifp, IFCAP_TXCSUM);
 1780                         if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
 1781                                 if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
 1782                         else
 1783                                 if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
 1784                 }
 1785                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1786                     (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
 1787                         if_togglecapenable(ifp, IFCAP_RXCSUM);
 1788                         init++;
 1789                 }
 1790                 if ((mask & IFCAP_TSO4) != 0 &&
 1791                     (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
 1792                         if_togglecapenable(ifp, IFCAP_TSO4);
 1793                         if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
 1794                                 if_sethwassistbits(ifp, CSUM_TSO, 0);
 1795                         else
 1796                                 if_sethwassistbits(ifp, 0, CSUM_TSO);
 1797                 }
 1798                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 1799                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
 1800                         if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
 1801                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1802                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
 1803                         if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
 1804                         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
 1805                                 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
 1806                         init++;
 1807                 }
 1808                 /*
 1809                  * XXX
  1810                  * VLAN stripping appears to require Rx checksum offload,
  1811                  * and FreeBSD has no way to disable only the Rx side of
  1812                  * VLAN stripping.  So when Rx checksum offload is known
  1813                  * to be disabled, turn the entire hardware VLAN assist off.
 1814                  */
 1815                 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
 1816                         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
 1817                                 init++;
 1818                         if_setcapenablebit(ifp, 0,
 1819                             (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
 1820                 }
 1821                 if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 1822                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 1823                         nfe_init(sc);
 1824                 }
 1825                 if_vlancap(ifp);
 1826                 break;
 1827         default:
 1828                 error = ether_ioctl(ifp, cmd, data);
 1829                 break;
 1830         }
 1831 
 1832         return (error);
 1833 }
 1834 
 1835 static int
 1836 nfe_intr(void *arg)
 1837 {
 1838         struct nfe_softc *sc;
 1839         uint32_t status;
 1840 
 1841         sc = (struct nfe_softc *)arg;
 1842 
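               /*
                * A status of zero means the interrupt is not ours, while
                * all-ones usually means the hardware is gone (detached or
                * powered off).
                */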
 1843         status = NFE_READ(sc, sc->nfe_irq_status);
 1844         if (status == 0 || status == 0xffffffff)
 1845                 return (FILTER_STRAY);
 1846         nfe_disable_intr(sc);
 1847         taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
 1848 
 1849         return (FILTER_HANDLED);
 1850 }
 1851 
 1852 static void
 1853 nfe_int_task(void *arg, int pending)
 1854 {
 1855         struct nfe_softc *sc = arg;
 1856         if_t ifp = sc->nfe_ifp;
 1857         uint32_t r;
 1858         int domore;
 1859 
 1860         NFE_LOCK(sc);
 1861 
 1862         if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
 1863                 nfe_enable_intr(sc);
 1864                 NFE_UNLOCK(sc);
 1865                 return; /* not for us */
 1866         }
 1867         NFE_WRITE(sc, sc->nfe_irq_status, r);
 1868 
 1869         DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
 1870 
 1871 #ifdef DEVICE_POLLING
 1872         if (if_getcapenable(ifp) & IFCAP_POLLING) {
 1873                 NFE_UNLOCK(sc);
 1874                 return;
 1875         }
 1876 #endif
 1877 
 1878         if (r & NFE_IRQ_LINK) {
 1879                 NFE_READ(sc, NFE_PHY_STATUS);
 1880                 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
 1881                 DPRINTF(sc, "link state changed\n");
 1882         }
 1883 
 1884         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
 1885                 NFE_UNLOCK(sc);
 1886                 nfe_disable_intr(sc);
 1887                 return;
 1888         }
 1889 
 1890         domore = 0;
 1891         /* check Rx ring */
 1892         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
 1893                 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
 1894         else
 1895                 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
 1896         /* check Tx ring */
 1897         nfe_txeof(sc);
 1898 
 1899         if (!if_sendq_empty(ifp))
 1900                 nfe_start_locked(ifp);
 1901 
 1902         NFE_UNLOCK(sc);
 1903 
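               /* More work is pending; reschedule the task instead of unmasking. */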
 1904         if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
 1905                 taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
 1906                 return;
 1907         }
 1908 
 1909         /* Reenable interrupts. */
 1910         nfe_enable_intr(sc);
 1911 }
 1912 
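       /*
        * Hand the descriptor at idx back to the chip unchanged, reusing the
        * mbuf already attached to the slot (used on Rx error or when a
        * replacement mbuf cannot be allocated).
        */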
 1913 static __inline void
 1914 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
 1915 {
 1916         struct nfe_desc32 *desc32;
 1917         struct nfe_desc64 *desc64;
 1918         struct nfe_rx_data *data;
 1919         struct mbuf *m;
 1920 
 1921         data = &sc->rxq.data[idx];
 1922         m = data->m;
 1923 
 1924         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1925                 desc64 = &sc->rxq.desc64[idx];
 1926                 /* VLAN packet may have overwritten it. */
 1927                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
 1928                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
 1929                 desc64->length = htole16(m->m_len);
 1930                 desc64->flags = htole16(NFE_RX_READY);
 1931         } else {
 1932                 desc32 = &sc->rxq.desc32[idx];
 1933                 desc32->length = htole16(m->m_len);
 1934                 desc32->flags = htole16(NFE_RX_READY);
 1935         }
 1936 }
 1937 
 1938 static __inline void
 1939 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
 1940 {
 1941         struct nfe_desc32 *desc32;
 1942         struct nfe_desc64 *desc64;
 1943         struct nfe_rx_data *data;
 1944         struct mbuf *m;
 1945 
 1946         data = &sc->jrxq.jdata[idx];
 1947         m = data->m;
 1948 
 1949         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 1950                 desc64 = &sc->jrxq.jdesc64[idx];
 1951                 /* VLAN packet may have overwritten it. */
 1952                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
 1953                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
 1954                 desc64->length = htole16(m->m_len);
 1955                 desc64->flags = htole16(NFE_RX_READY);
 1956         } else {
 1957                 desc32 = &sc->jrxq.jdesc32[idx];
 1958                 desc32->length = htole16(m->m_len);
 1959                 desc32->flags = htole16(NFE_RX_READY);
 1960         }
 1961 }
 1962 
 1963 static int
 1964 nfe_newbuf(struct nfe_softc *sc, int idx)
 1965 {
 1966         struct nfe_rx_data *data;
 1967         struct nfe_desc32 *desc32;
 1968         struct nfe_desc64 *desc64;
 1969         struct mbuf *m;
 1970         bus_dma_segment_t segs[1];
 1971         bus_dmamap_t map;
 1972         int nsegs;
 1973 
 1974         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1975         if (m == NULL)
 1976                 return (ENOBUFS);
 1977 
 1978         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1979         m_adj(m, ETHER_ALIGN);
 1980 
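               /*
                * Load the new mbuf into the spare map first so that, if the
                * load fails, the currently mapped mbuf is left untouched.
                */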
 1981         if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
 1982             m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
 1983                 m_freem(m);
 1984                 return (ENOBUFS);
 1985         }
 1986         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1987 
 1988         data = &sc->rxq.data[idx];
 1989         if (data->m != NULL) {
 1990                 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
 1991                     BUS_DMASYNC_POSTREAD);
 1992                 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
 1993         }
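               /* Swap maps so the just-loaded spare map stays with the new mbuf. */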
 1994         map = data->rx_data_map;
 1995         data->rx_data_map = sc->rxq.rx_spare_map;
 1996         sc->rxq.rx_spare_map = map;
 1997         bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
 1998             BUS_DMASYNC_PREREAD);
 1999         data->paddr = segs[0].ds_addr;
 2000         data->m = m;
 2001         /* update mapping address in h/w descriptor */
 2002         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2003                 desc64 = &sc->rxq.desc64[idx];
 2004                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
 2005                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2006                 desc64->length = htole16(segs[0].ds_len);
 2007                 desc64->flags = htole16(NFE_RX_READY);
 2008         } else {
 2009                 desc32 = &sc->rxq.desc32[idx];
 2010                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2011                 desc32->length = htole16(segs[0].ds_len);
 2012                 desc32->flags = htole16(NFE_RX_READY);
 2013         }
 2014 
 2015         return (0);
 2016 }
 2017 
 2018 static int
 2019 nfe_jnewbuf(struct nfe_softc *sc, int idx)
 2020 {
 2021         struct nfe_rx_data *data;
 2022         struct nfe_desc32 *desc32;
 2023         struct nfe_desc64 *desc64;
 2024         struct mbuf *m;
 2025         bus_dma_segment_t segs[1];
 2026         bus_dmamap_t map;
 2027         int nsegs;
 2028 
 2029         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 2030         if (m == NULL)
 2031                 return (ENOBUFS);
 2032         m->m_pkthdr.len = m->m_len = MJUM9BYTES;
 2033         m_adj(m, ETHER_ALIGN);
 2034 
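               /* Same spare-map scheme as nfe_newbuf(), with a 9k jumbo cluster. */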
 2035         if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
 2036             sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
 2037                 m_freem(m);
 2038                 return (ENOBUFS);
 2039         }
 2040         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 2041 
 2042         data = &sc->jrxq.jdata[idx];
 2043         if (data->m != NULL) {
 2044                 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
 2045                     BUS_DMASYNC_POSTREAD);
 2046                 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
 2047         }
 2048         map = data->rx_data_map;
 2049         data->rx_data_map = sc->jrxq.jrx_spare_map;
 2050         sc->jrxq.jrx_spare_map = map;
 2051         bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
 2052             BUS_DMASYNC_PREREAD);
 2053         data->paddr = segs[0].ds_addr;
 2054         data->m = m;
 2055         /* update mapping address in h/w descriptor */
 2056         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2057                 desc64 = &sc->jrxq.jdesc64[idx];
 2058                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
 2059                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2060                 desc64->length = htole16(segs[0].ds_len);
 2061                 desc64->flags = htole16(NFE_RX_READY);
 2062         } else {
 2063                 desc32 = &sc->jrxq.jdesc32[idx];
 2064                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
 2065                 desc32->length = htole16(segs[0].ds_len);
 2066                 desc32->flags = htole16(NFE_RX_READY);
 2067         }
 2068 
 2069         return (0);
 2070 }
 2071 
 2072 static int
 2073 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
 2074 {
 2075         if_t ifp = sc->nfe_ifp;
 2076         struct nfe_desc32 *desc32;
 2077         struct nfe_desc64 *desc64;
 2078         struct nfe_rx_data *data;
 2079         struct mbuf *m;
 2080         uint16_t flags;
 2081         int len, prog, rx_npkts;
 2082         uint32_t vtag = 0;
 2083 
 2084         rx_npkts = 0;
 2085         NFE_LOCK_ASSERT(sc);
 2086 
 2087         bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
 2088             BUS_DMASYNC_POSTREAD);
 2089 
 2090         for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
 2091                 if (count <= 0)
 2092                         break;
 2093                 count--;
 2094 
 2095                 data = &sc->rxq.data[sc->rxq.cur];
 2096 
 2097                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2098                         desc64 = &sc->rxq.desc64[sc->rxq.cur];
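                               /*
                                * The chip reuses physaddr[1] of the 64-bit
                                * descriptor to report the received VLAN tag.
                                */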
 2099                         vtag = le32toh(desc64->physaddr[1]);
 2100                         flags = le16toh(desc64->flags);
 2101                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
 2102                 } else {
 2103                         desc32 = &sc->rxq.desc32[sc->rxq.cur];
 2104                         flags = le16toh(desc32->flags);
 2105                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
 2106                 }
 2107 
 2108                 if (flags & NFE_RX_READY)
 2109                         break;
 2110                 prog++;
 2111                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2112                         if (!(flags & NFE_RX_VALID_V1)) {
 2113                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2114                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
 2115                                 continue;
 2116                         }
 2117                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
 2118                                 flags &= ~NFE_RX_ERROR;
 2119                                 len--;  /* fix buffer length */
 2120                         }
 2121                 } else {
 2122                         if (!(flags & NFE_RX_VALID_V2)) {
 2123                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2124                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
 2125                                 continue;
 2126                         }
 2127 
 2128                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
 2129                                 flags &= ~NFE_RX_ERROR;
 2130                                 len--;  /* fix buffer length */
 2131                         }
 2132                 }
 2133 
 2134                 if (flags & NFE_RX_ERROR) {
 2135                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2136                         nfe_discard_rxbuf(sc, sc->rxq.cur);
 2137                         continue;
 2138                 }
 2139 
 2140                 m = data->m;
 2141                 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
 2142                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2143                         nfe_discard_rxbuf(sc, sc->rxq.cur);
 2144                         continue;
 2145                 }
 2146 
 2147                 if ((vtag & NFE_RX_VTAG) != 0 &&
 2148                     (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
 2149                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
 2150                         m->m_flags |= M_VLANTAG;
 2151                 }
 2152 
 2153                 m->m_pkthdr.len = m->m_len = len;
 2154                 m->m_pkthdr.rcvif = ifp;
 2155 
 2156                 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
 2157                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
 2158                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2159                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2160                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
 2161                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
 2162                                         m->m_pkthdr.csum_flags |=
 2163                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2164                                         m->m_pkthdr.csum_data = 0xffff;
 2165                                 }
 2166                         }
 2167                 }
 2168 
 2169                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 2170 
 2171                 NFE_UNLOCK(sc);
 2172                 if_input(ifp, m);
 2173                 NFE_LOCK(sc);
 2174                 rx_npkts++;
 2175         }
 2176 
 2177         if (prog > 0)
 2178                 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
 2179                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2180 
 2181         if (rx_npktsp != NULL)
 2182                 *rx_npktsp = rx_npkts;
 2183         return (count > 0 ? 0 : EAGAIN);
 2184 }
 2185 
 2186 static int
 2187 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
 2188 {
 2189         if_t ifp = sc->nfe_ifp;
 2190         struct nfe_desc32 *desc32;
 2191         struct nfe_desc64 *desc64;
 2192         struct nfe_rx_data *data;
 2193         struct mbuf *m;
 2194         uint16_t flags;
 2195         int len, prog, rx_npkts;
 2196         uint32_t vtag = 0;
 2197 
 2198         rx_npkts = 0;
 2199         NFE_LOCK_ASSERT(sc);
 2200 
 2201         bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
 2202             BUS_DMASYNC_POSTREAD);
 2203 
 2204         for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
 2205             vtag = 0) {
 2206                 if (count <= 0)
 2207                         break;
 2208                 count--;
 2209 
 2210                 data = &sc->jrxq.jdata[sc->jrxq.jcur];
 2211 
 2212                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2213                         desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
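                               /* As in nfe_rxeof(), physaddr[1] doubles as the VLAN tag. */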
 2214                         vtag = le32toh(desc64->physaddr[1]);
 2215                         flags = le16toh(desc64->flags);
 2216                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
 2217                 } else {
 2218                         desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
 2219                         flags = le16toh(desc32->flags);
 2220                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
 2221                 }
 2222 
 2223                 if (flags & NFE_RX_READY)
 2224                         break;
 2225                 prog++;
 2226                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2227                         if (!(flags & NFE_RX_VALID_V1)) {
 2228                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2229                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2230                                 continue;
 2231                         }
 2232                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
 2233                                 flags &= ~NFE_RX_ERROR;
 2234                                 len--;  /* fix buffer length */
 2235                         }
 2236                 } else {
 2237                         if (!(flags & NFE_RX_VALID_V2)) {
 2238                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2239                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2240                                 continue;
 2241                         }
 2242 
 2243                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
 2244                                 flags &= ~NFE_RX_ERROR;
 2245                                 len--;  /* fix buffer length */
 2246                         }
 2247                 }
 2248 
 2249                 if (flags & NFE_RX_ERROR) {
 2250                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2251                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2252                         continue;
 2253                 }
 2254 
 2255                 m = data->m;
 2256                 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
 2257                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2258                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
 2259                         continue;
 2260                 }
 2261 
 2262                 if ((vtag & NFE_RX_VTAG) != 0 &&
 2263                     (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
 2264                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
 2265                         m->m_flags |= M_VLANTAG;
 2266                 }
 2267 
 2268                 m->m_pkthdr.len = m->m_len = len;
 2269                 m->m_pkthdr.rcvif = ifp;
 2270 
 2271                 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
 2272                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
 2273                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2274                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2275                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
 2276                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
 2277                                         m->m_pkthdr.csum_flags |=
 2278                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2279                                         m->m_pkthdr.csum_data = 0xffff;
 2280                                 }
 2281                         }
 2282                 }
 2283 
 2284                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 2285 
 2286                 NFE_UNLOCK(sc);
 2287                 if_input(ifp, m);
 2288                 NFE_LOCK(sc);
 2289                 rx_npkts++;
 2290         }
 2291 
 2292         if (prog > 0)
 2293                 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
 2294                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2295 
 2296         if (rx_npktsp != NULL)
 2297                 *rx_npktsp = rx_npkts;
 2298         return (count > 0 ? 0 : EAGAIN);
 2299 }
 2300 
 2301 static void
 2302 nfe_txeof(struct nfe_softc *sc)
 2303 {
 2304         if_t ifp = sc->nfe_ifp;
 2305         struct nfe_desc32 *desc32;
 2306         struct nfe_desc64 *desc64;
 2307         struct nfe_tx_data *data = NULL;
 2308         uint16_t flags;
 2309         int cons, prog;
 2310 
 2311         NFE_LOCK_ASSERT(sc);
 2312 
 2313         bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
 2314             BUS_DMASYNC_POSTREAD);
 2315 
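               /*
                * Walk the ring from the last reclaimed slot; a descriptor
                * that still has NFE_TX_VALID set is still owned by the chip.
                */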
 2316         prog = 0;
 2317         for (cons = sc->txq.next; cons != sc->txq.cur;
 2318             NFE_INC(cons, NFE_TX_RING_COUNT)) {
 2319                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2320                         desc64 = &sc->txq.desc64[cons];
 2321                         flags = le16toh(desc64->flags);
 2322                 } else {
 2323                         desc32 = &sc->txq.desc32[cons];
 2324                         flags = le16toh(desc32->flags);
 2325                 }
 2326 
 2327                 if (flags & NFE_TX_VALID)
 2328                         break;
 2329 
 2330                 prog++;
 2331                 sc->txq.queued--;
 2332                 data = &sc->txq.data[cons];
 2333 
 2334                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
 2335                         if ((flags & NFE_TX_LASTFRAG_V1) == 0)
 2336                                 continue;
 2337                         if ((flags & NFE_TX_ERROR_V1) != 0) {
 2338                                 device_printf(sc->nfe_dev,
 2339                                     "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
 2340 
 2341                                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2342                         } else
 2343                                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 2344                 } else {
 2345                         if ((flags & NFE_TX_LASTFRAG_V2) == 0)
 2346                                 continue;
 2347                         if ((flags & NFE_TX_ERROR_V2) != 0) {
 2348                                 device_printf(sc->nfe_dev,
 2349                                     "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
 2350                                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2351                         } else
 2352                                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 2353                 }
 2354 
 2355                 /* last fragment of the mbuf chain transmitted */
 2356                 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
 2357                 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
 2358                     BUS_DMASYNC_POSTWRITE);
 2359                 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
 2360                 m_freem(data->m);
 2361                 data->m = NULL;
 2362         }
 2363 
 2364         if (prog > 0) {
 2365                 sc->nfe_force_tx = 0;
 2366                 sc->txq.next = cons;
 2367                 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 2368                 if (sc->txq.queued == 0)
 2369                         sc->nfe_watchdog_timer = 0;
 2370         }
 2371 }
 2372 
 2373 static int
 2374 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
 2375 {
 2376         struct nfe_desc32 *desc32 = NULL;
 2377         struct nfe_desc64 *desc64 = NULL;
 2378         bus_dmamap_t map;
 2379         bus_dma_segment_t segs[NFE_MAX_SCATTER];
 2380         int error, i, nsegs, prod, si;
 2381         uint32_t tsosegsz;
 2382         uint16_t cflags, flags;
 2383         struct mbuf *m;
 2384 
 2385         prod = si = sc->txq.cur;
 2386         map = sc->txq.data[prod].tx_data_map;
 2387 
 2388         error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
 2389             &nsegs, BUS_DMA_NOWAIT);
 2390         if (error == EFBIG) {
 2391                 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
 2392                 if (m == NULL) {
 2393                         m_freem(*m_head);
 2394                         *m_head = NULL;
 2395                         return (ENOBUFS);
 2396                 }
 2397                 *m_head = m;
 2398                 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
 2399                     *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
 2400                 if (error != 0) {
 2401                         m_freem(*m_head);
 2402                         *m_head = NULL;
 2403                         return (ENOBUFS);
 2404                 }
 2405         } else if (error != 0)
 2406                 return (error);
 2407         if (nsegs == 0) {
 2408                 m_freem(*m_head);
 2409                 *m_head = NULL;
 2410                 return (EIO);
 2411         }
 2412 
 2413         if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
 2414                 bus_dmamap_unload(sc->txq.tx_data_tag, map);
 2415                 return (ENOBUFS);
 2416         }
 2417 
 2418         m = *m_head;
 2419         cflags = flags = 0;
 2420         tsosegsz = 0;
 2421         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2422                 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
 2423                     NFE_TX_TSO_SHIFT;
 2424                 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
 2425                 cflags |= NFE_TX_TSO;
 2426         } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
 2427                 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
 2428                         cflags |= NFE_TX_IP_CSUM;
 2429                 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
 2430                         cflags |= NFE_TX_TCP_UDP_CSUM;
 2431                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2432                         cflags |= NFE_TX_TCP_UDP_CSUM;
 2433         }
 2434 
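               /*
                * Note that segment lengths are programmed as ds_len - 1;
                * the hardware evidently counts bytes starting from zero.
                */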
 2435         for (i = 0; i < nsegs; i++) {
 2436                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2437                         desc64 = &sc->txq.desc64[prod];
 2438                         desc64->physaddr[0] =
 2439                             htole32(NFE_ADDR_HI(segs[i].ds_addr));
 2440                         desc64->physaddr[1] =
 2441                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
 2442                         desc64->vtag = 0;
 2443                         desc64->length = htole16(segs[i].ds_len - 1);
 2444                         desc64->flags = htole16(flags);
 2445                 } else {
 2446                         desc32 = &sc->txq.desc32[prod];
 2447                         desc32->physaddr =
 2448                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
 2449                         desc32->length = htole16(segs[i].ds_len - 1);
 2450                         desc32->flags = htole16(flags);
 2451                 }
 2452 
 2453                 /*
 2454                  * Setting of the valid bit in the first descriptor is
  2455                  * deferred until the whole chain is fully set up.
 2456                  */
 2457                 flags |= NFE_TX_VALID;
 2458 
 2459                 sc->txq.queued++;
 2460                 NFE_INC(prod, NFE_TX_RING_COUNT);
 2461         }
 2462 
 2463         /*
  2464          * The whole mbuf chain has been DMA mapped; fix up the last and
  2465          * first descriptors.  Csum flags, vtag and TSO belong to the
                * first fragment only.
 2466          */
 2467         if (sc->nfe_flags & NFE_40BIT_ADDR) {
 2468                 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
 2469                 desc64 = &sc->txq.desc64[si];
 2470                 if ((m->m_flags & M_VLANTAG) != 0)
 2471                         desc64->vtag = htole32(NFE_TX_VTAG |
 2472                             m->m_pkthdr.ether_vtag);
 2473                 if (tsosegsz != 0) {
 2474                         /*
 2475                          * XXX
  2476                          * The 32-bit TSO segment size is split across
  2477                          * the 16-bit length and flags fields.
 2478                          */
 2479                         desc64->length |= htole16((uint16_t)tsosegsz);
 2480                         desc64->flags |= htole16(tsosegsz >> 16);
 2481                 }
 2482                 /*
  2483                  * Finally, set the valid/checksum/TSO bits in the first
 2484                  * descriptor.
 2485                  */
 2486                 desc64->flags |= htole16(NFE_TX_VALID | cflags);
 2487         } else {
 2488                 if (sc->nfe_flags & NFE_JUMBO_SUP)
 2489                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
 2490                 else
 2491                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
 2492                 desc32 = &sc->txq.desc32[si];
 2493                 if (tsosegsz != 0) {
 2494                         /*
 2495                          * XXX
  2496                          * The 32-bit TSO segment size is split across
  2497                          * the 16-bit length and flags fields.
 2498                          */
 2499                         desc32->length |= htole16((uint16_t)tsosegsz);
 2500                         desc32->flags |= htole16(tsosegsz >> 16);
 2501                 }
 2502                 /*
  2503                  * Finally, set the valid/checksum/TSO bits in the first
 2504                  * descriptor.
 2505                  */
 2506                 desc32->flags |= htole16(NFE_TX_VALID | cflags);
 2507         }
 2508 
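               /*
                * The DMA map was borrowed from the first slot (si), but the
                * mbuf is recorded at the last descriptor of the chain; swap
                * the maps so nfe_txeof() unloads the correct map when the
                * last fragment completes.
                */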
 2509         sc->txq.cur = prod;
 2510         prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
 2511         sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
 2512         sc->txq.data[prod].tx_data_map = map;
 2513         sc->txq.data[prod].m = m;
 2514 
 2515         bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
 2516 
 2517         return (0);
 2518 }
 2519 
 2520 struct nfe_hash_maddr_ctx {
 2521         uint8_t addr[ETHER_ADDR_LEN];
 2522         uint8_t mask[ETHER_ADDR_LEN];
 2523 };
 2524 
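       /*
        * Per-address callback for if_foreach_llmaddr(): addr keeps the bits
        * common to every multicast address while mask clears the bits set in
        * any of them.  nfe_setmulti() then ORs addr into mask, so mask ends
        * up flagging the bit positions on which all addresses agree; the
        * chip apparently matches frames against addr on those bits, an
        * inexact filter that may also pass unwanted multicasts.
        */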
 2525 static u_int
 2526 nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
 2527 {
 2528         struct nfe_hash_maddr_ctx *ctx = arg;
 2529         uint8_t *addrp, mcaddr;
 2530         int j;
 2531 
 2532         addrp = LLADDR(sdl);
 2533         for (j = 0; j < ETHER_ADDR_LEN; j++) {
 2534                 mcaddr = addrp[j];
 2535                 ctx->addr[j] &= mcaddr;
 2536                 ctx->mask[j] &= ~mcaddr;
 2537         }
 2538 
 2539         return (1);
 2540 }
 2541 
 2542 static void
 2543 nfe_setmulti(struct nfe_softc *sc)
 2544 {
 2545         if_t ifp = sc->nfe_ifp;
 2546         struct nfe_hash_maddr_ctx ctx;
 2547         uint32_t filter;
 2548         uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
 2549                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 2550         };
 2551         int i;
 2552 
 2553         NFE_LOCK_ASSERT(sc);
 2554 
 2555         if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
 2556                 bzero(ctx.addr, ETHER_ADDR_LEN);
 2557                 bzero(ctx.mask, ETHER_ADDR_LEN);
 2558                 goto done;
 2559         }
 2560 
 2561         bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
 2562         bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);
 2563 
 2564         if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);
 2565 
 2566         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2567                 ctx.mask[i] |= ctx.addr[i];
 2568         }
 2569 
 2570 done:
 2571         ctx.addr[0] |= 0x01;    /* make sure multicast bit is set */
 2572 
 2573         NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
 2574             ctx.addr[1] << 8 | ctx.addr[0]);
 2575         NFE_WRITE(sc, NFE_MULTIADDR_LO,
 2576             ctx.addr[5] <<  8 | ctx.addr[4]);
 2577         NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
 2578             ctx.mask[1] << 8 | ctx.mask[0]);
 2579         NFE_WRITE(sc, NFE_MULTIMASK_LO,
 2580             ctx.mask[5] <<  8 | ctx.mask[4]);
 2581 
 2582         filter = NFE_READ(sc, NFE_RXFILTER);
 2583         filter &= NFE_PFF_RX_PAUSE;
 2584         filter |= NFE_RXFILTER_MAGIC;
 2585         filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
 2586         NFE_WRITE(sc, NFE_RXFILTER, filter);
 2587 }
 2588 
 2589 static void
 2590 nfe_start(if_t ifp)
 2591 {
 2592         struct nfe_softc *sc = if_getsoftc(ifp);
 2593 
 2594         NFE_LOCK(sc);
 2595         nfe_start_locked(ifp);
 2596         NFE_UNLOCK(sc);
 2597 }
 2598 
 2599 static void
 2600 nfe_start_locked(if_t ifp)
 2601 {
 2602         struct nfe_softc *sc = if_getsoftc(ifp);
 2603         struct mbuf *m0;
 2604         int enq = 0;
 2605 
 2606         NFE_LOCK_ASSERT(sc);
 2607 
 2608         if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2609             IFF_DRV_RUNNING || sc->nfe_link == 0)
 2610                 return;
 2611 
 2612         while (!if_sendq_empty(ifp)) {
 2613                 m0 = if_dequeue(ifp);
 2614 
 2615                 if (m0 == NULL)
 2616                         break;
 2617 
 2618                 if (nfe_encap(sc, &m0) != 0) {
 2619                         if (m0 == NULL)
 2620                                 break;
 2621                         if_sendq_prepend(ifp, m0);
 2622                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 2623                         break;
 2624                 }
 2625                 enq++;
 2626                 if_etherbpfmtap(ifp, m0);
 2627         }
 2628 
 2629         if (enq > 0) {
 2630                 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
 2631                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2632 
 2633                 /* kick Tx */
 2634                 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
 2635 
 2636                 /*
 2637                  * Set a timeout in case the chip goes out to lunch.
 2638                  */
 2639                 sc->nfe_watchdog_timer = 5;
 2640         }
 2641 }
 2642 
 2643 static void
 2644 nfe_watchdog(if_t ifp)
 2645 {
 2646         struct nfe_softc *sc = if_getsoftc(ifp);
 2647 
 2648         if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
 2649                 return;
 2650 
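               /*
                * Recovery is staged: first reclaim completed descriptors in
                * case a Tx completion interrupt was lost, then re-issue the
                * kick-Tx command up to three times, and only then fall back
                * to a full reinitialization.
                */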
 2651         /* Check if we've lost Tx completion interrupt. */
 2652         nfe_txeof(sc);
 2653         if (sc->txq.queued == 0) {
 2654                 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
 2655                     "-- recovering\n");
 2656                 if (!if_sendq_empty(ifp))
 2657                         nfe_start_locked(ifp);
 2658                 return;
 2659         }
 2660         /* Check if we've lost start Tx command. */
 2661         sc->nfe_force_tx++;
 2662         if (sc->nfe_force_tx <= 3) {
 2663                 /*
 2664                  * If a lost start-Tx command caused the timeout, rekicking
 2665                  * the chip here recovers; arguably this belongs in nfe_txeof().
 2666                  */
 2667                 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
 2668                 return;
 2669         }
 2670         sc->nfe_force_tx = 0;
 2671 
 2672         if_printf(ifp, "watchdog timeout\n");
 2673 
 2674         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2675         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2676         nfe_init_locked(sc);
 2677 }
 2678 
 2679 static void
 2680 nfe_init(void *xsc)
 2681 {
 2682         struct nfe_softc *sc = xsc;
 2683 
 2684         NFE_LOCK(sc);
 2685         nfe_init_locked(sc);
 2686         NFE_UNLOCK(sc);
 2687 }
 2688 
 2689 static void
 2690 nfe_init_locked(void *xsc)
 2691 {
 2692         struct nfe_softc *sc = xsc;
 2693         if_t ifp = sc->nfe_ifp;
 2694         struct mii_data *mii;
 2695         uint32_t val;
 2696         int error;
 2697 
 2698         NFE_LOCK_ASSERT(sc);
 2699 
 2700         mii = device_get_softc(sc->nfe_miibus);
 2701 
 2702         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 2703                 return;
 2704 
 2705         nfe_stop(ifp);
 2706 
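              /*
               * Compute the largest frame the chip must accept; NFE_RX_HEADERS
               * is assumed to account for the Ethernet header/CRC overhead on
               * top of the MTU.
               */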
 2707         sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
 2708 
 2709         nfe_init_tx_ring(sc, &sc->txq);
 2710         if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
 2711                 error = nfe_init_jrx_ring(sc, &sc->jrxq);
 2712         else
 2713                 error = nfe_init_rx_ring(sc, &sc->rxq);
 2714         if (error != 0) {
 2715                 device_printf(sc->nfe_dev,
 2716                     "initialization failed: no memory for rx buffers\n");
 2717                 nfe_stop(ifp);
 2718                 return;
 2719         }
 2720 
 2721         val = 0;
 2722         if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
 2723                 val |= NFE_MAC_ADDR_INORDER;
 2724         NFE_WRITE(sc, NFE_TX_UNK, val);
 2725         NFE_WRITE(sc, NFE_STATUS, 0);
 2726 
 2727         if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
 2728                 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
 2729 
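              /*
               * Pick the Rx/Tx control magic, which is assumed to select the
               * descriptor format: v3 for chips with 40-bit DMA addressing,
               * v2 for jumbo-capable chips, and the base format otherwise.
               */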
 2730         sc->rxtxctl = NFE_RXTX_BIT2;
 2731         if (sc->nfe_flags & NFE_40BIT_ADDR)
 2732                 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
 2733         else if (sc->nfe_flags & NFE_JUMBO_SUP)
 2734                 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
 2735 
 2736         if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
 2737                 sc->rxtxctl |= NFE_RXTX_RXCSUM;
 2738         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
 2739                 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
 2740 
 2741         NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
 2742         DELAY(10);
 2743         NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
 2744 
 2745         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
 2746                 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
 2747         else
 2748                 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
 2749 
 2750         NFE_WRITE(sc, NFE_SETUP_R6, 0);
 2751 
 2752         /* set MAC address */
 2753         nfe_set_macaddr(sc, if_getlladdr(ifp));
 2754 
 2755         /* tell MAC where rings are in memory */
 2756         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
 2757                 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
 2758                     NFE_ADDR_HI(sc->jrxq.jphysaddr));
 2759                 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
 2760                     NFE_ADDR_LO(sc->jrxq.jphysaddr));
 2761         } else {
 2762                 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
 2763                     NFE_ADDR_HI(sc->rxq.physaddr));
 2764                 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
 2765                     NFE_ADDR_LO(sc->rxq.physaddr));
 2766         }
 2767         NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
 2768         NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
 2769 
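              /* Ring sizes are programmed as (count - 1), Rx in the upper 16 bits. */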
 2770         NFE_WRITE(sc, NFE_RING_SIZE,
 2771             (NFE_RX_RING_COUNT - 1) << 16 |
 2772             (NFE_TX_RING_COUNT - 1));
 2773 
 2774         NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
 2775 
 2776         /* force MAC to wakeup */
 2777         val = NFE_READ(sc, NFE_PWR_STATE);
 2778         if ((val & NFE_PWR_WAKEUP) == 0)
 2779                 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
 2780         DELAY(10);
 2781         val = NFE_READ(sc, NFE_PWR_STATE);
 2782         NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
 2783 
 2784 #if 1
 2785         /* configure interrupt coalescing/mitigation */
 2786         NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
 2787 #else
 2788         /* no interrupt mitigation: one interrupt per packet */
 2789         NFE_WRITE(sc, NFE_IMTIMER, 970);
 2790 #endif
 2791 
 2792         NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
 2793         NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
 2794         NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
 2795 
 2796         /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
 2797         NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
 2798 
 2799         NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
 2800         /* Disable WOL. */
 2801         NFE_WRITE(sc, NFE_WOL_CTL, 0);
 2802 
 2803         sc->rxtxctl &= ~NFE_RXTX_BIT2;
 2804         NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
 2805         DELAY(10);
 2806         NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
 2807 
 2808         /* set Rx filter */
 2809         nfe_setmulti(sc);
 2810 
 2811         /* enable Rx */
 2812         NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
 2813 
 2814         /* enable Tx */
 2815         NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
 2816 
 2817         NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
 2818 
 2819         /* Clear hardware stats. */
 2820         nfe_stats_clear(sc);
 2821 
 2822 #ifdef DEVICE_POLLING
 2823         if (if_getcapenable(ifp) & IFCAP_POLLING)
 2824                 nfe_disable_intr(sc);
 2825         else
 2826 #endif
 2827         nfe_set_intr(sc);
 2828         nfe_enable_intr(sc); /* enable interrupts */
 2829 
 2830         if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
 2831         if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 2832 
 2833         sc->nfe_link = 0;
 2834         mii_mediachg(mii);
 2835 
 2836         callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
 2837 }
 2838 
 2839 static void
 2840 nfe_stop(if_t ifp)
 2841 {
 2842         struct nfe_softc *sc = if_getsoftc(ifp);
 2843         struct nfe_rx_ring *rx_ring;
 2844         struct nfe_jrx_ring *jrx_ring;
 2845         struct nfe_tx_ring *tx_ring;
 2846         struct nfe_rx_data *rdata;
 2847         struct nfe_tx_data *tdata;
 2848         int i;
 2849 
 2850         NFE_LOCK_ASSERT(sc);
 2851 
 2852         sc->nfe_watchdog_timer = 0;
 2853         if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
 2854 
 2855         callout_stop(&sc->nfe_stat_ch);
 2856 
 2857         /* abort Tx */
 2858         NFE_WRITE(sc, NFE_TX_CTL, 0);
 2859 
 2860         /* disable Rx */
 2861         NFE_WRITE(sc, NFE_RX_CTL, 0);
 2862 
 2863         /* disable interrupts */
 2864         nfe_disable_intr(sc);
 2865 
 2866         sc->nfe_link = 0;
 2867 
 2868         /* free Rx and Tx mbufs still in the queues. */
 2869         rx_ring = &sc->rxq;
 2870         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
 2871                 rdata = &rx_ring->data[i];
 2872                 if (rdata->m != NULL) {
 2873                         bus_dmamap_sync(rx_ring->rx_data_tag,
 2874                             rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
 2875                         bus_dmamap_unload(rx_ring->rx_data_tag,
 2876                             rdata->rx_data_map);
 2877                         m_freem(rdata->m);
 2878                         rdata->m = NULL;
 2879                 }
 2880         }
 2881 
 2882         if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
 2883                 jrx_ring = &sc->jrxq;
 2884                 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
 2885                         rdata = &jrx_ring->jdata[i];
 2886                         if (rdata->m != NULL) {
 2887                                 bus_dmamap_sync(jrx_ring->jrx_data_tag,
 2888                                     rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
 2889                                 bus_dmamap_unload(jrx_ring->jrx_data_tag,
 2890                                     rdata->rx_data_map);
 2891                                 m_freem(rdata->m);
 2892                                 rdata->m = NULL;
 2893                         }
 2894                 }
 2895         }
 2896 
 2897         tx_ring = &sc->txq;
 2898         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
 2899                 tdata = &tx_ring->data[i];
 2900                 if (tdata->m != NULL) {
 2901                         bus_dmamap_sync(tx_ring->tx_data_tag,
 2902                             tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
 2903                         bus_dmamap_unload(tx_ring->tx_data_tag,
 2904                             tdata->tx_data_map);
 2905                         m_freem(tdata->m);
 2906                         tdata->m = NULL;
 2907                 }
 2908         }
 2909         /* Update hardware stats. */
 2910         nfe_stats_update(sc);
 2911 }
 2912 
 2913 static int
 2914 nfe_ifmedia_upd(if_t ifp)
 2915 {
 2916         struct nfe_softc *sc = if_getsoftc(ifp);
 2917         struct mii_data *mii;
 2918 
 2919         NFE_LOCK(sc);
 2920         mii = device_get_softc(sc->nfe_miibus);
 2921         mii_mediachg(mii);
 2922         NFE_UNLOCK(sc);
 2923 
 2924         return (0);
 2925 }
 2926 
 2927 static void
 2928 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
 2929 {
 2930         struct nfe_softc *sc;
 2931         struct mii_data *mii;
 2932 
 2933         sc = if_getsoftc(ifp);
 2934 
 2935         NFE_LOCK(sc);
 2936         mii = device_get_softc(sc->nfe_miibus);
 2937         mii_pollstat(mii);
 2938 
 2939         ifmr->ifm_active = mii->mii_media_active;
 2940         ifmr->ifm_status = mii->mii_media_status;
 2941         NFE_UNLOCK(sc);
 2942 }
 2943 
 2944 void
 2945 nfe_tick(void *xsc)
 2946 {
 2947         struct nfe_softc *sc;
 2948         struct mii_data *mii;
 2949         if_t ifp;
 2950 
 2951         sc = (struct nfe_softc *)xsc;
 2952 
 2953         NFE_LOCK_ASSERT(sc);
 2954 
 2955         ifp = sc->nfe_ifp;
 2956 
 2957         mii = device_get_softc(sc->nfe_miibus);
 2958         mii_tick(mii);
 2959         nfe_stats_update(sc);
 2960         nfe_watchdog(ifp);
 2961         callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
 2962 }
 2963 
 2964 static int
 2965 nfe_shutdown(device_t dev)
 2966 {
 2967 
 2968         return (nfe_suspend(dev));
 2969 }
 2970 
 2971 static void
 2972 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
 2973 {
 2974         uint32_t val;
 2975 
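              /*
               * The factory MAC address is stored in reversed byte order on
               * older chips; NFE_CORRECT_MACADDR marks chips that store it
               * in network order, so the two branches mirror each other.
               */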
 2976         if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
 2977                 val = NFE_READ(sc, NFE_MACADDR_LO);
 2978                 addr[0] = (val >> 8) & 0xff;
 2979                 addr[1] = (val & 0xff);
 2980 
 2981                 val = NFE_READ(sc, NFE_MACADDR_HI);
 2982                 addr[2] = (val >> 24) & 0xff;
 2983                 addr[3] = (val >> 16) & 0xff;
 2984                 addr[4] = (val >>  8) & 0xff;
 2985                 addr[5] = (val & 0xff);
 2986         } else {
 2987                 val = NFE_READ(sc, NFE_MACADDR_LO);
 2988                 addr[5] = (val >> 8) & 0xff;
 2989                 addr[4] = (val & 0xff);
 2990 
 2991                 val = NFE_READ(sc, NFE_MACADDR_HI);
 2992                 addr[3] = (val >> 24) & 0xff;
 2993                 addr[2] = (val >> 16) & 0xff;
 2994                 addr[1] = (val >>  8) & 0xff;
 2995                 addr[0] = (val & 0xff);
 2996         }
 2997 }
 2998 
 2999 static void
 3000 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
 3001 {
 3002 
 3003         NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
 3004         NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
 3005             addr[1] << 8 | addr[0]);
 3006 }
 3007 
 3008 /*
 3009  * Map a single buffer address.
 3010  */
 3011 
 3012 static void
 3013 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 3014 {
 3015         struct nfe_dmamap_arg *ctx;
 3016 
 3017         if (error != 0)
 3018                 return;
 3019 
 3020         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
 3021 
 3022         ctx = (struct nfe_dmamap_arg *)arg;
 3023         ctx->nfe_busaddr = segs[0].ds_addr;
 3024 }
 3025 
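      /*
       * Generic sysctl handler that accepts a new integer value only when
       * it falls within [low, high]; reads are passed through unchanged.
       */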
 3026 static int
 3027 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3028 {
 3029         int error, value;
 3030 
 3031         if (!arg1)
 3032                 return (EINVAL);
 3033         value = *(int *)arg1;
 3034         error = sysctl_handle_int(oidp, &value, 0, req);
 3035         if (error || !req->newptr)
 3036                 return (error);
 3037         if (value < low || value > high)
 3038                 return (EINVAL);
 3039         *(int *)arg1 = value;
 3040 
 3041         return (0);
 3042 }
 3043 
 3044 static int
 3045 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
 3046 {
 3047 
 3048         return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
 3049             NFE_PROC_MAX));
 3050 }
 3051 
 3052 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)    \
 3053             SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
 3054 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)    \
 3055             SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
 3056 
 3057 static void
 3058 nfe_sysctl_node(struct nfe_softc *sc)
 3059 {
 3060         struct sysctl_ctx_list *ctx;
 3061         struct sysctl_oid_list *child, *parent;
 3062         struct sysctl_oid *tree;
 3063         struct nfe_hw_stats *stats;
 3064         int error;
 3065 
 3066         stats = &sc->nfe_stats;
 3067         ctx = device_get_sysctl_ctx(sc->nfe_dev);
 3068         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
 3069         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
 3070             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
 3071             &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
 3072             "max number of Rx events to process");
 3073 
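              /* A device hint (hint.nfe.<unit>.process_limit) may override this. */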
 3074         sc->nfe_process_limit = NFE_PROC_DEFAULT;
 3075         error = resource_int_value(device_get_name(sc->nfe_dev),
 3076             device_get_unit(sc->nfe_dev), "process_limit",
 3077             &sc->nfe_process_limit);
 3078         if (error == 0) {
 3079                 if (sc->nfe_process_limit < NFE_PROC_MIN ||
 3080                     sc->nfe_process_limit > NFE_PROC_MAX) {
 3081                         device_printf(sc->nfe_dev,
 3082                             "process_limit value out of range; "
 3083                             "using default: %d\n", NFE_PROC_DEFAULT);
 3084                         sc->nfe_process_limit = NFE_PROC_DEFAULT;
 3085                 }
 3086         }
 3087 
 3088         if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
 3089                 return;
 3090 
 3091         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
 3092             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
 3093         parent = SYSCTL_CHILDREN(tree);
 3094 
 3095         /* Rx statistics. */
 3096         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
 3097             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
 3098         child = SYSCTL_CHILDREN(tree);
 3099 
 3100         NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
 3101             &stats->rx_frame_errors, "Framing Errors");
 3102         NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
 3103             &stats->rx_extra_bytes, "Extra Bytes");
 3104         NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
 3105             &stats->rx_late_cols, "Late Collisions");
 3106         NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
 3107             &stats->rx_runts, "Runts");
 3108         NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
 3109             &stats->rx_jumbos, "Jumbos");
 3110         NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
 3111             &stats->rx_fifo_overuns, "FIFO Overruns");
 3112         NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
 3113             &stats->rx_crc_errors, "CRC Errors");
 3114         NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
 3115             &stats->rx_fae, "Frame Alignment Errors");
 3116         NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
 3117             &stats->rx_len_errors, "Length Errors");
 3118         NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
 3119             &stats->rx_unicast, "Unicast Frames");
 3120         NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
 3121             &stats->rx_multicast, "Multicast Frames");
 3122         NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
 3123             &stats->rx_broadcast, "Broadcast Frames");
 3124         if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
 3125                 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
 3126                     &stats->rx_octets, "Octets");
 3127                 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
 3128                     &stats->rx_pause, "Pause frames");
 3129                 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
 3130                     &stats->rx_drops, "Drop frames");
 3131         }
 3132 
 3133         /* Tx statistics. */
 3134         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
 3135             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
 3136         child = SYSCTL_CHILDREN(tree);
 3137         NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
 3138             &stats->tx_octets, "Octets");
 3139         NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
 3140             &stats->tx_zero_rexmits, "Zero Retransmits");
 3141         NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
 3142             &stats->tx_one_rexmits, "One Retransmits");
 3143         NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
 3144             &stats->tx_multi_rexmits, "Multiple Retransmits");
 3145         NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
 3146             &stats->tx_late_cols, "Late Collisions");
 3147         NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
 3148             &stats->tx_fifo_underuns, "FIFO Underruns");
 3149         NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
 3150             &stats->tx_carrier_losts, "Carrier Losses");
 3151         NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
 3152             &stats->tx_excess_deferals, "Excess Deferrals");
 3153         NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
 3154             &stats->tx_retry_errors, "Retry Errors");
 3155         if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
 3156                 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
 3157                     &stats->tx_deferals, "Deferrals");
 3158                 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
 3159                     &stats->tx_frames, "Frames");
 3160                 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
 3161                     &stats->tx_pause, "Pause Frames");
 3162         }
 3163         if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
 3164                 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
 3165                     &stats->tx_unicast, "Unicast Frames");
 3166                 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
 3167                     &stats->tx_multicast, "Multicast Frames");
 3168                 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
 3169                     &stats->tx_broadcast, "Broadcast Frames");
 3170         }
 3171 }
 3172 
 3173 #undef NFE_SYSCTL_STAT_ADD32
 3174 #undef NFE_SYSCTL_STAT_ADD64
 3175 
 3176 static void
 3177 nfe_stats_clear(struct nfe_softc *sc)
 3178 {
 3179         int i, mib_cnt;
 3180 
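              /*
               * The hardware MIB counters are assumed to clear on read, so
               * a pass of discarded reads resets them all.
               */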
 3181         if ((sc->nfe_flags & NFE_MIB_V1) != 0)
 3182                 mib_cnt = NFE_NUM_MIB_STATV1;
 3183         else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
 3184                 mib_cnt = NFE_NUM_MIB_STATV2;
 3185         else
 3186                 return;
 3187 
 3188         for (i = 0; i < mib_cnt; i++)
 3189                 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
 3190 
 3191         if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
 3192                 NFE_READ(sc, NFE_TX_UNICAST);
 3193                 NFE_READ(sc, NFE_TX_MULTICAST);
 3194                 NFE_READ(sc, NFE_TX_BROADCAST);
 3195         }
 3196 }
 3197 
 3198 static void
 3199 nfe_stats_update(struct nfe_softc *sc)
 3200 {
 3201         struct nfe_hw_stats *stats;
 3202 
 3203         NFE_LOCK_ASSERT(sc);
 3204 
 3205         if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
 3206                 return;
 3207 
 3208         stats = &sc->nfe_stats;
 3209         stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
 3210         stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
 3211         stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
 3212         stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
 3213         stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
 3214         stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
 3215         stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
 3216         stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
 3217         stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
 3218         stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
 3219         stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
 3220         stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
 3221         stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
 3222         stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
 3223         stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
 3224         stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
 3225         stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
 3226         stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
 3227         stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
 3228         stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
 3229         stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
 3230 
 3231         if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
 3232                 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
 3233                 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
 3234                 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
 3235                 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
 3236                 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
 3237                 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
 3238         }
 3239 
 3240         if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
 3241                 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
 3242                 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
 3243                 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
 3244         }
 3245 }
 3246 
 3247 static void
 3248 nfe_set_linkspeed(struct nfe_softc *sc)
 3249 {
 3250         struct mii_softc *miisc;
 3251         struct mii_data *mii;
 3252         int aneg, i, phyno;
 3253 
 3254         NFE_LOCK_ASSERT(sc);
 3255 
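              /*
               * Used on the suspend/WOL path to force a 10/100 link,
               * presumably because the MAC cannot watch for wakeup frames
               * at gigabit speed in a low-power state.
               */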
 3256         mii = device_get_softc(sc->nfe_miibus);
 3257         mii_pollstat(mii);
 3258         aneg = 0;
 3259         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 3260             (IFM_ACTIVE | IFM_AVALID)) {
 3261                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 3262                 case IFM_10_T:
 3263                 case IFM_100_TX:
 3264                         return;
 3265                 case IFM_1000_T:
 3266                         aneg++;
 3267                         break;
 3268                 default:
 3269                         break;
 3270                 }
 3271         }
 3272         miisc = LIST_FIRST(&mii->mii_phys);
 3273         phyno = miisc->mii_phy;
 3274         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 3275                 PHY_RESET(miisc);
 3276         nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
 3277         nfe_miibus_writereg(sc->nfe_dev, phyno,
 3278             MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
 3279         nfe_miibus_writereg(sc->nfe_dev, phyno,
 3280             MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
 3281         DELAY(1000);
 3282         if (aneg != 0) {
 3283                 /*
 3284                  * Poll link state until nfe(4) gets a 10/100 Mbps link.
 3285                  */
 3286                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
 3287                         mii_pollstat(mii);
 3288                         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
 3289                             == (IFM_ACTIVE | IFM_AVALID)) {
 3290                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 3291                                 case IFM_10_T:
 3292                                 case IFM_100_TX:
 3293                                         nfe_mac_config(sc, mii);
 3294                                         return;
 3295                                 default:
 3296                                         break;
 3297                                 }
 3298                         }
 3299                         NFE_UNLOCK(sc);
 3300                         pause("nfelnk", hz);
 3301                         NFE_LOCK(sc);
 3302                 }
 3303                 if (i == MII_ANEGTICKS_GIGE)
 3304                         device_printf(sc->nfe_dev,
 3305                             "establishing a link failed, WOL may not work!\n");
 3306         }
 3307         /*
 3308          * No link, force MAC to have 100Mbps, full-duplex link.
 3309          * This is the last resort and may/may not work.
 3310          */
 3311         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
 3312         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
 3313         nfe_mac_config(sc, mii);
 3314 }
 3315 
 3316 static void
 3317 nfe_set_wol(struct nfe_softc *sc)
 3318 {
 3319         if_t ifp;
 3320         uint32_t wolctl;
 3321         int pmc;
 3322         uint16_t pmstat;
 3323 
 3324         NFE_LOCK_ASSERT(sc);
 3325 
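              /*
               * WOL sequence: program the WOL control register, drop the
               * link to 10/100, ungate the clocks on chips that need it,
               * keep Rx running so the MAC can see a magic frame, then
               * request PME# via PCI power management.
               */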
 3326         if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
 3327                 return;
 3328         ifp = sc->nfe_ifp;
 3329         if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
 3330                 wolctl = NFE_WOL_MAGIC;
 3331         else
 3332                 wolctl = 0;
 3333         NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
 3334         if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
 3335                 nfe_set_linkspeed(sc);
 3336                 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
 3337                         NFE_WRITE(sc, NFE_PWR2_CTL,
 3338                             NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
 3339                 /* Enable RX. */
 3340                 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
 3341                 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
 3342                 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
 3343                     NFE_RX_START);
 3344         }
 3345         /* Request PME if WOL is requested. */
 3346         pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
 3347         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 3348         if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
 3349                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 3350         pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 3351 }
