FreeBSD/Linux Kernel Cross Reference
sys/dev/e1000/if_lem.c


    1 /******************************************************************************
    2 
    3   Copyright (c) 2001-2012, Intel Corporation 
    4   All rights reserved.
    5   
    6   Redistribution and use in source and binary forms, with or without 
    7   modification, are permitted provided that the following conditions are met:
    8   
    9    1. Redistributions of source code must retain the above copyright notice, 
   10       this list of conditions and the following disclaimer.
   11   
   12    2. Redistributions in binary form must reproduce the above copyright 
   13       notice, this list of conditions and the following disclaimer in the 
   14       documentation and/or other materials provided with the distribution.
   15   
   16    3. Neither the name of the Intel Corporation nor the names of its 
   17       contributors may be used to endorse or promote products derived from 
   18       this software without specific prior written permission.
   19   
   20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
   24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30   POSSIBILITY OF SUCH DAMAGE.
   31 
   32 ******************************************************************************/
   33 /*$FreeBSD$*/
   34 
   35 #include "opt_inet.h"
   36 #include "opt_inet6.h"
   37 
   38 #ifdef HAVE_KERNEL_OPTION_HEADERS
   39 #include "opt_device_polling.h"
   40 #endif
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/bus.h>
   45 #include <sys/endian.h>
   46 #include <sys/kernel.h>
   47 #include <sys/kthread.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/module.h>
   51 #include <sys/rman.h>
   52 #include <sys/socket.h>
   53 #include <sys/sockio.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/taskqueue.h>
   56 #include <sys/eventhandler.h>
   57 #include <machine/bus.h>
   58 #include <machine/resource.h>
   59 
   60 #include <net/bpf.h>
   61 #include <net/ethernet.h>
   62 #include <net/if.h>
   63 #include <net/if_arp.h>
   64 #include <net/if_dl.h>
   65 #include <net/if_media.h>
   66 
   67 #include <net/if_types.h>
   68 #include <net/if_vlan_var.h>
   69 
   70 #include <netinet/in_systm.h>
   71 #include <netinet/in.h>
   72 #include <netinet/if_ether.h>
   73 #include <netinet/ip.h>
   74 #include <netinet/ip6.h>
   75 #include <netinet/tcp.h>
   76 #include <netinet/udp.h>
   77 
   78 #include <machine/in_cksum.h>
   79 #include <dev/led/led.h>
   80 #include <dev/pci/pcivar.h>
   81 #include <dev/pci/pcireg.h>
   82 
   83 #include "e1000_api.h"
   84 #include "if_lem.h"
   85 
   86 /*********************************************************************
   87  *  Legacy Em Driver version:
   88  *********************************************************************/
   89 char lem_driver_version[] = "1.0.6";
   90 
   91 /*********************************************************************
   92  *  PCI Device ID Table
   93  *
    94  *  Used by probe to select the devices it will attach to.
    95  *  The last field stores an index into lem_strings.
   96  *  Last entry must be all 0s
   97  *
   98  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
   99  *********************************************************************/
  100 
  101 static em_vendor_info_t lem_vendor_info_array[] =
  102 {
  103         /* Intel(R) PRO/1000 Network Connection */
  104         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
  105         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
  106         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
  107         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
  108         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
  109 
  110         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
  111         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
  112         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
  113         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
  114         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
  115         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
  116         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
  117 
  118         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
  119 
  120         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  121         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  122 
  123         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  124         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  125         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  126         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
  127 
  128         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  129         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  130         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  131         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  132         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
  133 
  134         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  135         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  136         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
  137         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
  138         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
  139         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
  140         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
  141         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
  142         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
  143                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
  144 
  145         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
  146         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
  147         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
  148         /* required last entry */
  149         { 0, 0, 0, 0, 0}
  150 };
  151 
  152 /*********************************************************************
  153  *  Table of branding strings for all supported NICs.
  154  *********************************************************************/
  155 
  156 static char *lem_strings[] = {
  157         "Intel(R) PRO/1000 Legacy Network Connection"
  158 };
  159 
  160 /*********************************************************************
  161  *  Function prototypes
  162  *********************************************************************/
  163 static int      lem_probe(device_t);
  164 static int      lem_attach(device_t);
  165 static int      lem_detach(device_t);
  166 static int      lem_shutdown(device_t);
  167 static int      lem_suspend(device_t);
  168 static int      lem_resume(device_t);
  169 static void     lem_start(struct ifnet *);
  170 static void     lem_start_locked(struct ifnet *ifp);
  171 static int      lem_ioctl(struct ifnet *, u_long, caddr_t);
  172 static void     lem_init(void *);
  173 static void     lem_init_locked(struct adapter *);
  174 static void     lem_stop(void *);
  175 static void     lem_media_status(struct ifnet *, struct ifmediareq *);
  176 static int      lem_media_change(struct ifnet *);
  177 static void     lem_identify_hardware(struct adapter *);
  178 static int      lem_allocate_pci_resources(struct adapter *);
  179 static int      lem_allocate_irq(struct adapter *adapter);
  180 static void     lem_free_pci_resources(struct adapter *);
  181 static void     lem_local_timer(void *);
  182 static int      lem_hardware_init(struct adapter *);
  183 static int      lem_setup_interface(device_t, struct adapter *);
  184 static void     lem_setup_transmit_structures(struct adapter *);
  185 static void     lem_initialize_transmit_unit(struct adapter *);
  186 static int      lem_setup_receive_structures(struct adapter *);
  187 static void     lem_initialize_receive_unit(struct adapter *);
  188 static void     lem_enable_intr(struct adapter *);
  189 static void     lem_disable_intr(struct adapter *);
  190 static void     lem_free_transmit_structures(struct adapter *);
  191 static void     lem_free_receive_structures(struct adapter *);
  192 static void     lem_update_stats_counters(struct adapter *);
  193 static void     lem_add_hw_stats(struct adapter *adapter);
  194 static void     lem_txeof(struct adapter *);
  195 static void     lem_tx_purge(struct adapter *);
  196 static int      lem_allocate_receive_structures(struct adapter *);
  197 static int      lem_allocate_transmit_structures(struct adapter *);
  198 static bool     lem_rxeof(struct adapter *, int, int *);
  199 #ifndef __NO_STRICT_ALIGNMENT
  200 static int      lem_fixup_rx(struct adapter *);
  201 #endif
  202 static void     lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
  203                     struct mbuf *);
  204 static void     lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
  205                     u32 *, u32 *);
  206 static void     lem_set_promisc(struct adapter *);
  207 static void     lem_disable_promisc(struct adapter *);
  208 static void     lem_set_multi(struct adapter *);
  209 static void     lem_update_link_status(struct adapter *);
  210 static int      lem_get_buf(struct adapter *, int);
  211 static void     lem_register_vlan(void *, struct ifnet *, u16);
  212 static void     lem_unregister_vlan(void *, struct ifnet *, u16);
  213 static void     lem_setup_vlan_hw_support(struct adapter *);
  214 static int      lem_xmit(struct adapter *, struct mbuf **);
  215 static void     lem_smartspeed(struct adapter *);
  216 static int      lem_82547_fifo_workaround(struct adapter *, int);
  217 static void     lem_82547_update_fifo_head(struct adapter *, int);
  218 static int      lem_82547_tx_fifo_reset(struct adapter *);
  219 static void     lem_82547_move_tail(void *);
  220 static int      lem_dma_malloc(struct adapter *, bus_size_t,
  221                     struct em_dma_alloc *, int);
  222 static void     lem_dma_free(struct adapter *, struct em_dma_alloc *);
  223 static int      lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
  224 static void     lem_print_nvm_info(struct adapter *);
  225 static int      lem_is_valid_ether_addr(u8 *);
  226 static u32      lem_fill_descriptors (bus_addr_t address, u32 length,
  227                     PDESC_ARRAY desc_array);
  228 static int      lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
  229 static void     lem_add_int_delay_sysctl(struct adapter *, const char *,
  230                     const char *, struct em_int_delay_info *, int, int);
  231 static void     lem_set_flow_cntrl(struct adapter *, const char *,
  232                     const char *, int *, int);
  233 /* Management and WOL Support */
  234 static void     lem_init_manageability(struct adapter *);
  235 static void     lem_release_manageability(struct adapter *);
  236 static void     lem_get_hw_control(struct adapter *);
  237 static void     lem_release_hw_control(struct adapter *);
  238 static void     lem_get_wakeup(device_t);
  239 static void     lem_enable_wakeup(device_t);
  240 static int      lem_enable_phy_wakeup(struct adapter *);
  241 static void     lem_led_func(void *, int);
  242 
  243 static void     lem_intr(void *);
  244 static int      lem_irq_fast(void *);
  245 static void     lem_handle_rxtx(void *context, int pending);
  246 static void     lem_handle_link(void *context, int pending);
  247 static void     lem_add_rx_process_limit(struct adapter *, const char *,
  248                     const char *, int *, int);
  249 
  250 #ifdef DEVICE_POLLING
  251 static poll_handler_t lem_poll;
   252 #endif /* DEVICE_POLLING */
  253 
  254 /*********************************************************************
  255  *  FreeBSD Device Interface Entry Points
  256  *********************************************************************/
  257 
  258 static device_method_t lem_methods[] = {
  259         /* Device interface */
  260         DEVMETHOD(device_probe, lem_probe),
  261         DEVMETHOD(device_attach, lem_attach),
  262         DEVMETHOD(device_detach, lem_detach),
  263         DEVMETHOD(device_shutdown, lem_shutdown),
  264         DEVMETHOD(device_suspend, lem_suspend),
  265         DEVMETHOD(device_resume, lem_resume),
  266         DEVMETHOD_END
  267 };
  268 
  269 static driver_t lem_driver = {
  270         "em", lem_methods, sizeof(struct adapter),
  271 };
  272 
  273 extern devclass_t em_devclass;
  274 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
  275 MODULE_DEPEND(lem, pci, 1, 1, 1);
  276 MODULE_DEPEND(lem, ether, 1, 1, 1);
  277 
  278 /*********************************************************************
  279  *  Tunable default values.
  280  *********************************************************************/
  281 
  282 #define EM_TICKS_TO_USECS(ticks)        ((1024 * (ticks) + 500) / 1000)
  283 #define EM_USECS_TO_TICKS(usecs)        ((1000 * (usecs) + 512) / 1024)
  284 
  285 #define MAX_INTS_PER_SEC        8000
  286 #define DEFAULT_ITR             (1000000000/(MAX_INTS_PER_SEC * 256))
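      /*
       * A worked example of the arithmetic above (assuming, as on these MACs,
       * that the delay registers count in 1.024us units and ITR in 256ns
       * units): DEFAULT_ITR = 1000000000 / (8000 * 256) = 488, i.e. an
       * interrupt throttle interval of roughly 125us, or at most about
       * 8000 interrupts per second.
       */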
  287 
  288 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
  289 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
  290 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
  291 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
  292 static int lem_rxd = EM_DEFAULT_RXD;
  293 static int lem_txd = EM_DEFAULT_TXD;
  294 static int lem_smart_pwr_down = FALSE;
  295 
   296 /* Controls whether promiscuous mode also shows bad packets */
  297 static int lem_debug_sbp = FALSE;
  298 
  299 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
  300 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
  301 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
  302 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
  303 TUNABLE_INT("hw.em.rxd", &lem_rxd);
  304 TUNABLE_INT("hw.em.txd", &lem_txd);
  305 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
  306 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
  307 
  308 /* Interrupt style - default to fast */
  309 static int lem_use_legacy_irq = 0;
  310 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
  311 
  312 /* How many packets rxeof tries to clean at a time */
  313 static int lem_rx_process_limit = 100;
  314 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
  315 
  316 /* Flow control setting - default to FULL */
  317 static int lem_fc_setting = e1000_fc_full;
  318 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
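      /*
       * These TUNABLE_INT knobs are read from the kernel environment at
       * module load time, so they are normally set in /boot/loader.conf,
       * for example (values shown are only illustrative):
       *
       *   hw.em.rx_process_limit="200"
       *   hw.em.fc_setting="0"
       */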
  319 
  320 /* Global used in WOL setup with multiport cards */
  321 static int global_quad_port_a = 0;
  322 
  323 #ifdef DEV_NETMAP       /* see ixgbe.c for details */
  324 #include <dev/netmap/if_lem_netmap.h>
  325 #endif /* DEV_NETMAP */
  326 
  327 /*********************************************************************
  328  *  Device identification routine
  329  *
   330  *  lem_probe determines whether the driver should be loaded for an
   331  *  adapter, based on that adapter's PCI vendor/device ID.
  332  *
  333  *  return BUS_PROBE_DEFAULT on success, positive on failure
  334  *********************************************************************/
  335 
  336 static int
  337 lem_probe(device_t dev)
  338 {
  339         char            adapter_name[60];
  340         u16             pci_vendor_id = 0;
  341         u16             pci_device_id = 0;
  342         u16             pci_subvendor_id = 0;
  343         u16             pci_subdevice_id = 0;
  344         em_vendor_info_t *ent;
  345 
   346         INIT_DEBUGOUT("lem_probe: begin");
  347 
  348         pci_vendor_id = pci_get_vendor(dev);
  349         if (pci_vendor_id != EM_VENDOR_ID)
  350                 return (ENXIO);
  351 
  352         pci_device_id = pci_get_device(dev);
  353         pci_subvendor_id = pci_get_subvendor(dev);
  354         pci_subdevice_id = pci_get_subdevice(dev);
  355 
  356         ent = lem_vendor_info_array;
  357         while (ent->vendor_id != 0) {
  358                 if ((pci_vendor_id == ent->vendor_id) &&
  359                     (pci_device_id == ent->device_id) &&
  360 
  361                     ((pci_subvendor_id == ent->subvendor_id) ||
  362                     (ent->subvendor_id == PCI_ANY_ID)) &&
  363 
  364                     ((pci_subdevice_id == ent->subdevice_id) ||
  365                     (ent->subdevice_id == PCI_ANY_ID))) {
  366                         sprintf(adapter_name, "%s %s",
  367                                 lem_strings[ent->index],
  368                                 lem_driver_version);
  369                         device_set_desc_copy(dev, adapter_name);
  370                         return (BUS_PROBE_DEFAULT);
  371                 }
  372                 ent++;
  373         }
  374 
  375         return (ENXIO);
  376 }
  377 
  378 /*********************************************************************
  379  *  Device initialization routine
  380  *
  381  *  The attach entry point is called when the driver is being loaded.
  382  *  This routine identifies the type of hardware, allocates all resources
  383  *  and initializes the hardware.
  384  *
  385  *  return 0 on success, positive on failure
  386  *********************************************************************/
  387 
  388 static int
  389 lem_attach(device_t dev)
  390 {
  391         struct adapter  *adapter;
  392         int             tsize, rsize;
  393         int             error = 0;
  394 
  395         INIT_DEBUGOUT("lem_attach: begin");
  396 
  397         adapter = device_get_softc(dev);
  398         adapter->dev = adapter->osdep.dev = dev;
  399         EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
  400         EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
  401         EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
  402 
  403         /* SYSCTL stuff */
  404         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  405             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  406             OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
  407             lem_sysctl_nvm_info, "I", "NVM Information");
  408 
  409         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
  410         callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
  411 
  412         /* Determine hardware and mac info */
  413         lem_identify_hardware(adapter);
  414 
  415         /* Setup PCI resources */
  416         if (lem_allocate_pci_resources(adapter)) {
  417                 device_printf(dev, "Allocation of PCI resources failed\n");
  418                 error = ENXIO;
  419                 goto err_pci;
  420         }
  421 
  422         /* Do Shared Code initialization */
  423         if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
  424                 device_printf(dev, "Setup of Shared code failed\n");
  425                 error = ENXIO;
  426                 goto err_pci;
  427         }
  428 
  429         e1000_get_bus_info(&adapter->hw);
  430 
  431         /* Set up some sysctls for the tunable interrupt delays */
  432         lem_add_int_delay_sysctl(adapter, "rx_int_delay",
  433             "receive interrupt delay in usecs", &adapter->rx_int_delay,
  434             E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
  435         lem_add_int_delay_sysctl(adapter, "tx_int_delay",
  436             "transmit interrupt delay in usecs", &adapter->tx_int_delay,
  437             E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
  438         if (adapter->hw.mac.type >= e1000_82540) {
  439                 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
  440                     "receive interrupt delay limit in usecs",
  441                     &adapter->rx_abs_int_delay,
  442                     E1000_REGISTER(&adapter->hw, E1000_RADV),
  443                     lem_rx_abs_int_delay_dflt);
  444                 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
  445                     "transmit interrupt delay limit in usecs",
  446                     &adapter->tx_abs_int_delay,
  447                     E1000_REGISTER(&adapter->hw, E1000_TADV),
  448                     lem_tx_abs_int_delay_dflt);
  449                 lem_add_int_delay_sysctl(adapter, "itr",
  450                     "interrupt delay limit in usecs/4",
  451                     &adapter->tx_itr,
  452                     E1000_REGISTER(&adapter->hw, E1000_ITR),
  453                     DEFAULT_ITR);
  454         }
  455 
  456         /* Sysctls for limiting the amount of work done in the taskqueue */
  457         lem_add_rx_process_limit(adapter, "rx_processing_limit",
  458             "max number of rx packets to process", &adapter->rx_process_limit,
  459             lem_rx_process_limit);
  460 
  461         /* Sysctl for setting the interface flow control */
  462         lem_set_flow_cntrl(adapter, "flow_control",
  463             "flow control setting",
  464             &adapter->fc_setting, lem_fc_setting);
  465 
  466         /*
   467          * Validate the number of transmit and receive descriptors. It
   468          * must not exceed the hardware maximum and must be a multiple
   469          * of EM_DBA_ALIGN.
  470          */
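              /*
               * For example, each legacy descriptor is 16 bytes, so a ring of
               * 256 descriptors occupies 4096 bytes, which is a multiple of
               * EM_DBA_ALIGN (assumed here to be the usual 128 bytes).
               */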
  471         if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
  472             (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
  473             (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
  474             (lem_txd < EM_MIN_TXD)) {
  475                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
  476                     EM_DEFAULT_TXD, lem_txd);
  477                 adapter->num_tx_desc = EM_DEFAULT_TXD;
  478         } else
  479                 adapter->num_tx_desc = lem_txd;
  480         if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
  481             (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
  482             (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
  483             (lem_rxd < EM_MIN_RXD)) {
  484                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
  485                     EM_DEFAULT_RXD, lem_rxd);
  486                 adapter->num_rx_desc = EM_DEFAULT_RXD;
  487         } else
  488                 adapter->num_rx_desc = lem_rxd;
  489 
  490         adapter->hw.mac.autoneg = DO_AUTO_NEG;
  491         adapter->hw.phy.autoneg_wait_to_complete = FALSE;
  492         adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
  493         adapter->rx_buffer_len = 2048;
  494 
  495         e1000_init_script_state_82541(&adapter->hw, TRUE);
  496         e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
  497 
  498         /* Copper options */
  499         if (adapter->hw.phy.media_type == e1000_media_type_copper) {
  500                 adapter->hw.phy.mdix = AUTO_ALL_MODES;
  501                 adapter->hw.phy.disable_polarity_correction = FALSE;
  502                 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
  503         }
  504 
  505         /*
  506          * Set the frame limits assuming
   507          * standard Ethernet-sized frames.
  508          */
  509         adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
  510         adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
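              /*
               * With the standard header sizes this works out to
               * 1500 + 14 + 4 = 1518 bytes for max_frame_size and
               * 60 + 4 = 64 bytes for min_frame_size, i.e. the classic
               * Ethernet frame limits.
               */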
  511 
  512         /*
  513          * This controls when hardware reports transmit completion
  514          * status.
  515          */
  516         adapter->hw.mac.report_tx_early = 1;
  517 
  518         tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
  519             EM_DBA_ALIGN);
  520 
  521         /* Allocate Transmit Descriptor ring */
  522         if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
  523                 device_printf(dev, "Unable to allocate tx_desc memory\n");
  524                 error = ENOMEM;
  525                 goto err_tx_desc;
  526         }
  527         adapter->tx_desc_base = 
  528             (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
  529 
  530         rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
  531             EM_DBA_ALIGN);
  532 
  533         /* Allocate Receive Descriptor ring */
  534         if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
  535                 device_printf(dev, "Unable to allocate rx_desc memory\n");
  536                 error = ENOMEM;
  537                 goto err_rx_desc;
  538         }
  539         adapter->rx_desc_base =
  540             (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
  541 
  542         /* Allocate multicast array memory. */
  543         adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
  544             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
  545         if (adapter->mta == NULL) {
  546                 device_printf(dev, "Can not allocate multicast setup array\n");
  547                 error = ENOMEM;
  548                 goto err_hw_init;
  549         }
  550 
  551         /*
   552         ** Start from a known state; this is
   553         ** important before reading the NVM and
   554         ** the MAC address from it.
  555         */
  556         e1000_reset_hw(&adapter->hw);
  557 
  558         /* Make sure we have a good EEPROM before we read from it */
  559         if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
  560                 /*
  561                 ** Some PCI-E parts fail the first check due to
   562                 ** the link being in a sleep state; check again, and
   563                 ** if it fails a second time it's a real issue.
  564                 */
  565                 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
  566                         device_printf(dev,
  567                             "The EEPROM Checksum Is Not Valid\n");
  568                         error = EIO;
  569                         goto err_hw_init;
  570                 }
  571         }
  572 
  573         /* Copy the permanent MAC address out of the EEPROM */
  574         if (e1000_read_mac_addr(&adapter->hw) < 0) {
  575                 device_printf(dev, "EEPROM read error while reading MAC"
  576                     " address\n");
  577                 error = EIO;
  578                 goto err_hw_init;
  579         }
  580 
  581         if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
  582                 device_printf(dev, "Invalid MAC address\n");
  583                 error = EIO;
  584                 goto err_hw_init;
  585         }
  586 
  587         /* Initialize the hardware */
  588         if (lem_hardware_init(adapter)) {
  589                 device_printf(dev, "Unable to initialize the hardware\n");
  590                 error = EIO;
  591                 goto err_hw_init;
  592         }
  593 
  594         /* Allocate transmit descriptors and buffers */
  595         if (lem_allocate_transmit_structures(adapter)) {
  596                 device_printf(dev, "Could not setup transmit structures\n");
  597                 error = ENOMEM;
  598                 goto err_tx_struct;
  599         }
  600 
  601         /* Allocate receive descriptors and buffers */
  602         if (lem_allocate_receive_structures(adapter)) {
  603                 device_printf(dev, "Could not setup receive structures\n");
  604                 error = ENOMEM;
  605                 goto err_rx_struct;
  606         }
  607 
  608         /*
  609         **  Do interrupt configuration
  610         */
  611         error = lem_allocate_irq(adapter);
  612         if (error)
  613                 goto err_rx_struct;
  614 
  615         /*
  616          * Get Wake-on-Lan and Management info for later use
  617          */
  618         lem_get_wakeup(dev);
  619 
  620         /* Setup OS specific network interface */
  621         if (lem_setup_interface(dev, adapter) != 0)
  622                 goto err_rx_struct;
  623 
  624         /* Initialize statistics */
  625         lem_update_stats_counters(adapter);
  626 
  627         adapter->hw.mac.get_link_status = 1;
  628         lem_update_link_status(adapter);
  629 
  630         /* Indicate SOL/IDER usage */
  631         if (e1000_check_reset_block(&adapter->hw))
  632                 device_printf(dev,
  633                     "PHY reset is blocked due to SOL/IDER session.\n");
  634 
  635         /* Do we need workaround for 82544 PCI-X adapter? */
  636         if (adapter->hw.bus.type == e1000_bus_type_pcix &&
  637             adapter->hw.mac.type == e1000_82544)
  638                 adapter->pcix_82544 = TRUE;
  639         else
  640                 adapter->pcix_82544 = FALSE;
  641 
  642         /* Register for VLAN events */
  643         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
  644             lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
  645         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
  646             lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 
  647 
  648         lem_add_hw_stats(adapter);
  649 
  650         /* Non-AMT based hardware can now take control from firmware */
  651         if (adapter->has_manage && !adapter->has_amt)
  652                 lem_get_hw_control(adapter);
  653 
  654         /* Tell the stack that the interface is not active */
  655         adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  656 
  657         adapter->led_dev = led_create(lem_led_func, adapter,
  658             device_get_nameunit(dev));
  659 
  660 #ifdef DEV_NETMAP
  661         lem_netmap_attach(adapter);
  662 #endif /* DEV_NETMAP */
  663         INIT_DEBUGOUT("lem_attach: end");
  664 
  665         return (0);
  666 
  667 err_rx_struct:
  668         lem_free_transmit_structures(adapter);
  669 err_tx_struct:
  670 err_hw_init:
  671         lem_release_hw_control(adapter);
  672         lem_dma_free(adapter, &adapter->rxdma);
  673 err_rx_desc:
  674         lem_dma_free(adapter, &adapter->txdma);
  675 err_tx_desc:
  676 err_pci:
  677         if (adapter->ifp != NULL)
  678                 if_free(adapter->ifp);
  679         lem_free_pci_resources(adapter);
  680         free(adapter->mta, M_DEVBUF);
  681         EM_TX_LOCK_DESTROY(adapter);
  682         EM_RX_LOCK_DESTROY(adapter);
  683         EM_CORE_LOCK_DESTROY(adapter);
  684 
  685         return (error);
  686 }
  687 
  688 /*********************************************************************
  689  *  Device removal routine
  690  *
  691  *  The detach entry point is called when the driver is being removed.
  692  *  This routine stops the adapter and deallocates all the resources
  693  *  that were allocated for driver operation.
  694  *
  695  *  return 0 on success, positive on failure
  696  *********************************************************************/
  697 
  698 static int
  699 lem_detach(device_t dev)
  700 {
  701         struct adapter  *adapter = device_get_softc(dev);
  702         struct ifnet    *ifp = adapter->ifp;
  703 
   704         INIT_DEBUGOUT("lem_detach: begin");
  705 
  706         /* Make sure VLANS are not using driver */
  707         if (adapter->ifp->if_vlantrunk != NULL) {
  708                 device_printf(dev,"Vlan in use, detach first\n");
  709                 return (EBUSY);
  710         }
  711 
  712 #ifdef DEVICE_POLLING
  713         if (ifp->if_capenable & IFCAP_POLLING)
  714                 ether_poll_deregister(ifp);
  715 #endif
  716 
  717         if (adapter->led_dev != NULL)
  718                 led_destroy(adapter->led_dev);
  719 
  720         EM_CORE_LOCK(adapter);
  721         EM_TX_LOCK(adapter);
  722         adapter->in_detach = 1;
  723         lem_stop(adapter);
  724         e1000_phy_hw_reset(&adapter->hw);
  725 
  726         lem_release_manageability(adapter);
  727 
  728         EM_TX_UNLOCK(adapter);
  729         EM_CORE_UNLOCK(adapter);
  730 
  731         /* Unregister VLAN events */
  732         if (adapter->vlan_attach != NULL)
  733                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
  734         if (adapter->vlan_detach != NULL)
  735                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 
  736 
  737         ether_ifdetach(adapter->ifp);
  738         callout_drain(&adapter->timer);
  739         callout_drain(&adapter->tx_fifo_timer);
  740 
  741 #ifdef DEV_NETMAP
  742         netmap_detach(ifp);
  743 #endif /* DEV_NETMAP */
  744         lem_free_pci_resources(adapter);
  745         bus_generic_detach(dev);
  746         if_free(ifp);
  747 
  748         lem_free_transmit_structures(adapter);
  749         lem_free_receive_structures(adapter);
  750 
  751         /* Free Transmit Descriptor ring */
  752         if (adapter->tx_desc_base) {
  753                 lem_dma_free(adapter, &adapter->txdma);
  754                 adapter->tx_desc_base = NULL;
  755         }
  756 
  757         /* Free Receive Descriptor ring */
  758         if (adapter->rx_desc_base) {
  759                 lem_dma_free(adapter, &adapter->rxdma);
  760                 adapter->rx_desc_base = NULL;
  761         }
  762 
  763         lem_release_hw_control(adapter);
  764         free(adapter->mta, M_DEVBUF);
  765         EM_TX_LOCK_DESTROY(adapter);
  766         EM_RX_LOCK_DESTROY(adapter);
  767         EM_CORE_LOCK_DESTROY(adapter);
  768 
  769         return (0);
  770 }
  771 
  772 /*********************************************************************
  773  *
  774  *  Shutdown entry point
  775  *
  776  **********************************************************************/
  777 
  778 static int
  779 lem_shutdown(device_t dev)
  780 {
  781         return lem_suspend(dev);
  782 }
  783 
  784 /*
  785  * Suspend/resume device methods.
  786  */
  787 static int
  788 lem_suspend(device_t dev)
  789 {
  790         struct adapter *adapter = device_get_softc(dev);
  791 
  792         EM_CORE_LOCK(adapter);
  793 
  794         lem_release_manageability(adapter);
  795         lem_release_hw_control(adapter);
  796         lem_enable_wakeup(dev);
  797 
  798         EM_CORE_UNLOCK(adapter);
  799 
  800         return bus_generic_suspend(dev);
  801 }
  802 
  803 static int
  804 lem_resume(device_t dev)
  805 {
  806         struct adapter *adapter = device_get_softc(dev);
  807         struct ifnet *ifp = adapter->ifp;
  808 
  809         EM_CORE_LOCK(adapter);
  810         lem_init_locked(adapter);
  811         lem_init_manageability(adapter);
  812         EM_CORE_UNLOCK(adapter);
  813         lem_start(ifp);
  814 
  815         return bus_generic_resume(dev);
  816 }
  817 
  818 
  819 static void
  820 lem_start_locked(struct ifnet *ifp)
  821 {
  822         struct adapter  *adapter = ifp->if_softc;
  823         struct mbuf     *m_head;
  824 
  825         EM_TX_LOCK_ASSERT(adapter);
  826 
  827         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
  828             IFF_DRV_RUNNING)
  829                 return;
  830         if (!adapter->link_active)
  831                 return;
  832 
  833         /*
  834          * Force a cleanup if number of TX descriptors
  835          * available hits the threshold
  836          */
  837         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
  838                 lem_txeof(adapter);
   839                 /* Do we now at least have the minimum needed? */
  840                 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
  841                         adapter->no_tx_desc_avail1++;
  842                         return;
  843                 }
  844         }
  845 
  846         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
  847 
  848                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
  849                 if (m_head == NULL)
  850                         break;
  851                 /*
   852                  *  Encapsulation can modify our pointer, and/or make it
  853                  *  NULL on failure.  In that event, we can't requeue.
  854                  */
  855                 if (lem_xmit(adapter, &m_head)) {
  856                         if (m_head == NULL)
  857                                 break;
  858                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  859                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
  860                         break;
  861                 }
  862 
  863                 /* Send a copy of the frame to the BPF listener */
  864                 ETHER_BPF_MTAP(ifp, m_head);
  865 
  866                 /* Set timeout in case hardware has problems transmitting. */
  867                 adapter->watchdog_check = TRUE;
  868                 adapter->watchdog_time = ticks;
  869         }
  870         if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
  871                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  872 
  873         return;
  874 }
  875 
  876 static void
  877 lem_start(struct ifnet *ifp)
  878 {
  879         struct adapter *adapter = ifp->if_softc;
  880 
  881         EM_TX_LOCK(adapter);
  882         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  883                 lem_start_locked(ifp);
  884         EM_TX_UNLOCK(adapter);
  885 }
  886 
  887 /*********************************************************************
  888  *  Ioctl entry point
  889  *
   890  *  lem_ioctl is called when the user wants to configure the
  891  *  interface.
  892  *
  893  *  return 0 on success, positive on failure
  894  **********************************************************************/
  895 
  896 static int
  897 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
  898 {
  899         struct adapter  *adapter = ifp->if_softc;
  900         struct ifreq    *ifr = (struct ifreq *)data;
  901 #if defined(INET) || defined(INET6)
  902         struct ifaddr   *ifa = (struct ifaddr *)data;
  903 #endif
  904         bool            avoid_reset = FALSE;
  905         int             error = 0;
  906 
  907         if (adapter->in_detach)
  908                 return (error);
  909 
  910         switch (command) {
  911         case SIOCSIFADDR:
  912 #ifdef INET
  913                 if (ifa->ifa_addr->sa_family == AF_INET)
  914                         avoid_reset = TRUE;
  915 #endif
  916 #ifdef INET6
  917                 if (ifa->ifa_addr->sa_family == AF_INET6)
  918                         avoid_reset = TRUE;
  919 #endif
  920                 /*
  921                 ** Calling init results in link renegotiation,
  922                 ** so we avoid doing it when possible.
  923                 */
  924                 if (avoid_reset) {
  925                         ifp->if_flags |= IFF_UP;
  926                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
  927                                 lem_init(adapter);
  928 #ifdef INET
  929                         if (!(ifp->if_flags & IFF_NOARP))
  930                                 arp_ifinit(ifp, ifa);
  931 #endif
  932                 } else
  933                         error = ether_ioctl(ifp, command, data);
  934                 break;
  935         case SIOCSIFMTU:
  936             {
  937                 int max_frame_size;
  938 
  939                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
  940 
  941                 EM_CORE_LOCK(adapter);
  942                 switch (adapter->hw.mac.type) {
  943                 case e1000_82542:
  944                         max_frame_size = ETHER_MAX_LEN;
  945                         break;
  946                 default:
  947                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
  948                 }
  949                 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
  950                     ETHER_CRC_LEN) {
  951                         EM_CORE_UNLOCK(adapter);
  952                         error = EINVAL;
  953                         break;
  954                 }
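                      /*
                       * For the 82542 this caps the MTU at 1518 - 14 - 4 = 1500
                       * bytes; the other supported MACs allow jumbo-sized MTUs
                       * up to MAX_JUMBO_FRAME_SIZE less the header and CRC.
                       */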
  955 
  956                 ifp->if_mtu = ifr->ifr_mtu;
  957                 adapter->max_frame_size =
  958                     ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
  959                 lem_init_locked(adapter);
  960                 EM_CORE_UNLOCK(adapter);
  961                 break;
  962             }
  963         case SIOCSIFFLAGS:
   964                 IOCTL_DEBUGOUT("ioctl rcv'd: "
   965                     "SIOCSIFFLAGS (Set Interface Flags)");
  966                 EM_CORE_LOCK(adapter);
  967                 if (ifp->if_flags & IFF_UP) {
  968                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  969                                 if ((ifp->if_flags ^ adapter->if_flags) &
  970                                     (IFF_PROMISC | IFF_ALLMULTI)) {
  971                                         lem_disable_promisc(adapter);
  972                                         lem_set_promisc(adapter);
  973                                 }
  974                         } else
  975                                 lem_init_locked(adapter);
  976                 } else
  977                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  978                                 EM_TX_LOCK(adapter);
  979                                 lem_stop(adapter);
  980                                 EM_TX_UNLOCK(adapter);
  981                         }
  982                 adapter->if_flags = ifp->if_flags;
  983                 EM_CORE_UNLOCK(adapter);
  984                 break;
  985         case SIOCADDMULTI:
  986         case SIOCDELMULTI:
  987                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
  988                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  989                         EM_CORE_LOCK(adapter);
  990                         lem_disable_intr(adapter);
  991                         lem_set_multi(adapter);
  992                         if (adapter->hw.mac.type == e1000_82542 && 
  993                             adapter->hw.revision_id == E1000_REVISION_2) {
  994                                 lem_initialize_receive_unit(adapter);
  995                         }
  996 #ifdef DEVICE_POLLING
  997                         if (!(ifp->if_capenable & IFCAP_POLLING))
  998 #endif
  999                                 lem_enable_intr(adapter);
 1000                         EM_CORE_UNLOCK(adapter);
 1001                 }
 1002                 break;
 1003         case SIOCSIFMEDIA:
 1004                 /* Check SOL/IDER usage */
 1005                 EM_CORE_LOCK(adapter);
 1006                 if (e1000_check_reset_block(&adapter->hw)) {
 1007                         EM_CORE_UNLOCK(adapter);
 1008                         device_printf(adapter->dev, "Media change is"
 1009                             " blocked due to SOL/IDER session.\n");
 1010                         break;
 1011                 }
 1012                 EM_CORE_UNLOCK(adapter);
 1013         case SIOCGIFMEDIA:
  1014                 IOCTL_DEBUGOUT("ioctl rcv'd: "
  1015                     "SIOCxIFMEDIA (Get/Set Interface Media)");
 1016                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
 1017                 break;
 1018         case SIOCSIFCAP:
 1019             {
 1020                 int mask, reinit;
 1021 
 1022                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
 1023                 reinit = 0;
 1024                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1025 #ifdef DEVICE_POLLING
 1026                 if (mask & IFCAP_POLLING) {
 1027                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 1028                                 error = ether_poll_register(lem_poll, ifp);
 1029                                 if (error)
 1030                                         return (error);
 1031                                 EM_CORE_LOCK(adapter);
 1032                                 lem_disable_intr(adapter);
 1033                                 ifp->if_capenable |= IFCAP_POLLING;
 1034                                 EM_CORE_UNLOCK(adapter);
 1035                         } else {
 1036                                 error = ether_poll_deregister(ifp);
 1037                                 /* Enable interrupt even in error case */
 1038                                 EM_CORE_LOCK(adapter);
 1039                                 lem_enable_intr(adapter);
 1040                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1041                                 EM_CORE_UNLOCK(adapter);
 1042                         }
 1043                 }
 1044 #endif
 1045                 if (mask & IFCAP_HWCSUM) {
 1046                         ifp->if_capenable ^= IFCAP_HWCSUM;
 1047                         reinit = 1;
 1048                 }
 1049                 if (mask & IFCAP_VLAN_HWTAGGING) {
 1050                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1051                         reinit = 1;
 1052                 }
 1053                 if ((mask & IFCAP_WOL) &&
 1054                     (ifp->if_capabilities & IFCAP_WOL) != 0) {
 1055                         if (mask & IFCAP_WOL_MCAST)
 1056                                 ifp->if_capenable ^= IFCAP_WOL_MCAST;
 1057                         if (mask & IFCAP_WOL_MAGIC)
 1058                                 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1059                 }
 1060                 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
 1061                         lem_init(adapter);
 1062                 VLAN_CAPABILITIES(ifp);
 1063                 break;
 1064             }
 1065 
 1066         default:
 1067                 error = ether_ioctl(ifp, command, data);
 1068                 break;
 1069         }
 1070 
 1071         return (error);
 1072 }
 1073 
 1074 
 1075 /*********************************************************************
 1076  *  Init entry point
 1077  *
  1078  *  This routine is used in two ways. It is used by the stack as the
  1079  *  init entry point in the network interface structure. It is also
  1080  *  used by the driver as a hw/sw initialization routine to get to a
  1081  *  consistent state.
  1082  *
  1083  *  It returns nothing; errors are reported via device_printf.
 1084  **********************************************************************/
 1085 
 1086 static void
 1087 lem_init_locked(struct adapter *adapter)
 1088 {
 1089         struct ifnet    *ifp = adapter->ifp;
 1090         device_t        dev = adapter->dev;
 1091         u32             pba;
 1092 
 1093         INIT_DEBUGOUT("lem_init: begin");
 1094 
 1095         EM_CORE_LOCK_ASSERT(adapter);
 1096 
 1097         EM_TX_LOCK(adapter);
 1098         lem_stop(adapter);
 1099         EM_TX_UNLOCK(adapter);
 1100 
 1101         /*
 1102          * Packet Buffer Allocation (PBA)
  1103          * Writing PBA sets the receive portion of the buffer;
  1104          * the remainder is used for the transmit buffer.
 1105          *
 1106          * Devices before the 82547 had a Packet Buffer of 64K.
 1107          *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
 1108          * After the 82547 the buffer was reduced to 40K.
 1109          *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
 1110          *   Note: default does not leave enough room for Jumbo Frame >10k.
 1111          */
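              /*
               * Example: on an 82547 with standard-sized frames the code below
               * picks pba = E1000_PBA_30K, so roughly 30K of the 40K packet
               * buffer goes to Rx and the remaining ~10K becomes the Tx FIFO
               * tracked by the 82547 workaround.
               */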
 1112         switch (adapter->hw.mac.type) {
 1113         case e1000_82547:
 1114         case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
 1115                 if (adapter->max_frame_size > 8192)
 1116                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
 1117                 else
 1118                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
 1119                 adapter->tx_fifo_head = 0;
 1120                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
 1121                 adapter->tx_fifo_size =
 1122                     (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
 1123                 break;
 1124         default:
 1125                 /* Devices before 82547 had a Packet Buffer of 64K.   */
 1126                 if (adapter->max_frame_size > 8192)
 1127                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
 1128                 else
 1129                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
 1130         }
 1131 
 1132         INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
 1133         E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
 1134         
 1135         /* Get the latest mac address, User can use a LAA */
 1136         bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
 1137               ETHER_ADDR_LEN);
 1138 
 1139         /* Put the address into the Receive Address Array */
 1140         e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 1141 
 1142         /* Initialize the hardware */
 1143         if (lem_hardware_init(adapter)) {
 1144                 device_printf(dev, "Unable to initialize the hardware\n");
 1145                 return;
 1146         }
 1147         lem_update_link_status(adapter);
 1148 
 1149         /* Setup VLAN support, basic and offload if available */
 1150         E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
 1151 
 1152         /* Set hardware offload abilities */
 1153         ifp->if_hwassist = 0;
 1154         if (adapter->hw.mac.type >= e1000_82543) {
 1155                 if (ifp->if_capenable & IFCAP_TXCSUM)
 1156                         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
 1157         }
 1158 
 1159         /* Configure for OS presence */
 1160         lem_init_manageability(adapter);
 1161 
 1162         /* Prepare transmit descriptors and buffers */
 1163         lem_setup_transmit_structures(adapter);
 1164         lem_initialize_transmit_unit(adapter);
 1165 
 1166         /* Setup Multicast table */
 1167         lem_set_multi(adapter);
 1168 
 1169         /* Prepare receive descriptors and buffers */
 1170         if (lem_setup_receive_structures(adapter)) {
 1171                 device_printf(dev, "Could not setup receive structures\n");
 1172                 EM_TX_LOCK(adapter);
 1173                 lem_stop(adapter);
 1174                 EM_TX_UNLOCK(adapter);
 1175                 return;
 1176         }
 1177         lem_initialize_receive_unit(adapter);
 1178 
 1179         /* Use real VLAN Filter support? */
 1180         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
 1181                 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
 1182                         /* Use real VLAN Filter support */
 1183                         lem_setup_vlan_hw_support(adapter);
 1184                 else {
 1185                         u32 ctrl;
 1186                         ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
 1187                         ctrl |= E1000_CTRL_VME;
 1188                         E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
 1189                 }
 1190         }
 1191 
 1192         /* Don't lose promiscuous settings */
 1193         lem_set_promisc(adapter);
 1194 
 1195         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1196         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1197 
 1198         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
 1199         e1000_clear_hw_cntrs_base_generic(&adapter->hw);
 1200 
 1201 #ifdef DEVICE_POLLING
 1202         /*
 1203          * Only enable interrupts if we are not polling, make sure
 1204          * they are off otherwise.
 1205          */
 1206         if (ifp->if_capenable & IFCAP_POLLING)
 1207                 lem_disable_intr(adapter);
 1208         else
 1209 #endif /* DEVICE_POLLING */
 1210                 lem_enable_intr(adapter);
 1211 
 1212         /* AMT based hardware can now take control from firmware */
 1213         if (adapter->has_manage && adapter->has_amt)
 1214                 lem_get_hw_control(adapter);
 1215 }
 1216 
 1217 static void
 1218 lem_init(void *arg)
 1219 {
 1220         struct adapter *adapter = arg;
 1221 
 1222         EM_CORE_LOCK(adapter);
 1223         lem_init_locked(adapter);
 1224         EM_CORE_UNLOCK(adapter);
 1225 }
 1226 
 1227 
 1228 #ifdef DEVICE_POLLING
 1229 /*********************************************************************
 1230  *
 1231  *  Legacy polling routine  
 1232  *
 1233  *********************************************************************/
 1234 static int
 1235 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1236 {
 1237         struct adapter *adapter = ifp->if_softc;
 1238         u32             reg_icr, rx_done = 0;
 1239 
 1240         EM_CORE_LOCK(adapter);
 1241         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1242                 EM_CORE_UNLOCK(adapter);
 1243                 return (rx_done);
 1244         }
 1245 
 1246         if (cmd == POLL_AND_CHECK_STATUS) {
 1247                 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
 1248                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 1249                         callout_stop(&adapter->timer);
 1250                         adapter->hw.mac.get_link_status = 1;
 1251                         lem_update_link_status(adapter);
 1252                         callout_reset(&adapter->timer, hz,
 1253                             lem_local_timer, adapter);
 1254                 }
 1255         }
 1256         EM_CORE_UNLOCK(adapter);
 1257 
 1258         lem_rxeof(adapter, count, &rx_done);
 1259 
 1260         EM_TX_LOCK(adapter);
 1261         lem_txeof(adapter);
 1262         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1263                 lem_start_locked(ifp);
 1264         EM_TX_UNLOCK(adapter);
 1265         return (rx_done);
 1266 }
 1267 #endif /* DEVICE_POLLING */
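      /*
       * Note that lem_poll() is compiled in only when the kernel is built
       * with "options DEVICE_POLLING"; polling is then toggled per
       * interface, e.g. "ifconfig em0 polling" (em0 is just an example
       * unit name).
       */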
 1268 
 1269 /*********************************************************************
 1270  *
 1271  *  Legacy Interrupt Service routine  
 1272  *
 1273  *********************************************************************/
 1274 static void
 1275 lem_intr(void *arg)
 1276 {
 1277         struct adapter  *adapter = arg;
 1278         struct ifnet    *ifp = adapter->ifp;
 1279         u32             reg_icr;
 1280 
 1281 
 1282         if ((ifp->if_capenable & IFCAP_POLLING) ||
 1283             ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
 1284                 return;
 1285 
 1286         EM_CORE_LOCK(adapter);
 1287         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
 1288         if (reg_icr & E1000_ICR_RXO)
 1289                 adapter->rx_overruns++;
 1290 
 1291         if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
 1292                 EM_CORE_UNLOCK(adapter);
 1293                 return;
 1294         }
 1295 
 1296         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 1297                 callout_stop(&adapter->timer);
 1298                 adapter->hw.mac.get_link_status = 1;
 1299                 lem_update_link_status(adapter);
 1300                 /* Deal with TX cruft when link lost */
 1301                 lem_tx_purge(adapter);
 1302                 callout_reset(&adapter->timer, hz,
 1303                     lem_local_timer, adapter);
 1304                 EM_CORE_UNLOCK(adapter);
 1305                 return;
 1306         }
 1307 
 1308         EM_CORE_UNLOCK(adapter);
 1309         lem_rxeof(adapter, -1, NULL);
 1310 
 1311         EM_TX_LOCK(adapter);
 1312         lem_txeof(adapter);
 1313         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 1314             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1315                 lem_start_locked(ifp);
 1316         EM_TX_UNLOCK(adapter);
 1317         return;
 1318 }
 1319 
 1320 
 1321 static void
 1322 lem_handle_link(void *context, int pending)
 1323 {
 1324         struct adapter  *adapter = context;
 1325         struct ifnet *ifp = adapter->ifp;
 1326 
 1327         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1328                 return;
 1329 
 1330         EM_CORE_LOCK(adapter);
 1331         callout_stop(&adapter->timer);
 1332         lem_update_link_status(adapter);
 1333         /* Deal with TX cruft when link lost */
 1334         lem_tx_purge(adapter);
 1335         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
 1336         EM_CORE_UNLOCK(adapter);
 1337 }
 1338 
 1339 
 1340 /* Combined RX/TX handler, used by Legacy and MSI */
 1341 static void
 1342 lem_handle_rxtx(void *context, int pending)
 1343 {
 1344         struct adapter  *adapter = context;
 1345         struct ifnet    *ifp = adapter->ifp;
 1346 
 1347 
 1348         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1349                 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
 1350                 EM_TX_LOCK(adapter);
 1351                 lem_txeof(adapter);
 1352                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1353                         lem_start_locked(ifp);
 1354                 EM_TX_UNLOCK(adapter);
 1355                 if (more) {
 1356                         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
 1357                         return;
 1358                 }
 1359         }
 1360 
 1361         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1362                 lem_enable_intr(adapter);
 1363 }
 1364 
 1365 /*********************************************************************
 1366  *
 1367  *  Fast Legacy/MSI Combined Interrupt Service routine  
 1368  *
 1369  *********************************************************************/
 1370 static int
 1371 lem_irq_fast(void *arg)
 1372 {
 1373         struct adapter  *adapter = arg;
 1374         struct ifnet    *ifp;
 1375         u32             reg_icr;
 1376 
 1377         ifp = adapter->ifp;
 1378 
 1379         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
 1380 
 1381         /* Hot eject?  */
 1382         if (reg_icr == 0xffffffff)
 1383                 return FILTER_STRAY;
 1384 
 1385         /* Definitely not our interrupt.  */
 1386         if (reg_icr == 0x0)
 1387                 return FILTER_STRAY;
 1388 
 1389         /*
 1390          * Mask interrupts until the taskqueue is finished running.  This is
 1391          * cheap, just assume that it is needed.  This also works around the
 1392          * MSI message reordering errata on certain systems.
 1393          */
 1394         lem_disable_intr(adapter);
 1395         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
 1396 
 1397         /* Link status change */
 1398         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 1399                 adapter->hw.mac.get_link_status = 1;
 1400                 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
 1401         }
 1402 
 1403         if (reg_icr & E1000_ICR_RXO)
 1404                 adapter->rx_overruns++;
 1405         return FILTER_HANDLED;
 1406 }
 1407 
 1408 
 1409 /*********************************************************************
 1410  *
 1411  *  Media Ioctl callback
 1412  *
 1413  *  This routine is called whenever the user queries the status of
 1414  *  the interface using ifconfig.
 1415  *
 1416  **********************************************************************/
 1417 static void
 1418 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 1419 {
 1420         struct adapter *adapter = ifp->if_softc;
 1421         u_char fiber_type = IFM_1000_SX;
 1422 
 1423         INIT_DEBUGOUT("lem_media_status: begin");
 1424 
 1425         EM_CORE_LOCK(adapter);
 1426         lem_update_link_status(adapter);
 1427 
 1428         ifmr->ifm_status = IFM_AVALID;
 1429         ifmr->ifm_active = IFM_ETHER;
 1430 
 1431         if (!adapter->link_active) {
 1432                 EM_CORE_UNLOCK(adapter);
 1433                 return;
 1434         }
 1435 
 1436         ifmr->ifm_status |= IFM_ACTIVE;
 1437 
 1438         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
 1439             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
 1440                 if (adapter->hw.mac.type == e1000_82545)
 1441                         fiber_type = IFM_1000_LX;
 1442                 ifmr->ifm_active |= fiber_type | IFM_FDX;
 1443         } else {
 1444                 switch (adapter->link_speed) {
 1445                 case 10:
 1446                         ifmr->ifm_active |= IFM_10_T;
 1447                         break;
 1448                 case 100:
 1449                         ifmr->ifm_active |= IFM_100_TX;
 1450                         break;
 1451                 case 1000:
 1452                         ifmr->ifm_active |= IFM_1000_T;
 1453                         break;
 1454                 }
 1455                 if (adapter->link_duplex == FULL_DUPLEX)
 1456                         ifmr->ifm_active |= IFM_FDX;
 1457                 else
 1458                         ifmr->ifm_active |= IFM_HDX;
 1459         }
 1460         EM_CORE_UNLOCK(adapter);
 1461 }
 1462 
 1463 /*********************************************************************
 1464  *
 1465  *  Media Ioctl callback
 1466  *
 1467  *  This routine is called when the user changes speed/duplex using
 1468  *  media/mediaopt options with ifconfig.
 1469  *
 1470  **********************************************************************/
 1471 static int
 1472 lem_media_change(struct ifnet *ifp)
 1473 {
 1474         struct adapter *adapter = ifp->if_softc;
 1475         struct ifmedia  *ifm = &adapter->media;
 1476 
 1477         INIT_DEBUGOUT("lem_media_change: begin");
 1478 
 1479         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1480                 return (EINVAL);
 1481 
 1482         EM_CORE_LOCK(adapter);
 1483         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 1484         case IFM_AUTO:
 1485                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
 1486                 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
 1487                 break;
 1488         case IFM_1000_LX:
 1489         case IFM_1000_SX:
 1490         case IFM_1000_T:
 1491                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
 1492                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
 1493                 break;
 1494         case IFM_100_TX:
 1495                 adapter->hw.mac.autoneg = FALSE;
 1496                 adapter->hw.phy.autoneg_advertised = 0;
 1497                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1498                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
 1499                 else
 1500                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
 1501                 break;
 1502         case IFM_10_T:
 1503                 adapter->hw.mac.autoneg = FALSE;
 1504                 adapter->hw.phy.autoneg_advertised = 0;
 1505                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1506                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
 1507                 else
 1508                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
 1509                 break;
 1510         default:
 1511                 device_printf(adapter->dev, "Unsupported media type\n");
 1512         }
 1513 
 1514         lem_init_locked(adapter);
 1515         EM_CORE_UNLOCK(adapter);
 1516 
 1517         return (0);
 1518 }
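/*
 * Editor's usage sketch (not driver code): the callback above runs when an
 * administrator changes the media from userland with ifconfig(8), e.g.
 *
 *	ifconfig em0 media 100baseTX mediaopt full-duplex
 *	ifconfig em0 media autoselect
 *
 * "em0" is an assumed interface name; the media keywords are the standard
 * ifmedia ones matched by the IFM_SUBTYPE() switch above.
 */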
 1519 
 1520 /*********************************************************************
 1521  *
 1522  *  This routine maps the mbufs to tx descriptors.
 1523  *
 1524  *  return 0 on success, positive on failure
 1525  **********************************************************************/
 1526 
 1527 static int
 1528 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
 1529 {
 1530         bus_dma_segment_t       segs[EM_MAX_SCATTER];
 1531         bus_dmamap_t            map;
 1532         struct em_buffer        *tx_buffer, *tx_buffer_mapped;
 1533         struct e1000_tx_desc    *ctxd = NULL;
 1534         struct mbuf             *m_head;
 1535         u32                     txd_upper, txd_lower, txd_used, txd_saved;
 1536         int                     error, nsegs, i, j, first, last = 0;
 1537 
 1538         m_head = *m_headp;
 1539         txd_upper = txd_lower = txd_used = txd_saved = 0;
 1540 
 1541         /*
 1542         ** When doing checksum offload, it is critical to
 1543         ** make sure the first mbuf holds more than just the header,
 1544         ** because the checksum setup routine expects data to be present.
 1545         */
 1546         if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
 1547             (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
 1548                 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
 1549                 *m_headp = m_head;
 1550                 if (m_head == NULL)
 1551                         return (ENOBUFS);
 1552         }
 1553 
 1554         /*
 1555          * Map the packet for DMA
 1556          *
 1557          * Capture the first descriptor index,
 1558          * this descriptor will have the index
 1559          * of the EOP which is the only one that
 1560          * now gets a DONE bit writeback.
 1561          */
 1562         first = adapter->next_avail_tx_desc;
 1563         tx_buffer = &adapter->tx_buffer_area[first];
 1564         tx_buffer_mapped = tx_buffer;
 1565         map = tx_buffer->map;
 1566 
 1567         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
 1568             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1569 
 1570         /*
 1571          * There are two types of errors we can (try) to handle:
 1572          * - EFBIG means the mbuf chain was too long and bus_dma ran
 1573          *   out of segments.  Defragment the mbuf chain and try again.
 1574          * - ENOMEM means bus_dma could not obtain enough bounce buffers
 1575          *   at this point in time.  Defer sending and try again later.
 1576          * All other errors, in particular EINVAL, are fatal and prevent the
 1577          * mbuf chain from ever going through.  Drop it and report error.
 1578          */
 1579         if (error == EFBIG) {
 1580                 struct mbuf *m;
 1581 
 1582                 m = m_defrag(*m_headp, M_NOWAIT);
 1583                 if (m == NULL) {
 1584                         adapter->mbuf_alloc_failed++;
 1585                         m_freem(*m_headp);
 1586                         *m_headp = NULL;
 1587                         return (ENOBUFS);
 1588                 }
 1589                 *m_headp = m;
 1590 
 1591                 /* Try it again */
 1592                 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
 1593                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1594 
 1595                 if (error) {
 1596                         adapter->no_tx_dma_setup++;
 1597                         m_freem(*m_headp);
 1598                         *m_headp = NULL;
 1599                         return (error);
 1600                 }
 1601         } else if (error != 0) {
 1602                 adapter->no_tx_dma_setup++;
 1603                 return (error);
 1604         }
 1605 
 1606         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
 1607                 adapter->no_tx_desc_avail2++;
 1608                 bus_dmamap_unload(adapter->txtag, map);
 1609                 return (ENOBUFS);
 1610         }
 1611         m_head = *m_headp;
 1612 
 1613         /* Do hardware assists */
 1614         if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
 1615                 lem_transmit_checksum_setup(adapter,  m_head,
 1616                     &txd_upper, &txd_lower);
 1617 
 1618         i = adapter->next_avail_tx_desc;
 1619         if (adapter->pcix_82544) 
 1620                 txd_saved = i;
 1621 
 1622         /* Set up our transmit descriptors */
 1623         for (j = 0; j < nsegs; j++) {
 1624                 bus_size_t seg_len;
 1625                 bus_addr_t seg_addr;
 1626                 /* If adapter is 82544 and on PCIX bus */
 1627                 if(adapter->pcix_82544) {
 1628                         DESC_ARRAY      desc_array;
 1629                         u32             array_elements, counter;
 1630                         /*
 1631                          * Check the Address and Length combination and
 1632                          * split the data accordingly
 1633                          */
 1634                         array_elements = lem_fill_descriptors(segs[j].ds_addr,
 1635                             segs[j].ds_len, &desc_array);
 1636                         for (counter = 0; counter < array_elements; counter++) {
 1637                                 if (txd_used == adapter->num_tx_desc_avail) {
 1638                                         adapter->next_avail_tx_desc = txd_saved;
 1639                                         adapter->no_tx_desc_avail2++;
 1640                                         bus_dmamap_unload(adapter->txtag, map);
 1641                                         return (ENOBUFS);
 1642                                 }
 1643                                 tx_buffer = &adapter->tx_buffer_area[i];
 1644                                 ctxd = &adapter->tx_desc_base[i];
 1645                                 ctxd->buffer_addr = htole64(
 1646                                     desc_array.descriptor[counter].address);
 1647                                 ctxd->lower.data = htole32(
 1648                                     (adapter->txd_cmd | txd_lower | (u16)
 1649                                     desc_array.descriptor[counter].length));
 1650                                 ctxd->upper.data =
 1651                                     htole32((txd_upper));
 1652                                 last = i;
 1653                                 if (++i == adapter->num_tx_desc)
 1654                                          i = 0;
 1655                                 tx_buffer->m_head = NULL;
 1656                                 tx_buffer->next_eop = -1;
 1657                                 txd_used++;
 1658                         }
 1659                 } else {
 1660                         tx_buffer = &adapter->tx_buffer_area[i];
 1661                         ctxd = &adapter->tx_desc_base[i];
 1662                         seg_addr = segs[j].ds_addr;
 1663                         seg_len  = segs[j].ds_len;
 1664                         ctxd->buffer_addr = htole64(seg_addr);
 1665                         ctxd->lower.data = htole32(
 1666                         adapter->txd_cmd | txd_lower | seg_len);
 1667                         ctxd->upper.data =
 1668                             htole32(txd_upper);
 1669                         last = i;
 1670                         if (++i == adapter->num_tx_desc)
 1671                                 i = 0;
 1672                         tx_buffer->m_head = NULL;
 1673                         tx_buffer->next_eop = -1;
 1674                 }
 1675         }
 1676 
 1677         adapter->next_avail_tx_desc = i;
 1678 
 1679         if (adapter->pcix_82544)
 1680                 adapter->num_tx_desc_avail -= txd_used;
 1681         else
 1682                 adapter->num_tx_desc_avail -= nsegs;
 1683 
 1684         if (m_head->m_flags & M_VLANTAG) {
 1685                 /* Set the vlan id. */
 1686                 ctxd->upper.fields.special =
 1687                     htole16(m_head->m_pkthdr.ether_vtag);
 1688                 /* Tell hardware to add tag */
 1689                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
 1690         }
 1691 
 1692         tx_buffer->m_head = m_head;
 1693         tx_buffer_mapped->map = tx_buffer->map;
 1694         tx_buffer->map = map;
 1695         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
 1696 
 1697         /*
 1698          * Last Descriptor of Packet
 1699          * needs End Of Packet (EOP)
 1700          * and Report Status (RS)
 1701          */
 1702         ctxd->lower.data |=
 1703             htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
 1704         /*
 1705          * Keep track in the first buffer which
 1706          * descriptor will be written back
 1707          */
 1708         tx_buffer = &adapter->tx_buffer_area[first];
 1709         tx_buffer->next_eop = last;
 1710         adapter->watchdog_time = ticks;
 1711 
 1712         /*
 1713          * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
 1714          * that this frame is available to transmit.
 1715          */
 1716         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 1717             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1718         if (adapter->hw.mac.type == e1000_82547 &&
 1719             adapter->link_duplex == HALF_DUPLEX)
 1720                 lem_82547_move_tail(adapter);
 1721         else {
 1722                 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
 1723                 if (adapter->hw.mac.type == e1000_82547)
 1724                         lem_82547_update_fifo_head(adapter,
 1725                             m_head->m_pkthdr.len);
 1726         }
 1727 
 1728         return (0);
 1729 }
 1730 
 1731 /*********************************************************************
 1732  *
 1733  * 82547 workaround to avoid controller hang in half-duplex environment.
 1734  * The workaround is to avoid queuing a large packet that would span
 1735  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 1736  * in this case. We do that only when FIFO is quiescent.
 1737  *
 1738  **********************************************************************/
 1739 static void
 1740 lem_82547_move_tail(void *arg)
 1741 {
 1742         struct adapter *adapter = arg;
 1743         struct e1000_tx_desc *tx_desc;
 1744         u16     hw_tdt, sw_tdt, length = 0;
 1745         bool    eop = 0;
 1746 
 1747         EM_TX_LOCK_ASSERT(adapter);
 1748 
 1749         hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
 1750         sw_tdt = adapter->next_avail_tx_desc;
 1751         
 1752         while (hw_tdt != sw_tdt) {
 1753                 tx_desc = &adapter->tx_desc_base[hw_tdt];
 1754                 length += tx_desc->lower.flags.length;
 1755                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
 1756                 if (++hw_tdt == adapter->num_tx_desc)
 1757                         hw_tdt = 0;
 1758 
 1759                 if (eop) {
 1760                         if (lem_82547_fifo_workaround(adapter, length)) {
 1761                                 adapter->tx_fifo_wrk_cnt++;
 1762                                 callout_reset(&adapter->tx_fifo_timer, 1,
 1763                                         lem_82547_move_tail, adapter);
 1764                                 break;
 1765                         }
 1766                         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
 1767                         lem_82547_update_fifo_head(adapter, length);
 1768                         length = 0;
 1769                 }
 1770         }       
 1771 }
 1772 
 1773 static int
 1774 lem_82547_fifo_workaround(struct adapter *adapter, int len)
 1775 {       
 1776         int fifo_space, fifo_pkt_len;
 1777 
 1778         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
 1779 
 1780         if (adapter->link_duplex == HALF_DUPLEX) {
 1781                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
 1782 
 1783                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
 1784                         if (lem_82547_tx_fifo_reset(adapter))
 1785                                 return (0);
 1786                         else
 1787                                 return (1);
 1788                 }
 1789         }
 1790 
 1791         return (0);
 1792 }
 1793 
 1794 static void
 1795 lem_82547_update_fifo_head(struct adapter *adapter, int len)
 1796 {
 1797         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
 1798         
 1799         /* tx_fifo_head is always 16 byte aligned */
 1800         adapter->tx_fifo_head += fifo_pkt_len;
 1801         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
 1802                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
 1803         }
 1804 }
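/*
 * Editor's worked example for the two helpers above (illustration only;
 * the 16-byte EM_FIFO_HDR and the 8 KB FIFO size are assumptions made for
 * the arithmetic, not values taken from this excerpt).  A 1514-byte frame
 * is charged roundup2(1514 + 16, 16) = 1536 bytes of FIFO space, and a
 * head near the end of the FIFO wraps around:
 */
#if 0	/* editor's standalone sketch -- never compiled into the driver */
#include <stdio.h>

#define EX_ROUNDUP2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
        const int fifo_hdr = 16;        /* assumed EM_FIFO_HDR */
        const int fifo_size = 8192;     /* hypothetical TX FIFO size */
        int head = 7000;                /* hypothetical current FIFO head */
        int len = 1514;                 /* full-sized Ethernet frame */
        int fifo_pkt_len = EX_ROUNDUP2(len + fifo_hdr, fifo_hdr);

        head += fifo_pkt_len;           /* 7000 + 1536 = 8536 */
        if (head >= fifo_size)
                head -= fifo_size;      /* wraps to 344 */
        printf("consumes %d bytes, head moves to %d\n", fifo_pkt_len, head);
        return (0);
}
#endif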
 1805 
 1806 
 1807 static int
 1808 lem_82547_tx_fifo_reset(struct adapter *adapter)
 1809 {
 1810         u32 tctl;
 1811 
 1812         if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
 1813             E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
 1814             (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
 1815             E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
 1816             (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
 1817             E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
 1818             (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
 1819                 /* Disable TX unit */
 1820                 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
 1821                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
 1822                     tctl & ~E1000_TCTL_EN);
 1823 
 1824                 /* Reset FIFO pointers */
 1825                 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
 1826                     adapter->tx_head_addr);
 1827                 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
 1828                     adapter->tx_head_addr);
 1829                 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
 1830                     adapter->tx_head_addr);
 1831                 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
 1832                     adapter->tx_head_addr);
 1833 
 1834                 /* Re-enable TX unit */
 1835                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
 1836                 E1000_WRITE_FLUSH(&adapter->hw);
 1837 
 1838                 adapter->tx_fifo_head = 0;
 1839                 adapter->tx_fifo_reset_cnt++;
 1840 
 1841                 return (TRUE);
 1842         }
 1843         else {
 1844                 return (FALSE);
 1845         }
 1846 }
 1847 
 1848 static void
 1849 lem_set_promisc(struct adapter *adapter)
 1850 {
 1851         struct ifnet    *ifp = adapter->ifp;
 1852         u32             reg_rctl;
 1853 
 1854         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 1855 
 1856         if (ifp->if_flags & IFF_PROMISC) {
 1857                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 1858                 /* Turn this on if you want to see bad packets */
 1859                 if (lem_debug_sbp)
 1860                         reg_rctl |= E1000_RCTL_SBP;
 1861                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1862         } else if (ifp->if_flags & IFF_ALLMULTI) {
 1863                 reg_rctl |= E1000_RCTL_MPE;
 1864                 reg_rctl &= ~E1000_RCTL_UPE;
 1865                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1866         }
 1867 }
 1868 
 1869 static void
 1870 lem_disable_promisc(struct adapter *adapter)
 1871 {
 1872         struct ifnet    *ifp = adapter->ifp;
 1873         u32             reg_rctl;
 1874         int             mcnt = 0;
 1875 
 1876         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 1877         reg_rctl &=  (~E1000_RCTL_UPE);
 1878         if (ifp->if_flags & IFF_ALLMULTI)
 1879                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
 1880         else {
 1881                 struct  ifmultiaddr *ifma;
 1882 #if __FreeBSD_version < 800000
 1883                 IF_ADDR_LOCK(ifp);
 1884 #else   
 1885                 if_maddr_rlock(ifp);
 1886 #endif
 1887                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1888                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1889                                 continue;
 1890                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
 1891                                 break;
 1892                         mcnt++;
 1893                 }
 1894 #if __FreeBSD_version < 800000
 1895                 IF_ADDR_UNLOCK(ifp);
 1896 #else
 1897                 if_maddr_runlock(ifp);
 1898 #endif
 1899         }
 1900         /* Don't disable if in MAX groups */
 1901         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
 1902                 reg_rctl &=  (~E1000_RCTL_MPE);
 1903         reg_rctl &=  (~E1000_RCTL_SBP);
 1904         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1905 }
 1906 
 1907 
 1908 /*********************************************************************
 1909  *  Multicast Update
 1910  *
 1911  *  This routine is called whenever multicast address list is updated.
 1912  *
 1913  **********************************************************************/
 1914 
 1915 static void
 1916 lem_set_multi(struct adapter *adapter)
 1917 {
 1918         struct ifnet    *ifp = adapter->ifp;
 1919         struct ifmultiaddr *ifma;
 1920         u32 reg_rctl = 0;
 1921         u8  *mta; /* Multicast array memory */
 1922         int mcnt = 0;
 1923 
 1924         IOCTL_DEBUGOUT("lem_set_multi: begin");
 1925 
 1926         mta = adapter->mta;
 1927         bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
 1928 
 1929         if (adapter->hw.mac.type == e1000_82542 && 
 1930             adapter->hw.revision_id == E1000_REVISION_2) {
 1931                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 1932                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
 1933                         e1000_pci_clear_mwi(&adapter->hw);
 1934                 reg_rctl |= E1000_RCTL_RST;
 1935                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1936                 msec_delay(5);
 1937         }
 1938 
 1939 #if __FreeBSD_version < 800000
 1940         IF_ADDR_LOCK(ifp);
 1941 #else
 1942         if_maddr_rlock(ifp);
 1943 #endif
 1944         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1945                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1946                         continue;
 1947 
 1948                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
 1949                         break;
 1950 
 1951                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
 1952                     &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
 1953                 mcnt++;
 1954         }
 1955 #if __FreeBSD_version < 800000
 1956         IF_ADDR_UNLOCK(ifp);
 1957 #else
 1958         if_maddr_runlock(ifp);
 1959 #endif
 1960         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
 1961                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 1962                 reg_rctl |= E1000_RCTL_MPE;
 1963                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1964         } else
 1965                 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
 1966 
 1967         if (adapter->hw.mac.type == e1000_82542 && 
 1968             adapter->hw.revision_id == E1000_REVISION_2) {
 1969                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 1970                 reg_rctl &= ~E1000_RCTL_RST;
 1971                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
 1972                 msec_delay(5);
 1973                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
 1974                         e1000_pci_set_mwi(&adapter->hw);
 1975         }
 1976 }
 1977 
 1978 
 1979 /*********************************************************************
 1980  *  Timer routine
 1981  *
 1982  *  This routine checks for link status and updates statistics.
 1983  *
 1984  **********************************************************************/
 1985 
 1986 static void
 1987 lem_local_timer(void *arg)
 1988 {
 1989         struct adapter  *adapter = arg;
 1990 
 1991         EM_CORE_LOCK_ASSERT(adapter);
 1992 
 1993         lem_update_link_status(adapter);
 1994         lem_update_stats_counters(adapter);
 1995 
 1996         lem_smartspeed(adapter);
 1997 
 1998         /*
 1999          * We check the watchdog: the time since
 2000          * the last TX descriptor was cleaned.
 2001          * This implies a functional TX engine.
 2002          */
 2003         if ((adapter->watchdog_check == TRUE) &&
 2004             (ticks - adapter->watchdog_time > EM_WATCHDOG))
 2005                 goto hung;
 2006 
 2007         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
 2008         return;
 2009 hung:
 2010         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
 2011         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2012         adapter->watchdog_events++;
 2013         lem_init_locked(adapter);
 2014 }
 2015 
 2016 static void
 2017 lem_update_link_status(struct adapter *adapter)
 2018 {
 2019         struct e1000_hw *hw = &adapter->hw;
 2020         struct ifnet *ifp = adapter->ifp;
 2021         device_t dev = adapter->dev;
 2022         u32 link_check = 0;
 2023 
 2024         /* Get the cached link value or read phy for real */
 2025         switch (hw->phy.media_type) {
 2026         case e1000_media_type_copper:
 2027                 if (hw->mac.get_link_status) {
 2028                         /* Do the work to read phy */
 2029                         e1000_check_for_link(hw);
 2030                         link_check = !hw->mac.get_link_status;
 2031                         if (link_check) /* ESB2 fix */
 2032                                 e1000_cfg_on_link_up(hw);
 2033                 } else
 2034                         link_check = TRUE;
 2035                 break;
 2036         case e1000_media_type_fiber:
 2037                 e1000_check_for_link(hw);
 2038                 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
 2039                                  E1000_STATUS_LU);
 2040                 break;
 2041         case e1000_media_type_internal_serdes:
 2042                 e1000_check_for_link(hw);
 2043                 link_check = adapter->hw.mac.serdes_has_link;
 2044                 break;
 2045         default:
 2046         case e1000_media_type_unknown:
 2047                 break;
 2048         }
 2049 
 2050         /* Now check for a transition */
 2051         if (link_check && (adapter->link_active == 0)) {
 2052                 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
 2053                     &adapter->link_duplex);
 2054                 if (bootverbose)
 2055                         device_printf(dev, "Link is up %d Mbps %s\n",
 2056                             adapter->link_speed,
 2057                             ((adapter->link_duplex == FULL_DUPLEX) ?
 2058                             "Full Duplex" : "Half Duplex"));
 2059                 adapter->link_active = 1;
 2060                 adapter->smartspeed = 0;
 2061                 ifp->if_baudrate = adapter->link_speed * 1000000;
 2062                 if_link_state_change(ifp, LINK_STATE_UP);
 2063         } else if (!link_check && (adapter->link_active == 1)) {
 2064                 ifp->if_baudrate = adapter->link_speed = 0;
 2065                 adapter->link_duplex = 0;
 2066                 if (bootverbose)
 2067                         device_printf(dev, "Link is Down\n");
 2068                 adapter->link_active = 0;
 2069                 /* Link down, disable watchdog */
 2070                 adapter->watchdog_check = FALSE;
 2071                 if_link_state_change(ifp, LINK_STATE_DOWN);
 2072         }
 2073 }
 2074 
 2075 /*********************************************************************
 2076  *
 2077  *  This routine disables all traffic on the adapter by issuing a
 2078  *  global reset on the MAC and deallocates TX/RX buffers.
 2079  *
 2080  *  This routine should always be called with BOTH the CORE
 2081  *  and TX locks held.
 2082  **********************************************************************/
 2083 
 2084 static void
 2085 lem_stop(void *arg)
 2086 {
 2087         struct adapter  *adapter = arg;
 2088         struct ifnet    *ifp = adapter->ifp;
 2089 
 2090         EM_CORE_LOCK_ASSERT(adapter);
 2091         EM_TX_LOCK_ASSERT(adapter);
 2092 
 2093         INIT_DEBUGOUT("lem_stop: begin");
 2094 
 2095         lem_disable_intr(adapter);
 2096         callout_stop(&adapter->timer);
 2097         callout_stop(&adapter->tx_fifo_timer);
 2098 
 2099         /* Tell the stack that the interface is no longer active */
 2100         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2101 
 2102         e1000_reset_hw(&adapter->hw);
 2103         if (adapter->hw.mac.type >= e1000_82544)
 2104                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
 2105 
 2106         e1000_led_off(&adapter->hw);
 2107         e1000_cleanup_led(&adapter->hw);
 2108 }
 2109 
 2110 
 2111 /*********************************************************************
 2112  *
 2113  *  Determine hardware revision.
 2114  *
 2115  **********************************************************************/
 2116 static void
 2117 lem_identify_hardware(struct adapter *adapter)
 2118 {
 2119         device_t dev = adapter->dev;
 2120 
 2121         /* Make sure our PCI config space has the necessary stuff set */
 2122         pci_enable_busmaster(dev);
 2123         adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
 2124 
 2125         /* Save off the information about this board */
 2126         adapter->hw.vendor_id = pci_get_vendor(dev);
 2127         adapter->hw.device_id = pci_get_device(dev);
 2128         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
 2129         adapter->hw.subsystem_vendor_id =
 2130             pci_read_config(dev, PCIR_SUBVEND_0, 2);
 2131         adapter->hw.subsystem_device_id =
 2132             pci_read_config(dev, PCIR_SUBDEV_0, 2);
 2133 
 2134         /* Do Shared Code Init and Setup */
 2135         if (e1000_set_mac_type(&adapter->hw)) {
 2136                 device_printf(dev, "Setup init failure\n");
 2137                 return;
 2138         }
 2139 }
 2140 
 2141 static int
 2142 lem_allocate_pci_resources(struct adapter *adapter)
 2143 {
 2144         device_t        dev = adapter->dev;
 2145         int             val, rid, error = E1000_SUCCESS;
 2146 
 2147         rid = PCIR_BAR(0);
 2148         adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 2149             &rid, RF_ACTIVE);
 2150         if (adapter->memory == NULL) {
 2151                 device_printf(dev, "Unable to allocate bus resource: memory\n");
 2152                 return (ENXIO);
 2153         }
 2154         adapter->osdep.mem_bus_space_tag =
 2155             rman_get_bustag(adapter->memory);
 2156         adapter->osdep.mem_bus_space_handle =
 2157             rman_get_bushandle(adapter->memory);
 2158         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
 2159 
 2160         /* Only older adapters use IO mapping */
 2161         if (adapter->hw.mac.type > e1000_82543) {
 2162                 /* Figure out where our IO BAR is */
 2163                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
 2164                         val = pci_read_config(dev, rid, 4);
 2165                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
 2166                                 adapter->io_rid = rid;
 2167                                 break;
 2168                         }
 2169                         rid += 4;
 2170                         /* check for 64bit BAR */
 2171                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
 2172                                 rid += 4;
 2173                 }
 2174                 if (rid >= PCIR_CIS) {
 2175                         device_printf(dev, "Unable to locate IO BAR\n");
 2176                         return (ENXIO);
 2177                 }
 2178                 adapter->ioport = bus_alloc_resource_any(dev,
 2179                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
 2180                 if (adapter->ioport == NULL) {
 2181                         device_printf(dev, "Unable to allocate bus resource: "
 2182                             "ioport\n");
 2183                         return (ENXIO);
 2184                 }
 2185                 adapter->hw.io_base = 0;
 2186                 adapter->osdep.io_bus_space_tag =
 2187                     rman_get_bustag(adapter->ioport);
 2188                 adapter->osdep.io_bus_space_handle =
 2189                     rman_get_bushandle(adapter->ioport);
 2190         }
 2191 
 2192         adapter->hw.back = &adapter->osdep;
 2193 
 2194         return (error);
 2195 }
 2196 
 2197 /*********************************************************************
 2198  *
 2199  *  Setup the Legacy or MSI Interrupt handler
 2200  *
 2201  **********************************************************************/
 2202 int
 2203 lem_allocate_irq(struct adapter *adapter)
 2204 {
 2205         device_t dev = adapter->dev;
 2206         int error, rid = 0;
 2207 
 2208         /* Manually turn off all interrupts */
 2209         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
 2210 
 2211         /* We allocate a single interrupt resource */
 2212         adapter->res[0] = bus_alloc_resource_any(dev,
 2213             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
 2214         if (adapter->res[0] == NULL) {
 2215                 device_printf(dev, "Unable to allocate bus resource: "
 2216                     "interrupt\n");
 2217                 return (ENXIO);
 2218         }
 2219 
 2220         /* Do Legacy setup? */
 2221         if (lem_use_legacy_irq) {
 2222                 if ((error = bus_setup_intr(dev, adapter->res[0],
 2223                     INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
 2224                     &adapter->tag[0])) != 0) {
 2225                         device_printf(dev,
 2226                             "Failed to register interrupt handler\n");
 2227                         return (error);
 2228                 }
 2229                 return (0);
 2230         }
 2231 
 2232         /*
 2233          * Use a Fast interrupt and the associated
 2234          * deferred processing contexts.
 2235          */
 2236         TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
 2237         TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
 2238         adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
 2239             taskqueue_thread_enqueue, &adapter->tq);
 2240         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
 2241             device_get_nameunit(adapter->dev));
 2242         if ((error = bus_setup_intr(dev, adapter->res[0],
 2243             INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
 2244             &adapter->tag[0])) != 0) {
 2245                 device_printf(dev, "Failed to register fast interrupt "
 2246                             "handler: %d\n", error);
 2247                 taskqueue_free(adapter->tq);
 2248                 adapter->tq = NULL;
 2249                 return (error);
 2250         }
 2251         
 2252         return (0);
 2253 }
 2254 
 2255 
 2256 static void
 2257 lem_free_pci_resources(struct adapter *adapter)
 2258 {
 2259         device_t dev = adapter->dev;
 2260 
 2261 
 2262         if (adapter->tag[0] != NULL) {
 2263                 bus_teardown_intr(dev, adapter->res[0],
 2264                     adapter->tag[0]);
 2265                 adapter->tag[0] = NULL;
 2266         }
 2267 
 2268         if (adapter->res[0] != NULL) {
 2269                 bus_release_resource(dev, SYS_RES_IRQ,
 2270                     0, adapter->res[0]);
 2271         }
 2272 
 2273         if (adapter->memory != NULL)
 2274                 bus_release_resource(dev, SYS_RES_MEMORY,
 2275                     PCIR_BAR(0), adapter->memory);
 2276 
 2277         if (adapter->ioport != NULL)
 2278                 bus_release_resource(dev, SYS_RES_IOPORT,
 2279                     adapter->io_rid, adapter->ioport);
 2280 }
 2281 
 2282 
 2283 /*********************************************************************
 2284  *
 2285  *  Initialize the hardware to a configuration
 2286  *  as specified by the adapter structure.
 2287  *
 2288  **********************************************************************/
 2289 static int
 2290 lem_hardware_init(struct adapter *adapter)
 2291 {
 2292         device_t dev = adapter->dev;
 2293         u16     rx_buffer_size;
 2294 
 2295         INIT_DEBUGOUT("lem_hardware_init: begin");
 2296 
 2297         /* Issue a global reset */
 2298         e1000_reset_hw(&adapter->hw);
 2299 
 2300         /* When hardware is reset, fifo_head is also reset */
 2301         adapter->tx_fifo_head = 0;
 2302 
 2303         /*
 2304          * These parameters control the automatic generation (Tx) and
 2305          * response (Rx) to Ethernet PAUSE frames.
 2306          * - High water mark should allow for at least two frames to be
 2307          *   received after sending an XOFF.
 2308          * - Low water mark works best when it is very near the high water mark.
 2309          *   This allows the receiver to restart by sending XON when it has
 2310          *   drained a bit. Here we use an arbitrary value of 1500 which will
 2311          *   restart after one full frame is pulled from the buffer. There
 2312          *   could be several smaller frames in the buffer and if so they will
 2313          *   not trigger the XON until their total number reduces the buffer
 2314          *   by 1500.
 2315          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
 2316          */
 2317         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
 2318             0xffff) << 10 );
 2319 
 2320         adapter->hw.fc.high_water = rx_buffer_size -
 2321             roundup2(adapter->max_frame_size, 1024);
 2322         adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
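        /*
         * Editor's worked example (hypothetical values, not read from
         * hardware): if the low 16 bits of E1000_PBA read back as 0x0030,
         * then rx_buffer_size = 0x30 << 10 = 49152 bytes.  With a standard
         * 1518-byte max frame, roundup2(1518, 1024) = 2048, giving
         * high_water = 49152 - 2048 = 47104 and
         * low_water = 47104 - 1500 = 45604.
         */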
 2323 
 2324         adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
 2325         adapter->hw.fc.send_xon = TRUE;
 2326 
 2327         /* Set Flow control, use the tunable location if sane */
 2328         if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
 2329                 adapter->hw.fc.requested_mode = lem_fc_setting;
 2330         else
 2331                 adapter->hw.fc.requested_mode = e1000_fc_none;
 2332 
 2333         if (e1000_init_hw(&adapter->hw) < 0) {
 2334                 device_printf(dev, "Hardware Initialization Failed\n");
 2335                 return (EIO);
 2336         }
 2337 
 2338         e1000_check_for_link(&adapter->hw);
 2339 
 2340         return (0);
 2341 }
 2342 
 2343 /*********************************************************************
 2344  *
 2345  *  Setup networking device structure and register an interface.
 2346  *
 2347  **********************************************************************/
 2348 static int
 2349 lem_setup_interface(device_t dev, struct adapter *adapter)
 2350 {
 2351         struct ifnet   *ifp;
 2352 
 2353         INIT_DEBUGOUT("lem_setup_interface: begin");
 2354 
 2355         ifp = adapter->ifp = if_alloc(IFT_ETHER);
 2356         if (ifp == NULL) {
 2357                 device_printf(dev, "can not allocate ifnet structure\n");
 2358                 return (-1);
 2359         }
 2360         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2361         ifp->if_init =  lem_init;
 2362         ifp->if_softc = adapter;
 2363         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2364         ifp->if_ioctl = lem_ioctl;
 2365         ifp->if_start = lem_start;
 2366         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
 2367         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
 2368         IFQ_SET_READY(&ifp->if_snd);
 2369 
 2370         ether_ifattach(ifp, adapter->hw.mac.addr);
 2371 
 2372         ifp->if_capabilities = ifp->if_capenable = 0;
 2373 
 2374         if (adapter->hw.mac.type >= e1000_82543) {
 2375                 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
 2376                 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
 2377         }
 2378 
 2379         /*
 2380          * Tell the upper layer(s) we support long frames.
 2381          */
 2382         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 2383         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
 2384         ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
 2385 
 2386         /*
 2387         ** Don't turn this on by default: if vlans are
 2388         ** created on another pseudo device (e.g. lagg),
 2389         ** vlan events are not passed through, breaking
 2390         ** operation, but with HW FILTER off it works. If
 2391         ** using vlans directly on the em driver you can
 2392         ** enable this and get full hardware tag filtering.
 2393         */
 2394         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
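        /*
         * Editor's usage note (not driver code): when hardware VLAN
         * filtering is wanted despite the caveat above, the capability can
         * be toggled at runtime with the standard ifconfig(8) keywords, e.g.
         *
         *	ifconfig em0 vlanhwfilter
         *	ifconfig em0 -vlanhwfilter
         *
         * The interface name "em0" is only an assumed example.
         */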
 2395 
 2396 #ifdef DEVICE_POLLING
 2397         ifp->if_capabilities |= IFCAP_POLLING;
 2398 #endif
 2399 
 2400         /* Enable only WOL MAGIC by default */
 2401         if (adapter->wol) {
 2402                 ifp->if_capabilities |= IFCAP_WOL;
 2403                 ifp->if_capenable |= IFCAP_WOL_MAGIC;
 2404         }
 2405                 
 2406         /*
 2407          * Specify the media types supported by this adapter and register
 2408          * callbacks to update media and link information
 2409          */
 2410         ifmedia_init(&adapter->media, IFM_IMASK,
 2411             lem_media_change, lem_media_status);
 2412         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
 2413             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
 2414                 u_char fiber_type = IFM_1000_SX;        /* default type */
 2415 
 2416                 if (adapter->hw.mac.type == e1000_82545)
 2417                         fiber_type = IFM_1000_LX;
 2418                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
 2419                             0, NULL);
 2420                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
 2421         } else {
 2422                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
 2423                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
 2424                             0, NULL);
 2425                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
 2426                             0, NULL);
 2427                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
 2428                             0, NULL);
 2429                 if (adapter->hw.phy.type != e1000_phy_ife) {
 2430                         ifmedia_add(&adapter->media,
 2431                                 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
 2432                         ifmedia_add(&adapter->media,
 2433                                 IFM_ETHER | IFM_1000_T, 0, NULL);
 2434                 }
 2435         }
 2436         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 2437         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 2438         return (0);
 2439 }
 2440 
 2441 
 2442 /*********************************************************************
 2443  *
 2444  *  Workaround for SmartSpeed on 82541 and 82547 controllers
 2445  *
 2446  **********************************************************************/
 2447 static void
 2448 lem_smartspeed(struct adapter *adapter)
 2449 {
 2450         u16 phy_tmp;
 2451 
 2452         if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
 2453             adapter->hw.mac.autoneg == 0 ||
 2454             (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
 2455                 return;
 2456 
 2457         if (adapter->smartspeed == 0) {
 2458                 /* If the Master/Slave config fault is asserted twice,
 2459                  * we assume a back-to-back connection */
 2460                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
 2461                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
 2462                         return;
 2463                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
 2464                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
 2465                         e1000_read_phy_reg(&adapter->hw,
 2466                             PHY_1000T_CTRL, &phy_tmp);
 2467                         if(phy_tmp & CR_1000T_MS_ENABLE) {
 2468                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
 2469                                 e1000_write_phy_reg(&adapter->hw,
 2470                                     PHY_1000T_CTRL, phy_tmp);
 2471                                 adapter->smartspeed++;
 2472                                 if(adapter->hw.mac.autoneg &&
 2473                                    !e1000_copper_link_autoneg(&adapter->hw) &&
 2474                                    !e1000_read_phy_reg(&adapter->hw,
 2475                                     PHY_CONTROL, &phy_tmp)) {
 2476                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
 2477                                                     MII_CR_RESTART_AUTO_NEG);
 2478                                         e1000_write_phy_reg(&adapter->hw,
 2479                                             PHY_CONTROL, phy_tmp);
 2480                                 }
 2481                         }
 2482                 }
 2483                 return;
 2484         } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
 2485                 /* If still no link, perhaps using 2/3 pair cable */
 2486                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
 2487                 phy_tmp |= CR_1000T_MS_ENABLE;
 2488                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
 2489                 if(adapter->hw.mac.autoneg &&
 2490                    !e1000_copper_link_autoneg(&adapter->hw) &&
 2491                    !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
 2492                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
 2493                                     MII_CR_RESTART_AUTO_NEG);
 2494                         e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
 2495                 }
 2496         }
 2497         /* Restart process after EM_SMARTSPEED_MAX iterations */
 2498         if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
 2499                 adapter->smartspeed = 0;
 2500 }
 2501 
 2502 
 2503 /*
 2504  * Manage DMA'able memory.
 2505  */
 2506 static void
 2507 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2508 {
 2509         if (error)
 2510                 return;
 2511         *(bus_addr_t *) arg = segs[0].ds_addr;
 2512 }
 2513 
 2514 static int
 2515 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
 2516         struct em_dma_alloc *dma, int mapflags)
 2517 {
 2518         int error;
 2519 
 2520         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
 2521                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
 2522                                 BUS_SPACE_MAXADDR,      /* lowaddr */
 2523                                 BUS_SPACE_MAXADDR,      /* highaddr */
 2524                                 NULL, NULL,             /* filter, filterarg */
 2525                                 size,                   /* maxsize */
 2526                                 1,                      /* nsegments */
 2527                                 size,                   /* maxsegsize */
 2528                                 0,                      /* flags */
 2529                                 NULL,                   /* lockfunc */
 2530                                 NULL,                   /* lockarg */
 2531                                 &dma->dma_tag);
 2532         if (error) {
 2533                 device_printf(adapter->dev,
 2534                     "%s: bus_dma_tag_create failed: %d\n",
 2535                     __func__, error);
 2536                 goto fail_0;
 2537         }
 2538 
 2539         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
 2540             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
 2541         if (error) {
 2542                 device_printf(adapter->dev,
 2543                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
 2544                     __func__, (uintmax_t)size, error);
 2545                 goto fail_2;
 2546         }
 2547 
 2548         dma->dma_paddr = 0;
 2549         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
 2550             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
 2551         if (error || dma->dma_paddr == 0) {
 2552                 device_printf(adapter->dev,
 2553                     "%s: bus_dmamap_load failed: %d\n",
 2554                     __func__, error);
 2555                 goto fail_3;
 2556         }
 2557 
 2558         return (0);
 2559 
 2560 fail_3:
 2561         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 2562 fail_2:
 2563         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 2564         bus_dma_tag_destroy(dma->dma_tag);
 2565 fail_0:
 2566         dma->dma_map = NULL;
 2567         dma->dma_tag = NULL;
 2568 
 2569         return (error);
 2570 }
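/*
 * Editor's usage sketch for lem_dma_malloc() (modeled on the driver's
 * attach path, which is outside this excerpt): allocating the TX
 * descriptor ring and remembering its KVA.  Treat it as an illustration
 * of the calling convention, not as the literal call site.
 */
#if 0	/* editor's sketch -- never compiled into the driver */
        bus_size_t tsize;

        tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
            EM_DBA_ALIGN);
        if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                device_printf(adapter->dev,
                    "Unable to allocate tx_desc memory\n");
                return (ENOMEM);        /* caller unwinds earlier allocations */
        }
        adapter->tx_desc_base =
            (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
        /* The matching teardown is lem_dma_free(adapter, &adapter->txdma). */
#endif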
 2571 
 2572 static void
 2573 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
 2574 {
 2575         if (dma->dma_tag == NULL)
 2576                 return;
 2577         if (dma->dma_map != NULL) {
 2578                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
 2579                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2580                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 2581                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 2582                 dma->dma_map = NULL;
 2583         }
 2584         bus_dma_tag_destroy(dma->dma_tag);
 2585         dma->dma_tag = NULL;
 2586 }
 2587 
 2588 
 2589 /*********************************************************************
 2590  *
 2591  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 2592  *  the information needed to transmit a packet on the wire.
 2593  *
 2594  **********************************************************************/
 2595 static int
 2596 lem_allocate_transmit_structures(struct adapter *adapter)
 2597 {
 2598         device_t dev = adapter->dev;
 2599         struct em_buffer *tx_buffer;
 2600         int error;
 2601 
 2602         /*
 2603          * Create DMA tags for tx descriptors
 2604          */
 2605         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
 2606                                 1, 0,                   /* alignment, bounds */
 2607                                 BUS_SPACE_MAXADDR,      /* lowaddr */
 2608                                 BUS_SPACE_MAXADDR,      /* highaddr */
 2609                                 NULL, NULL,             /* filter, filterarg */
 2610                                 MCLBYTES * EM_MAX_SCATTER,      /* maxsize */
 2611                                 EM_MAX_SCATTER,         /* nsegments */
 2612                                 MCLBYTES,               /* maxsegsize */
 2613                                 0,                      /* flags */
 2614                                 NULL,                   /* lockfunc */
 2615                                 NULL,                   /* lockarg */
 2616                                 &adapter->txtag)) != 0) {
 2617                 device_printf(dev, "Unable to allocate TX DMA tag\n");
 2618                 goto fail;
 2619         }
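              /*
               * With maxsize = MCLBYTES * EM_MAX_SCATTER, nsegments =
               * EM_MAX_SCATTER and maxsegsize = MCLBYTES, each transmit
               * mbuf chain may map to at most EM_MAX_SCATTER DMA segments,
               * none larger than a single cluster.
               */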
 2620 
 2621         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
 2622             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
 2623         if (adapter->tx_buffer_area == NULL) {
 2624                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
 2625                 error = ENOMEM;
 2626                 goto fail;
 2627         }
 2628 
 2629         /* Create the descriptor buffer dma maps */
 2630         for (int i = 0; i < adapter->num_tx_desc; i++) {
 2631                 tx_buffer = &adapter->tx_buffer_area[i];
 2632                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
 2633                 if (error != 0) {
 2634                         device_printf(dev, "Unable to create TX DMA map\n");
 2635                         goto fail;
 2636                 }
 2637                 tx_buffer->next_eop = -1;
 2638         }
 2639 
 2640         return (0);
 2641 fail:
 2642         lem_free_transmit_structures(adapter);
 2643         return (error);
 2644 }
 2645 
 2646 /*********************************************************************
 2647  *
 2648  *  (Re)Initialize transmit structures.
 2649  *
 2650  **********************************************************************/
 2651 static void
 2652 lem_setup_transmit_structures(struct adapter *adapter)
 2653 {
 2654         struct em_buffer *tx_buffer;
 2655 #ifdef DEV_NETMAP
 2656         /* we are already locked */
 2657         struct netmap_adapter *na = NA(adapter->ifp);
 2658         struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
 2659 #endif /* DEV_NETMAP */
 2660 
 2661         /* Clear the old ring contents */
 2662         bzero(adapter->tx_desc_base,
 2663             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
 2664 
 2665         /* Free any existing TX buffers */
 2666         for (int i = 0; i < adapter->num_tx_desc; i++) {
 2667                 tx_buffer = &adapter->tx_buffer_area[i];
 2668                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
 2669                     BUS_DMASYNC_POSTWRITE);
 2670                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
 2671                 m_freem(tx_buffer->m_head);
 2672                 tx_buffer->m_head = NULL;
 2673 #ifdef DEV_NETMAP
 2674                 if (slot) {
 2675                         /* the i-th NIC entry goes to slot si */
 2676                         int si = netmap_idx_n2k(&na->tx_rings[0], i);
 2677                         uint64_t paddr;
 2678                         void *addr;
 2679 
 2680                         addr = PNMB(slot + si, &paddr);
 2681                         adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
 2682                         /* reload the map for netmap mode */
 2683                         netmap_load_map(adapter->txtag, tx_buffer->map, addr);
 2684                 }
 2685 #endif /* DEV_NETMAP */
 2686                 tx_buffer->next_eop = -1;
 2687         }
 2688 
 2689         /* Reset state */
 2690         adapter->last_hw_offload = 0;
 2691         adapter->next_avail_tx_desc = 0;
 2692         adapter->next_tx_to_clean = 0;
 2693         adapter->num_tx_desc_avail = adapter->num_tx_desc;
 2694 
 2695         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 2696             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2697 
 2698         return;
 2699 }
 2700 
 2701 /*********************************************************************
 2702  *
 2703  *  Enable transmit unit.
 2704  *
 2705  **********************************************************************/
 2706 static void
 2707 lem_initialize_transmit_unit(struct adapter *adapter)
 2708 {
 2709         u32     tctl, tipg = 0;
 2710         u64     bus_addr;
 2711 
 2712         INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
 2713         /* Setup the Base and Length of the Tx Descriptor Ring */
 2714         bus_addr = adapter->txdma.dma_paddr;
 2715         E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
 2716             adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
 2717         E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
 2718             (u32)(bus_addr >> 32));
 2719         E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
 2720             (u32)bus_addr);
 2721         /* Setup the HW Tx Head and Tail descriptor pointers */
 2722         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
 2723         E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
 2724 
 2725         HW_DEBUGOUT2("Base = %x, Length = %x\n",
 2726             E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
 2727             E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
 2728 
 2729         /* Set the default values for the Tx Inter Packet Gap timer */
 2730         switch (adapter->hw.mac.type) {
 2731         case e1000_82542:
 2732                 tipg = DEFAULT_82542_TIPG_IPGT;
 2733                 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
 2734                 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 2735                 break;
 2736         default:
 2737                 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
 2738                     (adapter->hw.phy.media_type ==
 2739                     e1000_media_type_internal_serdes))
 2740                         tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 2741                 else
 2742                         tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
 2743                 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
 2744                 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 2745         }
 2746 
 2747         E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
 2748         E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
 2749         if(adapter->hw.mac.type >= e1000_82540)
 2750                 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
 2751                     adapter->tx_abs_int_delay.value);
 2752 
 2753         /* Program the Transmit Control Register */
 2754         tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
 2755         tctl &= ~E1000_TCTL_CT;
 2756         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
 2757                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
 2758 
 2759         /* This write will effectively turn on the transmit unit. */
 2760         E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
 2761 
 2762         /* Setup Transmit Descriptor Base Settings */   
 2763         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
 2764 
 2765         if (adapter->tx_int_delay.value > 0)
 2766                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
 2767 }
 2768 
 2769 /*********************************************************************
 2770  *
 2771  *  Free all transmit related data structures.
 2772  *
 2773  **********************************************************************/
 2774 static void
 2775 lem_free_transmit_structures(struct adapter *adapter)
 2776 {
 2777         struct em_buffer *tx_buffer;
 2778 
 2779         INIT_DEBUGOUT("free_transmit_structures: begin");
 2780 
 2781         if (adapter->tx_buffer_area != NULL) {
 2782                 for (int i = 0; i < adapter->num_tx_desc; i++) {
 2783                         tx_buffer = &adapter->tx_buffer_area[i];
 2784                         if (tx_buffer->m_head != NULL) {
 2785                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
 2786                                     BUS_DMASYNC_POSTWRITE);
 2787                                 bus_dmamap_unload(adapter->txtag,
 2788                                     tx_buffer->map);
 2789                                 m_freem(tx_buffer->m_head);
 2790                                 tx_buffer->m_head = NULL;
 2791                         } else if (tx_buffer->map != NULL)
 2792                                 bus_dmamap_unload(adapter->txtag,
 2793                                     tx_buffer->map);
 2794                         if (tx_buffer->map != NULL) {
 2795                                 bus_dmamap_destroy(adapter->txtag,
 2796                                     tx_buffer->map);
 2797                                 tx_buffer->map = NULL;
 2798                         }
 2799                 }
 2800         }
 2801         if (adapter->tx_buffer_area != NULL) {
 2802                 free(adapter->tx_buffer_area, M_DEVBUF);
 2803                 adapter->tx_buffer_area = NULL;
 2804         }
 2805         if (adapter->txtag != NULL) {
 2806                 bus_dma_tag_destroy(adapter->txtag);
 2807                 adapter->txtag = NULL;
 2808         }
 2809 #if __FreeBSD_version >= 800000
 2810         if (adapter->br != NULL)
 2811                 buf_ring_free(adapter->br, M_DEVBUF);
 2812 #endif
 2813 }
 2814 
 2815 /*********************************************************************
 2816  *
 2817  *  The offload context needs to be set when we transfer the first
 2818  *  packet of a particular protocol (TCP/UDP). This routine has been
 2819  *  enhanced to deal with inserted VLAN headers and IPv6 (not complete).
 2820  *
 2821  *  Added back the old method of keeping the current context type
 2822  *  and not setting it if unnecessary, as this is reported to be a
 2823  *  big performance win.  -jfv
 2824  **********************************************************************/
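      /*
       * Note that the context descriptor written by this routine consumes a
       * slot in the TX ring itself (num_tx_desc_avail is decremented at the
       * end of the routine when a context is written), and
       * adapter->last_hw_offload caches the protocol of the last context so
       * a new one is only written when the offload type changes.
       */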
 2825 static void
 2826 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
 2827     u32 *txd_upper, u32 *txd_lower)
 2828 {
 2829         struct e1000_context_desc *TXD = NULL;
 2830         struct em_buffer *tx_buffer;
 2831         struct ether_vlan_header *eh;
 2832         struct ip *ip = NULL;
 2833         struct ip6_hdr *ip6;
 2834         int curr_txd, ehdrlen;
 2835         u32 cmd, hdr_len, ip_hlen;
 2836         u16 etype;
 2837         u8 ipproto;
 2838 
 2839 
 2840         cmd = hdr_len = ipproto = 0;
 2841         *txd_upper = *txd_lower = 0;
 2842         curr_txd = adapter->next_avail_tx_desc;
 2843 
 2844         /*
 2845          * Determine where frame payload starts.
 2846          * Jump over vlan headers if already present,
 2847          * helpful for QinQ too.
 2848          */
 2849         eh = mtod(mp, struct ether_vlan_header *);
 2850         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 2851                 etype = ntohs(eh->evl_proto);
 2852                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2853         } else {
 2854                 etype = ntohs(eh->evl_encap_proto);
 2855                 ehdrlen = ETHER_HDR_LEN;
 2856         }
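              /*
               * With the standard values (ETHER_HDR_LEN is 14 and
               * ETHER_VLAN_ENCAP_LEN is 4), ehdrlen is therefore 14 for an
               * untagged frame and 18 for a VLAN-tagged one.
               */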
 2857 
 2858         /*
 2859          * We only support TCP/UDP for IPv4 and IPv6 for the moment.
 2860          * TODO: Support SCTP too when it hits the tree.
 2861          */
 2862         switch (etype) {
 2863         case ETHERTYPE_IP:
 2864                 ip = (struct ip *)(mp->m_data + ehdrlen);
 2865                 ip_hlen = ip->ip_hl << 2;
 2866 
 2867                 /* Setup of IP header checksum. */
 2868                 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
 2869                         /*
 2870                          * Start offset for header checksum calculation.
 2871                          * End offset for header checksum calculation.
 2872                          * Offset of place to put the checksum.
 2873                          */
 2874                         TXD = (struct e1000_context_desc *)
 2875                             &adapter->tx_desc_base[curr_txd];
 2876                         TXD->lower_setup.ip_fields.ipcss = ehdrlen;
 2877                         TXD->lower_setup.ip_fields.ipcse =
 2878                             htole16(ehdrlen + ip_hlen);
 2879                         TXD->lower_setup.ip_fields.ipcso =
 2880                             ehdrlen + offsetof(struct ip, ip_sum);
 2881                         cmd |= E1000_TXD_CMD_IP;
 2882                         *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
 2883                 }
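                      /*
                       * Worked example: for an untagged frame with a
                       * 20-byte IP header, ipcss = 14, ipcse = 34 and
                       * ipcso = 24 (offsetof(struct ip, ip_sum) is 10).
                       */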
 2884 
 2885                 hdr_len = ehdrlen + ip_hlen;
 2886                 ipproto = ip->ip_p;
 2887 
 2888                 break;
 2889         case ETHERTYPE_IPV6:
 2890                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 2891                 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
 2892 
 2893                 /* IPv6 doesn't have a header checksum. */
 2894 
 2895                 hdr_len = ehdrlen + ip_hlen;
 2896                 ipproto = ip6->ip6_nxt;
 2897                 break;
 2898 
 2899         default:
 2900                 return;
 2901         }
 2902 
 2903         switch (ipproto) {
 2904         case IPPROTO_TCP:
 2905                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
 2906                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
 2907                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 2908                         /* no need for context if already set */
 2909                         if (adapter->last_hw_offload == CSUM_TCP)
 2910                                 return;
 2911                         adapter->last_hw_offload = CSUM_TCP;
 2912                         /*
 2913                          * Start offset for payload checksum calculation.
 2914                          * End offset for payload checksum calculation.
 2915                          * Offset of place to put the checksum.
 2916                          */
 2917                         TXD = (struct e1000_context_desc *)
 2918                             &adapter->tx_desc_base[curr_txd];
 2919                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
 2920                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
 2921                         TXD->upper_setup.tcp_fields.tucso =
 2922                             hdr_len + offsetof(struct tcphdr, th_sum);
 2923                         cmd |= E1000_TXD_CMD_TCP;
 2924                 }
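                      /*
                       * Worked example: for untagged IPv4/TCP with a
                       * 20-byte IP header, tucss = 34 and tucso = 50
                       * (offsetof(struct tcphdr, th_sum) is 16); a tucse
                       * of 0 means checksum to the end of the packet.
                       */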
 2925                 break;
 2926         case IPPROTO_UDP:
 2927         {
 2928                 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
 2929                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
 2930                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 2931                         /* no need for context if already set */
 2932                         if (adapter->last_hw_offload == CSUM_UDP)
 2933                                 return;
 2934                         adapter->last_hw_offload = CSUM_UDP;
 2935                         /*
 2936                          * Start offset for header checksum calculation.
 2937                          * End offset for header checksum calculation.
 2938                          * Offset of place to put the checksum.
 2939                          */
 2940                         TXD = (struct e1000_context_desc *)
 2941                             &adapter->tx_desc_base[curr_txd];
 2942                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
 2943                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
 2944                         TXD->upper_setup.tcp_fields.tucso =
 2945                             hdr_len + offsetof(struct udphdr, uh_sum);
 2946                 }
 2947                 /* Fall Thru */
 2948         }
 2949         default:
 2950                 break;
 2951         }
 2952 
 2953         if (TXD == NULL)
 2954                 return;
 2955         TXD->tcp_seg_setup.data = htole32(0);
 2956         TXD->cmd_and_length =
 2957             htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
 2958         tx_buffer = &adapter->tx_buffer_area[curr_txd];
 2959         tx_buffer->m_head = NULL;
 2960         tx_buffer->next_eop = -1;
 2961 
 2962         if (++curr_txd == adapter->num_tx_desc)
 2963                 curr_txd = 0;
 2964 
 2965         adapter->num_tx_desc_avail--;
 2966         adapter->next_avail_tx_desc = curr_txd;
 2967 }
 2968 
 2969 
 2970 /**********************************************************************
 2971  *
 2972  *  Examine each tx_buffer in the used queue. If the hardware is done
 2973  *  processing the packet then free associated resources. The
 2974  *  tx_buffer is put back on the free queue.
 2975  *
 2976  **********************************************************************/
 2977 static void
 2978 lem_txeof(struct adapter *adapter)
 2979 {
 2980         int first, last, done, num_avail;
 2981         struct em_buffer *tx_buffer;
 2982         struct e1000_tx_desc   *tx_desc, *eop_desc;
 2983         struct ifnet   *ifp = adapter->ifp;
 2984 
 2985         EM_TX_LOCK_ASSERT(adapter);
 2986 
 2987 #ifdef DEV_NETMAP
 2988         if (netmap_tx_irq(ifp, 0))
 2989                 return;
 2990 #endif /* DEV_NETMAP */
 2991         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
 2992                 return;
 2993 
 2994         num_avail = adapter->num_tx_desc_avail;
 2995         first = adapter->next_tx_to_clean;
 2996         tx_desc = &adapter->tx_desc_base[first];
 2997         tx_buffer = &adapter->tx_buffer_area[first];
 2998         last = tx_buffer->next_eop;
 2999         eop_desc = &adapter->tx_desc_base[last];
 3000 
 3001         /*
 3002          * What this does is get the index of the
 3003          * first descriptor AFTER the EOP of the
 3004          * first packet, so that we can use a
 3005          * simple comparison in the inner while loop.
 3006          */
 3007         if (++last == adapter->num_tx_desc)
 3008                 last = 0;
 3009         done = last;
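              /*
               * For example, with 256 descriptors and the first packet's
               * EOP in slot 255, "done" wraps to 0 here.
               */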
 3010 
 3011         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 3012             BUS_DMASYNC_POSTREAD);
 3013 
 3014         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
 3015                 /* We clean the range of the packet */
 3016                 while (first != done) {
 3017                         tx_desc->upper.data = 0;
 3018                         tx_desc->lower.data = 0;
 3019                         tx_desc->buffer_addr = 0;
 3020                         ++num_avail;
 3021 
 3022                         if (tx_buffer->m_head) {
 3023                                 ifp->if_opackets++;
 3024                                 bus_dmamap_sync(adapter->txtag,
 3025                                     tx_buffer->map,
 3026                                     BUS_DMASYNC_POSTWRITE);
 3027                                 bus_dmamap_unload(adapter->txtag,
 3028                                     tx_buffer->map);
 3029 
 3030                                 m_freem(tx_buffer->m_head);
 3031                                 tx_buffer->m_head = NULL;
 3032                         }
 3033                         tx_buffer->next_eop = -1;
 3034                         adapter->watchdog_time = ticks;
 3035 
 3036                         if (++first == adapter->num_tx_desc)
 3037                                 first = 0;
 3038 
 3039                         tx_buffer = &adapter->tx_buffer_area[first];
 3040                         tx_desc = &adapter->tx_desc_base[first];
 3041                 }
 3042                 /* See if we can continue to the next packet */
 3043                 last = tx_buffer->next_eop;
 3044                 if (last != -1) {
 3045                         eop_desc = &adapter->tx_desc_base[last];
 3046                         /* Get new done point */
 3047                         if (++last == adapter->num_tx_desc) last = 0;
 3048                         done = last;
 3049                 } else
 3050                         break;
 3051         }
 3052         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
 3053             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3054 
 3055         adapter->next_tx_to_clean = first;
 3056         adapter->num_tx_desc_avail = num_avail;
 3057 
 3058         /*
 3059          * If we have enough room, clear IFF_DRV_OACTIVE to
 3060          * tell the stack that it is OK to send packets.
 3061          * If there are no pending descriptors, clear the watchdog.
 3062          */
 3063         if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {                
 3064                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3065                 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
 3066                         adapter->watchdog_check = FALSE;
 3067                         return;
 3068                 } 
 3069         }
 3070 }
 3071 
 3072 /*********************************************************************
 3073  *
 3074  *  When link is lost there is sometimes work still in the TX ring
 3075  *  which may result in a watchdog; rather than allow that, we do an
 3076  *  attempted cleanup and then reinit here. Note that this has been
 3077  *  seen mostly with fiber adapters.
 3078  *
 3079  **********************************************************************/
 3080 static void
 3081 lem_tx_purge(struct adapter *adapter)
 3082 {
 3083         if ((!adapter->link_active) && (adapter->watchdog_check)) {
 3084                 EM_TX_LOCK(adapter);
 3085                 lem_txeof(adapter);
 3086                 EM_TX_UNLOCK(adapter);
 3087                 if (adapter->watchdog_check) /* Still outstanding? */
 3088                         lem_init_locked(adapter);
 3089         }
 3090 }
 3091 
 3092 /*********************************************************************
 3093  *
 3094  *  Get a buffer from the system mbuf cluster pool.
 3095  *
 3096  **********************************************************************/
 3097 static int
 3098 lem_get_buf(struct adapter *adapter, int i)
 3099 {
 3100         struct mbuf             *m;
 3101         bus_dma_segment_t       segs[1];
 3102         bus_dmamap_t            map;
 3103         struct em_buffer        *rx_buffer;
 3104         int                     error, nsegs;
 3105 
 3106         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 3107         if (m == NULL) {
 3108                 adapter->mbuf_cluster_failed++;
 3109                 return (ENOBUFS);
 3110         }
 3111         m->m_len = m->m_pkthdr.len = MCLBYTES;
 3112 
 3113         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
 3114                 m_adj(m, ETHER_ALIGN);
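              /*
               * ETHER_ALIGN is 2 bytes, which shifts the payload so the IP
               * header that follows the 14-byte Ethernet header lands on a
               * 4-byte boundary.
               */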
 3115 
 3116         /*
 3117          * Using memory from the mbuf cluster pool, invoke the
 3118          * bus_dma machinery to arrange the memory mapping.
 3119          */
 3120         error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
 3121             adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
 3122         if (error != 0) {
 3123                 m_free(m);
 3124                 return (error);
 3125         }
 3126 
 3127         /* If nsegs is wrong then the stack is corrupt. */
 3128         KASSERT(nsegs == 1, ("Too many segments returned!"));
 3129 
 3130         rx_buffer = &adapter->rx_buffer_area[i];
 3131         if (rx_buffer->m_head != NULL)
 3132                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
 3133 
 3134         map = rx_buffer->map;
 3135         rx_buffer->map = adapter->rx_sparemap;
 3136         adapter->rx_sparemap = map;
 3137         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
 3138         rx_buffer->m_head = m;
 3139 
 3140         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
 3141         return (0);
 3142 }
 3143 
 3144 /*********************************************************************
 3145  *
 3146  *  Allocate memory for rx_buffer structures. Since we use one
 3147  *  rx_buffer per received packet, the maximum number of rx_buffers
 3148  *  that we'll need is equal to the number of receive descriptors
 3149  *  that we've allocated.
 3150  *
 3151  **********************************************************************/
 3152 static int
 3153 lem_allocate_receive_structures(struct adapter *adapter)
 3154 {
 3155         device_t dev = adapter->dev;
 3156         struct em_buffer *rx_buffer;
 3157         int i, error;
 3158 
 3159         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
 3160             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
 3161         if (adapter->rx_buffer_area == NULL) {
 3162                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
 3163                 return (ENOMEM);
 3164         }
 3165 
 3166         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
 3167                                 1, 0,                   /* alignment, bounds */
 3168                                 BUS_SPACE_MAXADDR,      /* lowaddr */
 3169                                 BUS_SPACE_MAXADDR,      /* highaddr */
 3170                                 NULL, NULL,             /* filter, filterarg */
 3171                                 MCLBYTES,               /* maxsize */
 3172                                 1,                      /* nsegments */
 3173                                 MCLBYTES,               /* maxsegsize */
 3174                                 0,                      /* flags */
 3175                                 NULL,                   /* lockfunc */
 3176                                 NULL,                   /* lockarg */
 3177                                 &adapter->rxtag);
 3178         if (error) {
 3179                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
 3180                     __func__, error);
 3181                 goto fail;
 3182         }
 3183 
 3184         /* Create the spare map (used by getbuf) */
 3185         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
 3186              &adapter->rx_sparemap);
 3187         if (error) {
 3188                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
 3189                     __func__, error);
 3190                 goto fail;
 3191         }
 3192 
 3193         rx_buffer = adapter->rx_buffer_area;
 3194         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
 3195                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
 3196                     &rx_buffer->map);
 3197                 if (error) {
 3198                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
 3199                             __func__, error);
 3200                         goto fail;
 3201                 }
 3202         }
 3203 
 3204         return (0);
 3205 
 3206 fail:
 3207         lem_free_receive_structures(adapter);
 3208         return (error);
 3209 }
 3210 
 3211 /*********************************************************************
 3212  *
 3213  *  (Re)initialize receive structures.
 3214  *
 3215  **********************************************************************/
 3216 static int
 3217 lem_setup_receive_structures(struct adapter *adapter)
 3218 {
 3219         struct em_buffer *rx_buffer;
 3220         int i, error;
 3221 #ifdef DEV_NETMAP
 3222         /* we are already under lock */
 3223         struct netmap_adapter *na = NA(adapter->ifp);
 3224         struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
 3225 #endif
 3226 
 3227         /* Reset descriptor ring */
 3228         bzero(adapter->rx_desc_base,
 3229             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
 3230 
 3231         /* Free current RX buffers. */
 3232         rx_buffer = adapter->rx_buffer_area;
 3233         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
 3234                 if (rx_buffer->m_head != NULL) {
 3235                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
 3236                             BUS_DMASYNC_POSTREAD);
 3237                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
 3238                         m_freem(rx_buffer->m_head);
 3239                         rx_buffer->m_head = NULL;
 3240                 }
 3241         }
 3242 
 3243         /* Allocate new ones. */
 3244         for (i = 0; i < adapter->num_rx_desc; i++) {
 3245 #ifdef DEV_NETMAP
 3246                 if (slot) {
 3247                         /* the i-th NIC entry goes to slot si */
 3248                         int si = netmap_idx_n2k(&na->rx_rings[0], i);
 3249                         uint64_t paddr;
 3250                         void *addr;
 3251 
 3252                         addr = PNMB(slot + si, &paddr);
 3253                         netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
 3254                         /* Update descriptor */
 3255                         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
 3256                         continue;
 3257                 }
 3258 #endif /* DEV_NETMAP */
 3259                 error = lem_get_buf(adapter, i);
 3260                 if (error)
 3261                         return (error);
 3262         }
 3263 
 3264         /* Setup our descriptor pointers */
 3265         adapter->next_rx_desc_to_check = 0;
 3266         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
 3267             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3268 
 3269         return (0);
 3270 }
 3271 
 3272 /*********************************************************************
 3273  *
 3274  *  Enable receive unit.
 3275  *
 3276  **********************************************************************/
 3277 
 3278 static void
 3279 lem_initialize_receive_unit(struct adapter *adapter)
 3280 {
 3281         struct ifnet    *ifp = adapter->ifp;
 3282         u64     bus_addr;
 3283         u32     rctl, rxcsum;
 3284 
 3285         INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
 3286 
 3287         /*
 3288          * Make sure receives are disabled while setting
 3289          * up the descriptor ring
 3290          */
 3291         rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 3292         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 3293 
 3294         if (adapter->hw.mac.type >= e1000_82540) {
 3295                 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
 3296                     adapter->rx_abs_int_delay.value);
 3297                 /*
 3298                  * Set the interrupt throttling rate. Value is calculated
 3299                  * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
 3300                  */
 3301                 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
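                      /*
                       * E.g. assuming the driver's usual MAX_INTS_PER_SEC
                       * of 8000, DEFAULT_ITR works out to roughly 488 in
                       * units of 256ns.
                       */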
 3302         }
 3303 
 3304         /* Setup the Base and Length of the Rx Descriptor Ring */
 3305         bus_addr = adapter->rxdma.dma_paddr;
 3306         E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
 3307             adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
 3308         E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
 3309             (u32)(bus_addr >> 32));
 3310         E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
 3311             (u32)bus_addr);
 3312 
 3313         /* Setup the Receive Control Register */
 3314         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
 3315         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
 3316                    E1000_RCTL_RDMTS_HALF |
 3317                    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 3318 
 3319         /* Make sure VLAN Filters are off */
 3320         rctl &= ~E1000_RCTL_VFE;
 3321 
 3322         if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
 3323                 rctl |= E1000_RCTL_SBP;
 3324         else
 3325                 rctl &= ~E1000_RCTL_SBP;
 3326 
 3327         switch (adapter->rx_buffer_len) {
 3328         default:
 3329         case 2048:
 3330                 rctl |= E1000_RCTL_SZ_2048;
 3331                 break;
 3332         case 4096:
 3333                 rctl |= E1000_RCTL_SZ_4096 |
 3334                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
 3335                 break;
 3336         case 8192:
 3337                 rctl |= E1000_RCTL_SZ_8192 |
 3338                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
 3339                 break;
 3340         case 16384:
 3341                 rctl |= E1000_RCTL_SZ_16384 |
 3342                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
 3343                 break;
 3344         }
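              /*
               * Buffer sizes above 2048 require the buffer size extension
               * (BSEX) encoding and imply long packet enable (LPE), as set
               * in the cases above.
               */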
 3345 
 3346         if (ifp->if_mtu > ETHERMTU)
 3347                 rctl |= E1000_RCTL_LPE;
 3348         else
 3349                 rctl &= ~E1000_RCTL_LPE;
 3350 
 3351         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
 3352         if ((adapter->hw.mac.type >= e1000_82543) &&
 3353             (ifp->if_capenable & IFCAP_RXCSUM)) {
 3354                 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
 3355                 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
 3356                 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
 3357         }
 3358 
 3359         /* Enable Receives */
 3360         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
 3361 
 3362         /*
 3363          * Setup the HW Rx Head and
 3364          * Tail Descriptor Pointers
 3365          */
 3366         E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
 3367         rctl = adapter->num_rx_desc - 1; /* default RDT value */
 3368 #ifdef DEV_NETMAP
 3369         /* preserve buffers already made available to clients */
 3370         if (ifp->if_capenable & IFCAP_NETMAP)
 3371                 rctl -= nm_kr_rxspace(&NA(adapter->ifp)->rx_rings[0]);
 3372 #endif /* DEV_NETMAP */
 3373         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
 3374 
 3375         return;
 3376 }
 3377 
 3378 /*********************************************************************
 3379  *
 3380  *  Free receive related data structures.
 3381  *
 3382  **********************************************************************/
 3383 static void
 3384 lem_free_receive_structures(struct adapter *adapter)
 3385 {
 3386         struct em_buffer *rx_buffer;
 3387         int i;
 3388 
 3389         INIT_DEBUGOUT("free_receive_structures: begin");
 3390 
 3391         if (adapter->rx_sparemap) {
 3392                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
 3393                 adapter->rx_sparemap = NULL;
 3394         }
 3395 
 3396         /* Cleanup any existing buffers */
 3397         if (adapter->rx_buffer_area != NULL) {
 3398                 rx_buffer = adapter->rx_buffer_area;
 3399                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
 3400                         if (rx_buffer->m_head != NULL) {
 3401                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
 3402                                     BUS_DMASYNC_POSTREAD);
 3403                                 bus_dmamap_unload(adapter->rxtag,
 3404                                     rx_buffer->map);
 3405                                 m_freem(rx_buffer->m_head);
 3406                                 rx_buffer->m_head = NULL;
 3407                         } else if (rx_buffer->map != NULL)
 3408                                 bus_dmamap_unload(adapter->rxtag,
 3409                                     rx_buffer->map);
 3410                         if (rx_buffer->map != NULL) {
 3411                                 bus_dmamap_destroy(adapter->rxtag,
 3412                                     rx_buffer->map);
 3413                                 rx_buffer->map = NULL;
 3414                         }
 3415                 }
 3416         }
 3417 
 3418         if (adapter->rx_buffer_area != NULL) {
 3419                 free(adapter->rx_buffer_area, M_DEVBUF);
 3420                 adapter->rx_buffer_area = NULL;
 3421         }
 3422 
 3423         if (adapter->rxtag != NULL) {
 3424                 bus_dma_tag_destroy(adapter->rxtag);
 3425                 adapter->rxtag = NULL;
 3426         }
 3427 }
 3428 
 3429 /*********************************************************************
 3430  *
 3431  *  This routine executes in interrupt context. It replenishes
 3432  *  the mbufs in the descriptor ring and passes data that has been
 3433  *  DMA'ed into host memory up to the upper layer.
 3434  *
 3435  *  We loop at most count times if count is > 0, or until done if
 3436  *  count < 0.
 3437  *
 3438  *  For polling we also now return the number of cleaned packets.
 3439  *********************************************************************/
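      /*
       * When the routine completes, it returns TRUE if the last descriptor
       * examined still had its DD (descriptor done) bit set, i.e. more
       * completed packets may be pending.
       */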
 3440 static bool
 3441 lem_rxeof(struct adapter *adapter, int count, int *done)
 3442 {
 3443         struct ifnet    *ifp = adapter->ifp;
 3444         struct mbuf     *mp;
 3445         u8              status = 0, accept_frame = 0, eop = 0;
 3446         u16             len, desc_len, prev_len_adj;
 3447         int             i, rx_sent = 0;
 3448         struct e1000_rx_desc   *current_desc;
 3449 
 3450         EM_RX_LOCK(adapter);
 3451         i = adapter->next_rx_desc_to_check;
 3452         current_desc = &adapter->rx_desc_base[i];
 3453         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
 3454             BUS_DMASYNC_POSTREAD);
 3455 
 3456 #ifdef DEV_NETMAP
 3457         if (netmap_rx_irq(ifp, 0, &rx_sent)) {
 3458                 EM_RX_UNLOCK(adapter);
 3459                 return (FALSE);
 3460         }
 3461 #endif /* DEV_NETMAP */
 3462 
 3463         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
 3464                 if (done != NULL)
 3465                         *done = rx_sent;
 3466                 EM_RX_UNLOCK(adapter);
 3467                 return (FALSE);
 3468         }
 3469 
 3470         while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3471                 struct mbuf *m = NULL;
 3472 
 3473                 status = current_desc->status;
 3474                 if ((status & E1000_RXD_STAT_DD) == 0)
 3475                         break;
 3476 
 3477                 mp = adapter->rx_buffer_area[i].m_head;
 3478                 /*
 3479                  * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
 3480                  * needs to access the last received byte in the mbuf.
 3481                  */
 3482                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
 3483                     BUS_DMASYNC_POSTREAD);
 3484 
 3485                 accept_frame = 1;
 3486                 prev_len_adj = 0;
 3487                 desc_len = le16toh(current_desc->length);
 3488                 if (status & E1000_RXD_STAT_EOP) {
 3489                         count--;
 3490                         eop = 1;
 3491                         if (desc_len < ETHER_CRC_LEN) {
 3492                                 len = 0;
 3493                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
 3494                         } else
 3495                                 len = desc_len - ETHER_CRC_LEN;
 3496                 } else {
 3497                         eop = 0;
 3498                         len = desc_len;
 3499                 }
 3500 
 3501                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
 3502                         u8      last_byte;
 3503                         u32     pkt_len = desc_len;
 3504 
 3505                         if (adapter->fmp != NULL)
 3506                                 pkt_len += adapter->fmp->m_pkthdr.len;
 3507 
 3508                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);                        
 3509                         if (TBI_ACCEPT(&adapter->hw, status,
 3510                             current_desc->errors, pkt_len, last_byte,
 3511                             adapter->min_frame_size, adapter->max_frame_size)) {
 3512                                 e1000_tbi_adjust_stats_82543(&adapter->hw,
 3513                                     &adapter->stats, pkt_len,
 3514                                     adapter->hw.mac.addr,
 3515                                     adapter->max_frame_size);
 3516                                 if (len > 0)
 3517                                         len--;
 3518                         } else
 3519                                 accept_frame = 0;
 3520                 }
 3521 
 3522                 if (accept_frame) {
 3523                         if (lem_get_buf(adapter, i) != 0) {
 3524                                 ifp->if_iqdrops++;
 3525                                 goto discard;
 3526                         }
 3527 
 3528                         /* Assign correct length to the current fragment */
 3529                         mp->m_len = len;
 3530 
 3531                         if (adapter->fmp == NULL) {
 3532                                 mp->m_pkthdr.len = len;
 3533                                 adapter->fmp = mp; /* Store the first mbuf */
 3534                                 adapter->lmp = mp;
 3535                         } else {
 3536                                 /* Chain mbuf's together */
 3537                                 mp->m_flags &= ~M_PKTHDR;
 3538                                 /*
 3539                                  * Adjust length of previous mbuf in chain if
 3540                                  * we received less than 4 bytes in the last
 3541                                  * descriptor.
 3542                                  */
 3543                                 if (prev_len_adj > 0) {
 3544                                         adapter->lmp->m_len -= prev_len_adj;
 3545                                         adapter->fmp->m_pkthdr.len -=
 3546                                             prev_len_adj;
 3547                                 }
 3548                                 adapter->lmp->m_next = mp;
 3549                                 adapter->lmp = adapter->lmp->m_next;
 3550                                 adapter->fmp->m_pkthdr.len += len;
 3551                         }
 3552 
 3553                         if (eop) {
 3554                                 adapter->fmp->m_pkthdr.rcvif = ifp;
 3555                                 ifp->if_ipackets++;
 3556                                 lem_receive_checksum(adapter, current_desc,
 3557                                     adapter->fmp);
 3558 #ifndef __NO_STRICT_ALIGNMENT
 3559                                 if (adapter->max_frame_size >
 3560                                     (MCLBYTES - ETHER_ALIGN) &&
 3561                                     lem_fixup_rx(adapter) != 0)
 3562                                         goto skip;
 3563 #endif
 3564                                 if (status & E1000_RXD_STAT_VP) {
 3565                                         adapter->fmp->m_pkthdr.ether_vtag =
 3566                                             le16toh(current_desc->special);
 3567                                         adapter->fmp->m_flags |= M_VLANTAG;
 3568                                 }
 3569 #ifndef __NO_STRICT_ALIGNMENT
 3570 skip:
 3571 #endif
 3572                                 m = adapter->fmp;
 3573                                 adapter->fmp = NULL;
 3574                                 adapter->lmp = NULL;
 3575                         }
 3576                 } else {
 3577                         adapter->dropped_pkts++;
 3578 discard:
 3579                         /* Reuse loaded DMA map and just update mbuf chain */
 3580                         mp = adapter->rx_buffer_area[i].m_head;
 3581                         mp->m_len = mp->m_pkthdr.len = MCLBYTES;
 3582                         mp->m_data = mp->m_ext.ext_buf;
 3583                         mp->m_next = NULL;
 3584                         if (adapter->max_frame_size <=
 3585                             (MCLBYTES - ETHER_ALIGN))
 3586                                 m_adj(mp, ETHER_ALIGN);
 3587                         if (adapter->fmp != NULL) {
 3588                                 m_freem(adapter->fmp);
 3589                                 adapter->fmp = NULL;
 3590                                 adapter->lmp = NULL;
 3591                         }
 3592                         m = NULL;
 3593                 }
 3594 
 3595                 /* Zero out the receive descriptor's status. */
 3596                 current_desc->status = 0;
 3597                 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
 3598                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3599 
 3600                 /* Advance our pointers to the next descriptor. */
 3601                 if (++i == adapter->num_rx_desc)
 3602                         i = 0;
 3603                 /* Call into the stack */
 3604                 if (m != NULL) {
 3605                         adapter->next_rx_desc_to_check = i;
 3606                         EM_RX_UNLOCK(adapter);
 3607                         (*ifp->if_input)(ifp, m);
 3608                         EM_RX_LOCK(adapter);
 3609                         rx_sent++;
 3610                         i = adapter->next_rx_desc_to_check;
 3611                 }
 3612                 current_desc = &adapter->rx_desc_base[i];
 3613         }
 3614         adapter->next_rx_desc_to_check = i;
 3615 
 3616         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
 3617         if (--i < 0)
 3618                 i = adapter->num_rx_desc - 1;
 3619         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
 3620         if (done != NULL)
 3621                 *done = rx_sent;
 3622         EM_RX_UNLOCK(adapter);
 3623         return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
 3624 }
 3625 
 3626 #ifndef __NO_STRICT_ALIGNMENT
 3627 /*
 3628  * When jumbo frames are enabled we should realign the entire payload on
 3629  * architectures with strict alignment. This is a serious design mistake of
 3630  * the 8254x as it nullifies DMA operations. The 8254x only allows the RX
 3631  * buffer size to be 2048/4096/8192/16384. What we really want is
 3632  * 2048 - ETHER_ALIGN to align its payload. On architectures without strict
 3633  * alignment restrictions the 8254x still performs unaligned memory accesses,
 3634  * which reduces performance too. To avoid copying an entire frame to
 3635  * realign it, we allocate a new mbuf and copy the ethernet header into the
 3636  * new mbuf. The new mbuf is prepended to the existing mbuf chain.
 3637  *
 3638  * Be aware that the best performance of the 8254x is achieved only when
 3639  * jumbo frames are not used at all on architectures with strict alignment.
 3640  */
 3641 static int
 3642 lem_fixup_rx(struct adapter *adapter)
 3643 {
 3644         struct mbuf *m, *n;
 3645         int error;
 3646 
 3647         error = 0;
 3648         m = adapter->fmp;
 3649         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
 3650                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
 3651                 m->m_data += ETHER_HDR_LEN;
 3652         } else {
 3653                 MGETHDR(n, M_NOWAIT, MT_DATA);
 3654                 if (n != NULL) {
 3655                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
 3656                         m->m_data += ETHER_HDR_LEN;
 3657                         m->m_len -= ETHER_HDR_LEN;
 3658                         n->m_len = ETHER_HDR_LEN;
 3659                         M_MOVE_PKTHDR(n, m);
 3660                         n->m_next = m;
 3661                         adapter->fmp = n;
 3662                 } else {
 3663                         adapter->dropped_pkts++;
 3664                         m_freem(adapter->fmp);
 3665                         adapter->fmp = NULL;
 3666                         error = ENOMEM;
 3667                 }
 3668         }
 3669 
 3670         return (error);
 3671 }
 3672 #endif
 3673 
 3674 /*********************************************************************
 3675  *
 3676  *  Verify that the hardware indicated that the checksum is valid.
 3677  *  Inform the stack about the status of the checksum so that the
 3678  *  stack doesn't spend time verifying it.
 3679  *
 3680  *********************************************************************/
 3681 static void
 3682 lem_receive_checksum(struct adapter *adapter,
 3683             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
 3684 {
 3685         /* 82543 or newer only */
 3686         if ((adapter->hw.mac.type < e1000_82543) ||
 3687             /* Ignore Checksum bit is set */
 3688             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
 3689                 mp->m_pkthdr.csum_flags = 0;
 3690                 return;
 3691         }
 3692 
 3693         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
 3694                 /* Did it pass? */
 3695                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
 3696                         /* IP Checksum Good */
 3697                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
 3698                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3699 
 3700                 } else {
 3701                         mp->m_pkthdr.csum_flags = 0;
 3702                 }
 3703         }
 3704 
 3705         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
 3706                 /* Did it pass? */
 3707                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
 3708                         mp->m_pkthdr.csum_flags |=
 3709                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 3710                         mp->m_pkthdr.csum_data = htons(0xffff);
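                              /*
                               * csum_data of 0xffff together with
                               * CSUM_PSEUDO_HDR tells the stack the full
                               * TCP/UDP checksum has already been verified.
                               */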
 3711                 }
 3712         }
 3713 }
 3714 
 3715 /*
 3716  * This routine is run via a vlan
 3717  * config EVENT
 3718  */
 3719 static void
 3720 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 3721 {
 3722         struct adapter  *adapter = ifp->if_softc;
 3723         u32             index, bit;
 3724 
 3725         if (ifp->if_softc !=  arg)   /* Not our event */
 3726                 return;
 3727 
 3728         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
 3729                 return;
 3730 
 3731         EM_CORE_LOCK(adapter);
 3732         index = (vtag >> 5) & 0x7F;
 3733         bit = vtag & 0x1F;
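              /*
               * E.g. vtag 100: index = 3, bit = 4, so bit 4 of
               * shadow_vfta[3] is set for this VLAN.
               */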
 3734         adapter->shadow_vfta[index] |= (1 << bit);
 3735         ++adapter->num_vlans;
 3736         /* Re-init to load the changes */
 3737         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
 3738                 lem_init_locked(adapter);
 3739         EM_CORE_UNLOCK(adapter);
 3740 }
 3741 
 3742 /*
 3743  * This routine is run via a vlan
 3744  * unconfig EVENT
 3745  */
 3746 static void
 3747 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 3748 {
 3749         struct adapter  *adapter = ifp->if_softc;
 3750         u32             index, bit;
 3751 
 3752         if (ifp->if_softc !=  arg)
 3753                 return;
 3754 
 3755         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
 3756                 return;
 3757 
 3758         EM_CORE_LOCK(adapter);
 3759         index = (vtag >> 5) & 0x7F;
 3760         bit = vtag & 0x1F;
 3761         adapter->shadow_vfta[index] &= ~(1 << bit);
 3762         --adapter->num_vlans;
 3763         /* Re-init to load the changes */
 3764         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
 3765                 lem_init_locked(adapter);
 3766         EM_CORE_UNLOCK(adapter);
 3767 }
 3768 
 3769 static void
 3770 lem_setup_vlan_hw_support(struct adapter *adapter)
 3771 {
 3772         struct e1000_hw *hw = &adapter->hw;
 3773         u32             reg;
 3774 
 3775         /*
 3776         ** We get here through init_locked, meaning
 3777         ** a soft reset; this has already cleared
 3778         ** the VFTA and other state, so if no vlans
 3779         ** have been registered, do nothing.
 3780         */
 3781         if (adapter->num_vlans == 0)
 3782                 return;
 3783 
 3784         /*
 3785         ** A soft reset zeroes out the VFTA, so
 3786         ** we need to repopulate it now.
 3787         */
 3788         for (int i = 0; i < EM_VFTA_SIZE; i++)
 3789                 if (adapter->shadow_vfta[i] != 0)
 3790                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
 3791                             i, adapter->shadow_vfta[i]);
 3792 
 3793         reg = E1000_READ_REG(hw, E1000_CTRL);
 3794         reg |= E1000_CTRL_VME;
 3795         E1000_WRITE_REG(hw, E1000_CTRL, reg);
 3796 
 3797         /* Enable the Filter Table */
 3798         reg = E1000_READ_REG(hw, E1000_RCTL);
 3799         reg &= ~E1000_RCTL_CFIEN;
 3800         reg |= E1000_RCTL_VFE;
 3801         E1000_WRITE_REG(hw, E1000_RCTL, reg);
 3802 }
 3803 
 3804 static void
 3805 lem_enable_intr(struct adapter *adapter)
 3806 {
 3807         struct e1000_hw *hw = &adapter->hw;
 3808         u32 ims_mask = IMS_ENABLE_MASK;
 3809 
 3810         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
 3811 }
 3812 
 3813 static void
 3814 lem_disable_intr(struct adapter *adapter)
 3815 {
 3816         struct e1000_hw *hw = &adapter->hw;
 3817 
 3818         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
 3819 }
 3820 
 3821 /*
 3822  * Bit of a misnomer: what this really means is
 3823  * to enable OS management of the system, i.e.
 3824  * to disable special hardware management features.
 3825  */
 3826 static void
 3827 lem_init_manageability(struct adapter *adapter)
 3828 {
 3829         /* A shared code workaround */
 3830         if (adapter->has_manage) {
 3831                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
 3832                 /* disable hardware interception of ARP */
 3833                 manc &= ~(E1000_MANC_ARP_EN);
 3834                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
 3835         }
 3836 }
 3837 
 3838 /*
 3839  * Give control back to hardware management
 3840  * controller if there is one.
 3841  */
 3842 static void
 3843 lem_release_manageability(struct adapter *adapter)
 3844 {
 3845         if (adapter->has_manage) {
 3846                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
 3847 
 3848                 /* re-enable hardware interception of ARP */
 3849                 manc |= E1000_MANC_ARP_EN;
 3850                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
 3851         }
 3852 }
 3853 
 3854 /*
 3855  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 3856  * For ASF and Pass Through versions of f/w this means
 3857  * that the driver is loaded. For AMT version type f/w
 3858  * this means that the network i/f is open.
 3859  */
 3860 static void
 3861 lem_get_hw_control(struct adapter *adapter)
 3862 {
 3863         u32 ctrl_ext;
 3864 
 3865         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
 3866         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
 3867             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 3868         return;
 3869 }
 3870 
 3871 /*
 3872  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 3873  * For ASF and Pass Through versions of f/w this means that
 3874  * the driver is no longer loaded. For AMT versions of the
 3875  * f/w this means that the network i/f is closed.
 3876  */
 3877 static void
 3878 lem_release_hw_control(struct adapter *adapter)
 3879 {
 3880         u32 ctrl_ext;
 3881 
 3882         if (!adapter->has_manage)
 3883                 return;
 3884 
 3885         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
 3886         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
 3887             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 3888         return;
 3889 }
 3890 
 3891 static int
 3892 lem_is_valid_ether_addr(u8 *addr)
 3893 {
 3894         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
 3895 
 3896         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
 3897                 return (FALSE);
 3898         }
 3899 
 3900         return (TRUE);
 3901 }
 3902 
 3903 /*
 3904 ** Parse the interface capabilities with regard
 3905 ** to both system management and wake-on-lan for
 3906 ** later use.
 3907 */
 3908 static void
 3909 lem_get_wakeup(device_t dev)
 3910 {
 3911         struct adapter  *adapter = device_get_softc(dev);
 3912         u16             eeprom_data = 0, device_id, apme_mask;
 3913 
 3914         adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
 3915         apme_mask = EM_EEPROM_APME;
 3916 
 3917         switch (adapter->hw.mac.type) {
 3918         case e1000_82542:
 3919         case e1000_82543:
 3920                 break;
 3921         case e1000_82544:
 3922                 e1000_read_nvm(&adapter->hw,
 3923                     NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
 3924                 apme_mask = EM_82544_APME;
 3925                 break;
 3926         case e1000_82546:
 3927         case e1000_82546_rev_3:
 3928                 if (adapter->hw.bus.func == 1) {
 3929                         e1000_read_nvm(&adapter->hw,
 3930                             NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 3931                         break;
 3932                 } else
 3933                         e1000_read_nvm(&adapter->hw,
 3934                             NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
 3935                 break;
 3936         default:
 3937                 e1000_read_nvm(&adapter->hw,
 3938                     NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
 3939                 break;
 3940         }
 3941         if (eeprom_data & apme_mask)
 3942                 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
 3943         /*
 3944          * We have the eeprom settings, now apply the special cases
 3945          * where the eeprom may be wrong or the board won't support
 3946          * wake on lan on a particular port
 3947          */
 3948         device_id = pci_get_device(dev);
 3949         switch (device_id) {
 3950         case E1000_DEV_ID_82546GB_PCIE:
 3951                 adapter->wol = 0;
 3952                 break;
 3953         case E1000_DEV_ID_82546EB_FIBER:
 3954         case E1000_DEV_ID_82546GB_FIBER:
 3955                 /* Wake events only supported on port A for dual fiber
 3956                  * regardless of eeprom setting */
 3957                 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
 3958                     E1000_STATUS_FUNC_1)
 3959                         adapter->wol = 0;
 3960                 break;
 3961         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 3962                 /* if quad port adapter, disable WoL on all but port A */
 3963                 if (global_quad_port_a != 0)
 3964                         adapter->wol = 0;
 3965                 /* Reset for multiple quad port adapters */
 3966                 if (++global_quad_port_a == 4)
 3967                         global_quad_port_a = 0;
 3968                 break;
 3969         }
 3970         return;
 3971 }
 3972 
 3973 
 3974 /*
 3975  * Enable PCI Wake On Lan capability
 3976  */
 3977 static void
 3978 lem_enable_wakeup(device_t dev)
 3979 {
 3980         struct adapter  *adapter = device_get_softc(dev);
 3981         struct ifnet    *ifp = adapter->ifp;
 3982         u32             pmc, ctrl, ctrl_ext, rctl;
 3983         u16             status;
 3984 
 3985         if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
 3986                 return;
 3987 
 3988         /* Advertise the wakeup capability */
 3989         ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
 3990         ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
 3991         E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
 3992         E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
 3993 
 3994         /* Keep the laser running on Fiber adapters */
 3995         if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
 3996             adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
 3997                 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
 3998                 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
 3999                 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
 4000         }
 4001 
 4002         /*
 4003         ** Determine type of Wakeup: note that wol
 4004         ** is set with all bits on by default.
 4005         */
 4006         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
 4007                 adapter->wol &= ~E1000_WUFC_MAG;
 4008 
 4009         if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
 4010                 adapter->wol &= ~E1000_WUFC_MC;
 4011         else {
 4012                 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
 4013                 rctl |= E1000_RCTL_MPE;
 4014                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
 4015         }
 4016 
 4017         if (adapter->hw.mac.type == e1000_pchlan) {
 4018                 if (lem_enable_phy_wakeup(adapter))
 4019                         return;
 4020         } else {
 4021                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
 4022                 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
 4023         }
 4024 
 4025 
 4026         /* Request PME */
 4027         status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
 4028         status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 4029         if (ifp->if_capenable & IFCAP_WOL)
 4030                 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 4031         pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
 4032 
 4033         return;
 4034 }
 4035 
 4036 /*
 4037 ** WOL on the newer chipset interfaces (pchlan)
 4038 ** requires the wake-up configuration to be copied into the PHY
 4039 */
 4040 static int
 4041 lem_enable_phy_wakeup(struct adapter *adapter)
 4042 {
 4043         struct e1000_hw *hw = &adapter->hw;
 4044         u32 mreg, ret = 0;
 4045         u16 preg;
 4046 
 4047         /* copy MAC RARs to PHY RARs */
 4048         for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
 4049                 mreg = E1000_READ_REG(hw, E1000_RAL(i));
 4050                 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
 4051                 e1000_write_phy_reg(hw, BM_RAR_M(i),
 4052                     (u16)((mreg >> 16) & 0xFFFF));
 4053                 mreg = E1000_READ_REG(hw, E1000_RAH(i));
 4054                 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
 4055                 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
 4056                     (u16)((mreg >> 16) & 0xFFFF));
 4057         }
 4058 
 4059         /* copy MAC MTA to PHY MTA */
 4060         for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
 4061                 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
 4062                 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
 4063                 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
 4064                     (u16)((mreg >> 16) & 0xFFFF));
 4065         }
 4066 
 4067         /* configure PHY Rx Control register */
 4068         e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
 4069         mreg = E1000_READ_REG(hw, E1000_RCTL);
 4070         if (mreg & E1000_RCTL_UPE)
 4071                 preg |= BM_RCTL_UPE;
 4072         if (mreg & E1000_RCTL_MPE)
 4073                 preg |= BM_RCTL_MPE;
 4074         preg &= ~(BM_RCTL_MO_MASK);
 4075         if (mreg & E1000_RCTL_MO_3)
 4076                 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
 4077                                 << BM_RCTL_MO_SHIFT);
 4078         if (mreg & E1000_RCTL_BAM)
 4079                 preg |= BM_RCTL_BAM;
 4080         if (mreg & E1000_RCTL_PMCF)
 4081                 preg |= BM_RCTL_PMCF;
 4082         mreg = E1000_READ_REG(hw, E1000_CTRL);
 4083         if (mreg & E1000_CTRL_RFCE)
 4084                 preg |= BM_RCTL_RFCE;
 4085         e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
 4086 
 4087         /* enable PHY wakeup in MAC register */
 4088         E1000_WRITE_REG(hw, E1000_WUC,
 4089             E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
 4090         E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
 4091 
 4092         /* configure and enable PHY wakeup in PHY registers */
 4093         e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
 4094         e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
 4095 
 4096         /* activate PHY wakeup */
 4097         ret = hw->phy.ops.acquire(hw);
 4098         if (ret) {
 4099                 printf("Could not acquire PHY\n");
 4100                 return ret;
 4101         }
 4102         e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
 4103                                  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
 4104         ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
 4105         if (ret) {
 4106                 printf("Could not read PHY page 769\n");
 4107                 goto out;
 4108         }
 4109         preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
 4110         ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
 4111         if (ret)
 4112                 printf("Could not set PHY Host Wakeup bit\n");
 4113 out:
 4114         hw->phy.ops.release(hw);
 4115 
 4116         return ret;
 4117 }
 4118 
 4119 static void
 4120 lem_led_func(void *arg, int onoff)
 4121 {
 4122         struct adapter  *adapter = arg;
 4123 
 4124         EM_CORE_LOCK(adapter);
 4125         if (onoff) {
 4126                 e1000_setup_led(&adapter->hw);
 4127                 e1000_led_on(&adapter->hw);
 4128         } else {
 4129                 e1000_led_off(&adapter->hw);
 4130                 e1000_cleanup_led(&adapter->hw);
 4131         }
 4132         EM_CORE_UNLOCK(adapter);
 4133 }
 4134 
 4135 /*********************************************************************
 4136 * 82544 Coexistence issue workaround.
 4137 *    There are two issues:
 4138 *       1. Transmit hang issue.
 4139 *    To detect this issue, the following equation can be used:
 4140 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 4141 *         If SUM[3:0] is between 1 and 4, this issue will occur.
 4142 *
 4143 *       2. DAC issue.
 4144 *    To detect this issue, the following equation can be used:
 4145 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 4146 *         If SUM[3:0] is between 9 and c, this issue will occur.
 4147 *
 4148 *
 4149 *    WORKAROUND:
 4150 *         Make sure the buffer does not end on an address whose low
 4151 *         nibble is 1, 2, 3, 4 (hang) or 9, a, b, c (DAC).
 4152 *
 4153 *************************************************************************/
 4154 static u32
 4155 lem_fill_descriptors (bus_addr_t address, u32 length,
 4156                 PDESC_ARRAY desc_array)
 4157 {
 4158         u32 safe_terminator;
 4159 
 4160         /* The issue is sensitive to both length and address. */
 4161         /* Handle very short buffers first... */
 4162         if (length <= 4) {
 4163                 desc_array->descriptor[0].address = address;
 4164                 desc_array->descriptor[0].length = length;
 4165                 desc_array->elements = 1;
 4166                 return (desc_array->elements);
 4167         }
 4168         safe_terminator = (u32)((((u32)address & 0x7) +
 4169             (length & 0xF)) & 0xF);
 4170         /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
 4171         if (safe_terminator == 0   ||
 4172         (safe_terminator > 4   &&
 4173         safe_terminator < 9)   ||
 4174         (safe_terminator > 0xC &&
 4175         safe_terminator <= 0xF)) {
 4176                 desc_array->descriptor[0].address = address;
 4177                 desc_array->descriptor[0].length = length;
 4178                 desc_array->elements = 1;
 4179                 return (desc_array->elements);
 4180         }
 4181 
 4182         desc_array->descriptor[0].address = address;
 4183         desc_array->descriptor[0].length = length - 4;
 4184         desc_array->descriptor[1].address = address + (length - 4);
 4185         desc_array->descriptor[1].length = 4;
 4186         desc_array->elements = 2;
 4187         return (desc_array->elements);
 4188 }
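/*
 * Editor's note: a worked example of the check above, with hypothetical
 * address/length pairs; it is not part of the driver.
 *
 *   address = 0x1000, length = 0x12:
 *       safe_terminator = ((0x1000 & 0x7) + (0x12 & 0xF)) & 0xF = 0x2
 *       0x2 lies in the problematic 0x1-0x4 range, so the buffer is
 *       split into (0x1000, 0xE) and (0x100E, 0x4) -- two descriptors.
 *
 *   address = 0x1003, length = 0x22:
 *       safe_terminator = ((0x1003 & 0x7) + (0x22 & 0xF)) & 0xF = 0x5
 *       0x5 falls outside both problem ranges, so a single descriptor
 *       (0x1003, 0x22) is used unchanged.
 */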
 4189 
 4190 /**********************************************************************
 4191  *
 4192  *  Update the board statistics counters.
 4193  *
 4194  **********************************************************************/
 4195 static void
 4196 lem_update_stats_counters(struct adapter *adapter)
 4197 {
 4198         struct ifnet   *ifp;
 4199 
 4200         if(adapter->hw.phy.media_type == e1000_media_type_copper ||
 4201            (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
 4202                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
 4203                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
 4204         }
 4205         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
 4206         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
 4207         adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
 4208         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
 4209 
 4210         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
 4211         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
 4212         adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
 4213         adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
 4214         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
 4215         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
 4216         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
 4217         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
 4218         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
 4219         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
 4220         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
 4221         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
 4222         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
 4223         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
 4224         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
 4225         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
 4226         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
 4227         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
 4228         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
 4229         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
 4230 
 4231         /* For the 64-bit byte counters the low dword must be read first. */
 4232         /* Both registers clear on the read of the high dword */
 4233 
 4234         adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
 4235             ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
 4236         adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
 4237             ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
 4238 
 4239         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
 4240         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
 4241         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
 4242         adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
 4243         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
 4244 
 4245         adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
 4246         adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
 4247 
 4248         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
 4249         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
 4250         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
 4251         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
 4252         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
 4253         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
 4254         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
 4255         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
 4256         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
 4257         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
 4258 
 4259         if (adapter->hw.mac.type >= e1000_82543) {
 4260                 adapter->stats.algnerrc += 
 4261                 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
 4262                 adapter->stats.rxerrc += 
 4263                 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
 4264                 adapter->stats.tncrs += 
 4265                 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
 4266                 adapter->stats.cexterr += 
 4267                 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
 4268                 adapter->stats.tsctc += 
 4269                 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
 4270                 adapter->stats.tsctfc += 
 4271                 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
 4272         }
 4273         ifp = adapter->ifp;
 4274 
 4275         ifp->if_collisions = adapter->stats.colc;
 4276 
 4277         /* Rx Errors */
 4278         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
 4279             adapter->stats.crcerrs + adapter->stats.algnerrc +
 4280             adapter->stats.ruc + adapter->stats.roc +
 4281             adapter->stats.mpc + adapter->stats.cexterr;
 4282 
 4283         /* Tx Errors */
 4284         ifp->if_oerrors = adapter->stats.ecol +
 4285             adapter->stats.latecol + adapter->watchdog_events;
 4286 }
 4287 
 4288 /* Export a single 32-bit register via a read-only sysctl. */
 4289 static int
 4290 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
 4291 {
 4292         struct adapter *adapter;
 4293         u_int val;
 4294 
 4295         adapter = oidp->oid_arg1;
 4296         val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
 4297         return (sysctl_handle_int(oidp, &val, 0, req));
 4298 }
 4299 
 4300 /*
 4301  * Add sysctl variables, one per statistic, to the system.
 4302  */
 4303 static void
 4304 lem_add_hw_stats(struct adapter *adapter)
 4305 {
 4306         device_t dev = adapter->dev;
 4307 
 4308         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
 4309         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
 4310         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
 4311         struct e1000_hw_stats *stats = &adapter->stats;
 4312 
 4313         struct sysctl_oid *stat_node;
 4314         struct sysctl_oid_list *stat_list;
 4315 
 4316         /* Driver Statistics */
 4317         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 
 4318                          CTLFLAG_RD, &adapter->mbuf_alloc_failed,
 4319                          "Std mbuf failed");
 4320         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 
 4321                          CTLFLAG_RD, &adapter->mbuf_cluster_failed,
 4322                          "Std mbuf cluster failed");
 4323         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
 4324                         CTLFLAG_RD, &adapter->dropped_pkts,
 4325                         "Driver dropped packets");
 4326         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
 4327                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
 4328                         "Driver tx dma failure in xmit");
 4329         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
 4330                         CTLFLAG_RD, &adapter->no_tx_desc_avail1,
 4331                         "Not enough tx descriptors failure in xmit");
 4332         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
 4333                         CTLFLAG_RD, &adapter->no_tx_desc_avail2,
 4334                         "Not enough tx descriptors failure in xmit");
 4335         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
 4336                         CTLFLAG_RD, &adapter->rx_overruns,
 4337                         "RX overruns");
 4338         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
 4339                         CTLFLAG_RD, &adapter->watchdog_events,
 4340                         "Watchdog timeouts");
 4341 
 4342         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
 4343                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
 4344                         lem_sysctl_reg_handler, "IU",
 4345                         "Device Control Register");
 4346         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
 4347                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
 4348                         lem_sysctl_reg_handler, "IU",
 4349                         "Receiver Control Register");
 4350         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
 4351                         CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
 4352                         "Flow Control High Watermark");
 4353         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
 4354                         CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
 4355                         "Flow Control Low Watermark");
 4356         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
 4357                         CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
 4358                         "TX FIFO workaround events");
 4359         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
 4360                         CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
 4361                         "TX FIFO resets");
 4362 
 4363         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 
 4364                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
 4365                         lem_sysctl_reg_handler, "IU",
 4366                         "Transmit Descriptor Head");
 4367         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 
 4368                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
 4369                         lem_sysctl_reg_handler, "IU",
 4370                         "Transmit Descriptor Tail");
 4371         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 
 4372                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
 4373                         lem_sysctl_reg_handler, "IU",
 4374                         "Receive Descriptor Head");
 4375         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 
 4376                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
 4377                         lem_sysctl_reg_handler, "IU",
 4378                         "Receive Descriptor Tail");
 4379         
 4380 
 4381         /* MAC stats get their own sub node */
 4382 
 4383         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
 4384                                     CTLFLAG_RD, NULL, "Statistics");
 4385         stat_list = SYSCTL_CHILDREN(stat_node);
 4386 
 4387         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
 4388                         CTLFLAG_RD, &stats->ecol,
 4389                         "Excessive collisions");
 4390         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
 4391                         CTLFLAG_RD, &stats->scc,
 4392                         "Single collisions");
 4393         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
 4394                         CTLFLAG_RD, &stats->mcc,
 4395                         "Multiple collisions");
 4396         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
 4397                         CTLFLAG_RD, &stats->latecol,
 4398                         "Late collisions");
 4399         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
 4400                         CTLFLAG_RD, &stats->colc,
 4401                         "Collision Count");
 4402         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
 4403                         CTLFLAG_RD, &adapter->stats.symerrs,
 4404                         "Symbol Errors");
 4405         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
 4406                         CTLFLAG_RD, &adapter->stats.sec,
 4407                         "Sequence Errors");
 4408         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
 4409                         CTLFLAG_RD, &adapter->stats.dc,
 4410                         "Defer Count");
 4411         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
 4412                         CTLFLAG_RD, &adapter->stats.mpc,
 4413                         "Missed Packets");
 4414         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
 4415                         CTLFLAG_RD, &adapter->stats.rnbc,
 4416                         "Receive No Buffers");
 4417         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
 4418                         CTLFLAG_RD, &adapter->stats.ruc,
 4419                         "Receive Undersize");
 4420         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
 4421                         CTLFLAG_RD, &adapter->stats.rfc,
 4422                         "Fragmented Packets Received ");
 4423         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
 4424                         CTLFLAG_RD, &adapter->stats.roc,
 4425                         "Oversized Packets Received");
 4426         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
 4427                         CTLFLAG_RD, &adapter->stats.rjc,
 4428                         "Received Jabber");
 4429         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
 4430                         CTLFLAG_RD, &adapter->stats.rxerrc,
 4431                         "Receive Errors");
 4432         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
 4433                         CTLFLAG_RD, &adapter->stats.crcerrs,
 4434                         "CRC errors");
 4435         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
 4436                         CTLFLAG_RD, &adapter->stats.algnerrc,
 4437                         "Alignment Errors");
 4438         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
 4439                         CTLFLAG_RD, &adapter->stats.cexterr,
 4440                         "Collision/Carrier extension errors");
 4441         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
 4442                         CTLFLAG_RD, &adapter->stats.xonrxc,
 4443                         "XON Received");
 4444         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
 4445                         CTLFLAG_RD, &adapter->stats.xontxc,
 4446                         "XON Transmitted");
 4447         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
 4448                         CTLFLAG_RD, &adapter->stats.xoffrxc,
 4449                         "XOFF Received");
 4450         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
 4451                         CTLFLAG_RD, &adapter->stats.xofftxc,
 4452                         "XOFF Transmitted");
 4453 
 4454         /* Packet Reception Stats */
 4455         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
 4456                         CTLFLAG_RD, &adapter->stats.tpr,
 4457                         "Total Packets Received ");
 4458         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
 4459                         CTLFLAG_RD, &adapter->stats.gprc,
 4460                         "Good Packets Received");
 4461         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
 4462                         CTLFLAG_RD, &adapter->stats.bprc,
 4463                         "Broadcast Packets Received");
 4464         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
 4465                         CTLFLAG_RD, &adapter->stats.mprc,
 4466                         "Multicast Packets Received");
 4467         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
 4468                         CTLFLAG_RD, &adapter->stats.prc64,
 4469                         "64 byte frames received ");
 4470         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
 4471                         CTLFLAG_RD, &adapter->stats.prc127,
 4472                         "65-127 byte frames received");
 4473         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
 4474                         CTLFLAG_RD, &adapter->stats.prc255,
 4475                         "128-255 byte frames received");
 4476         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
 4477                         CTLFLAG_RD, &adapter->stats.prc511,
 4478                         "256-511 byte frames received");
 4479         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
 4480                         CTLFLAG_RD, &adapter->stats.prc1023,
 4481                         "512-1023 byte frames received");
 4482         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
 4483                         CTLFLAG_RD, &adapter->stats.prc1522,
 4484                         "1024-1522 byte frames received");
 4485         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
 4486                         CTLFLAG_RD, &adapter->stats.gorc, 
 4487                         "Good Octets Received");
 4488 
 4489         /* Packet Transmission Stats */
 4490         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
 4491                         CTLFLAG_RD, &adapter->stats.gotc, 
 4492                         "Good Octets Transmitted"); 
 4493         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
 4494                         CTLFLAG_RD, &adapter->stats.tpt,
 4495                         "Total Packets Transmitted");
 4496         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
 4497                         CTLFLAG_RD, &adapter->stats.gptc,
 4498                         "Good Packets Transmitted");
 4499         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
 4500                         CTLFLAG_RD, &adapter->stats.bptc,
 4501                         "Broadcast Packets Transmitted");
 4502         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
 4503                         CTLFLAG_RD, &adapter->stats.mptc,
 4504                         "Multicast Packets Transmitted");
 4505         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
 4506                         CTLFLAG_RD, &adapter->stats.ptc64,
 4507                         "64 byte frames transmitted ");
 4508         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
 4509                         CTLFLAG_RD, &adapter->stats.ptc127,
 4510                         "65-127 byte frames transmitted");
 4511         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
 4512                         CTLFLAG_RD, &adapter->stats.ptc255,
 4513                         "128-255 byte frames transmitted");
 4514         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
 4515                         CTLFLAG_RD, &adapter->stats.ptc511,
 4516                         "256-511 byte frames transmitted");
 4517         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
 4518                         CTLFLAG_RD, &adapter->stats.ptc1023,
 4519                         "512-1023 byte frames transmitted");
 4520         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
 4521                         CTLFLAG_RD, &adapter->stats.ptc1522,
 4522                         "1024-1522 byte frames transmitted");
 4523         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
 4524                         CTLFLAG_RD, &adapter->stats.tsctc,
 4525                         "TSO Contexts Transmitted");
 4526         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
 4527                         CTLFLAG_RD, &adapter->stats.tsctfc,
 4528                         "TSO Contexts Failed");
 4529 }
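/*
 * Editor's note: a usage sketch, not part of the driver.  Assuming the
 * adapter attaches as em0, the OIDs registered above would typically be
 * readable from userland with sysctl(8), for example:
 *
 *   # sysctl dev.em.0.mac_stats.good_pkts_recvd
 *   # sysctl dev.em.0.device_control
 *
 * The exact node names depend on the driver name and unit number the
 * device attaches as.
 */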
 4530 
 4531 /**********************************************************************
 4532  *
 4533  *  This routine provides a way to dump out the adapter eeprom,
 4534  *  often a useful debug/service tool. This only dumps the first
 4535  *  32 words; the settings that matter are within that range.
 4536  *
 4537  **********************************************************************/
 4538 
 4539 static int
 4540 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
 4541 {
 4542         struct adapter *adapter;
 4543         int error;
 4544         int result;
 4545 
 4546         result = -1;
 4547         error = sysctl_handle_int(oidp, &result, 0, req);
 4548 
 4549         if (error || !req->newptr)
 4550                 return (error);
 4551 
 4552         /*
 4553          * Writing a value of 1 causes a hex dump of the
 4554          * first 32 16-bit words of the EEPROM to be
 4555          * printed to the console.
 4556          */
 4557         if (result == 1) {
 4558                 adapter = (struct adapter *)arg1;
 4559                 lem_print_nvm_info(adapter);
 4560         }
 4561 
 4562         return (error);
 4563 }
 4564 
 4565 static void
 4566 lem_print_nvm_info(struct adapter *adapter)
 4567 {
 4568         u16     eeprom_data;
 4569         int     i, j, row = 0;
 4570 
 4571         /* It's a bit crude, but it gets the job done */
 4572         printf("\nInterface EEPROM Dump:\n");
 4573         printf("Offset\n0x0000  ");
 4574         for (i = 0, j = 0; i < 32; i++, j++) {
 4575                 if (j == 8) { /* Make the offset block */
 4576                         j = 0; ++row;
 4577                         printf("\n0x00%x0  ",row);
 4578                 }
 4579                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
 4580                 printf("%04x ", eeprom_data);
 4581         }
 4582         printf("\n");
 4583 }
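/*
 * Editor's note: the dump above prints eight 16-bit words per row with a
 * running offset label.  With hypothetical EEPROM contents (values below
 * are made up) the output looks roughly like:
 *
 *   Interface EEPROM Dump:
 *   Offset
 *   0x0000  1100 2233 4455 8086 0000 ffff ffff ffff
 *   0x0010  ....
 *   0x0020  ....
 *   0x0030  ....
 */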
 4584 
 4585 static int
 4586 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
 4587 {
 4588         struct em_int_delay_info *info;
 4589         struct adapter *adapter;
 4590         u32 regval;
 4591         int error;
 4592         int usecs;
 4593         int ticks;
 4594 
 4595         info = (struct em_int_delay_info *)arg1;
 4596         usecs = info->value;
 4597         error = sysctl_handle_int(oidp, &usecs, 0, req);
 4598         if (error != 0 || req->newptr == NULL)
 4599                 return (error);
 4600         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
 4601                 return (EINVAL);
 4602         info->value = usecs;
 4603         ticks = EM_USECS_TO_TICKS(usecs);
 4604         if (info->offset == E1000_ITR)  /* units are 256ns here */
 4605                 ticks *= 4;
 4606 
 4607         adapter = info->adapter;
 4608         
 4609         EM_CORE_LOCK(adapter);
 4610         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
 4611         regval = (regval & ~0xffff) | (ticks & 0xffff);
 4612         /* Handle a few special cases. */
 4613         switch (info->offset) {
 4614         case E1000_RDTR:
 4615                 break;
 4616         case E1000_TIDV:
 4617                 if (ticks == 0) {
 4618                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
 4619                         /* Don't write 0 into the TIDV register. */
 4620                         regval++;
 4621                 } else
 4622                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
 4623                 break;
 4624         }
 4625         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
 4626         EM_CORE_UNLOCK(adapter);
 4627         return (0);
 4628 }
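/*
 * Editor's note: a worked example of the unit handling above, assuming
 * the usual 1.024 us granularity of these delay registers (the exact
 * EM_USECS_TO_TICKS()/EM_TICKS_TO_USECS() macros live in the header and
 * are not shown here).  A request of 100 us becomes roughly 98 register
 * ticks; for E1000_ITR, whose units are 256 ns (one quarter of 1.024 us),
 * the tick count is multiplied by 4 so the programmed interval still
 * corresponds to about 100 us.
 */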
 4629 
 4630 static void
 4631 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
 4632         const char *description, struct em_int_delay_info *info,
 4633         int offset, int value)
 4634 {
 4635         info->adapter = adapter;
 4636         info->offset = offset;
 4637         info->value = value;
 4638         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
 4639             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
 4640             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
 4641             info, 0, lem_sysctl_int_delay, "I", description);
 4642 }
 4643 
 4644 static void
 4645 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
 4646         const char *description, int *limit, int value)
 4647 {
 4648         *limit = value;
 4649         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
 4650             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
 4651             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
 4652 }
 4653 
 4654 static void
 4655 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
 4656         const char *description, int *limit, int value)
 4657 {
 4658         *limit = value;
 4659         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
 4660             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
 4661             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
 4662 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.