The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ixgbe/ixgbe.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /******************************************************************************
    2 
    3   Copyright (c) 2001-2011, Intel Corporation 
    4   All rights reserved.
    5   
    6   Redistribution and use in source and binary forms, with or without 
    7   modification, are permitted provided that the following conditions are met:
    8   
    9    1. Redistributions of source code must retain the above copyright notice, 
   10       this list of conditions and the following disclaimer.
   11   
   12    2. Redistributions in binary form must reproduce the above copyright 
   13       notice, this list of conditions and the following disclaimer in the 
   14       documentation and/or other materials provided with the distribution.
   15   
   16    3. Neither the name of the Intel Corporation nor the names of its 
   17       contributors may be used to endorse or promote products derived from 
   18       this software without specific prior written permission.
   19   
   20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
   24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30   POSSIBILITY OF SUCH DAMAGE.
   31 
   32 ******************************************************************************/
   33 /*$FreeBSD: releng/9.0/sys/dev/ixgbe/ixgbe.c 225405 2011-09-05 17:54:19Z qingli $*/
   34 
   35 #ifdef HAVE_KERNEL_OPTION_HEADERS
   36 #include "opt_inet.h"
   37 #include "opt_inet6.h"
   38 #endif
   39 
   40 #include "ixgbe.h"
   41 
   42 /*********************************************************************
   43  *  Set this to one to display debug statistics
   44  *********************************************************************/
   45 int             ixgbe_display_debug_stats = 0;
   46 
   47 /*********************************************************************
   48  *  Driver version
   49  *********************************************************************/
   50 char ixgbe_driver_version[] = "2.3.11";
   51 
   52 /*********************************************************************
   53  *  PCI Device ID Table
   54  *
   55  *  Used by probe to select devices to load on
   56  *  Last field stores an index into ixgbe_strings
   57  *  Last entry must be all 0s
   58  *
   59  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
   60  *********************************************************************/
   61 
   62 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
   63 {
   64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
   65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
   66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
   67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
   68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
   69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
   70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
   71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
   72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
   73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
   74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
   75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
   76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
   77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
   78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
   79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
   80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
   81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
   82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
   83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
   84         /* required last entry */
   85         {0, 0, 0, 0, 0}
   86 };
   87 
   88 /*********************************************************************
   89  *  Table of branding strings
   90  *********************************************************************/
   91 
   92 static char    *ixgbe_strings[] = {
   93         "Intel(R) PRO/10GbE PCI-Express Network Driver"
   94 };
   95 
   96 /*********************************************************************
   97  *  Function prototypes
   98  *********************************************************************/
   99 static int      ixgbe_probe(device_t);
  100 static int      ixgbe_attach(device_t);
  101 static int      ixgbe_detach(device_t);
  102 static int      ixgbe_shutdown(device_t);
  103 static void     ixgbe_start(struct ifnet *);
  104 static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
  105 #if __FreeBSD_version >= 800000
  106 static int      ixgbe_mq_start(struct ifnet *, struct mbuf *);
  107 static int      ixgbe_mq_start_locked(struct ifnet *,
  108                     struct tx_ring *, struct mbuf *);
  109 static void     ixgbe_qflush(struct ifnet *);
  110 #endif
  111 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
  112 static void     ixgbe_init(void *);
  113 static void     ixgbe_init_locked(struct adapter *);
  114 static void     ixgbe_stop(void *);
  115 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
  116 static int      ixgbe_media_change(struct ifnet *);
  117 static void     ixgbe_identify_hardware(struct adapter *);
  118 static int      ixgbe_allocate_pci_resources(struct adapter *);
  119 static int      ixgbe_allocate_msix(struct adapter *);
  120 static int      ixgbe_allocate_legacy(struct adapter *);
  121 static int      ixgbe_allocate_queues(struct adapter *);
  122 static int      ixgbe_setup_msix(struct adapter *);
  123 static void     ixgbe_free_pci_resources(struct adapter *);
  124 static void     ixgbe_local_timer(void *);
  125 static int      ixgbe_setup_interface(device_t, struct adapter *);
  126 static void     ixgbe_config_link(struct adapter *);
  127 
  128 static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
  129 static int      ixgbe_setup_transmit_structures(struct adapter *);
  130 static void     ixgbe_setup_transmit_ring(struct tx_ring *);
  131 static void     ixgbe_initialize_transmit_units(struct adapter *);
  132 static void     ixgbe_free_transmit_structures(struct adapter *);
  133 static void     ixgbe_free_transmit_buffers(struct tx_ring *);
  134 
  135 static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
  136 static int      ixgbe_setup_receive_structures(struct adapter *);
  137 static int      ixgbe_setup_receive_ring(struct rx_ring *);
  138 static void     ixgbe_initialize_receive_units(struct adapter *);
  139 static void     ixgbe_free_receive_structures(struct adapter *);
  140 static void     ixgbe_free_receive_buffers(struct rx_ring *);
  141 static void     ixgbe_setup_hw_rsc(struct rx_ring *);
  142 
  143 static void     ixgbe_enable_intr(struct adapter *);
  144 static void     ixgbe_disable_intr(struct adapter *);
  145 static void     ixgbe_update_stats_counters(struct adapter *);
  146 static bool     ixgbe_txeof(struct tx_ring *);
  147 static bool     ixgbe_rxeof(struct ix_queue *, int);
  148 static void     ixgbe_rx_checksum(u32, struct mbuf *, u32);
  149 static void     ixgbe_set_promisc(struct adapter *);
  150 static void     ixgbe_set_multi(struct adapter *);
  151 static void     ixgbe_update_link_status(struct adapter *);
  152 static void     ixgbe_refresh_mbufs(struct rx_ring *, int);
  153 static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
  154 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
  155 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
  156 static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
  157                     struct ixgbe_dma_alloc *, int);
  158 static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
  159 static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
  160                     const char *, int *, int);
  161 static bool     ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
  162 static bool     ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
  163 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
  164 static void     ixgbe_configure_ivars(struct adapter *);
  165 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
  166 
  167 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
  168 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
  169 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
  170 
  171 static void     ixgbe_add_hw_stats(struct adapter *adapter);
  172 
  173 static __inline void ixgbe_rx_discard(struct rx_ring *, int);
  174 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
  175                     struct mbuf *, u32);
  176 
  177 /* Support for pluggable optic modules */
  178 static bool     ixgbe_sfp_probe(struct adapter *);
  179 static void     ixgbe_setup_optics(struct adapter *);
  180 
  181 /* Legacy (single vector interrupt handler */
  182 static void     ixgbe_legacy_irq(void *);
  183 
  184 /* The MSI/X Interrupt handlers */
  185 static void     ixgbe_msix_que(void *);
  186 static void     ixgbe_msix_link(void *);
  187 
  188 /* Deferred interrupt tasklets */
  189 static void     ixgbe_handle_que(void *, int);
  190 static void     ixgbe_handle_link(void *, int);
  191 static void     ixgbe_handle_msf(void *, int);
  192 static void     ixgbe_handle_mod(void *, int);
  193 
  194 #ifdef IXGBE_FDIR
  195 static void     ixgbe_atr(struct tx_ring *, struct mbuf *);
  196 static void     ixgbe_reinit_fdir(void *, int);
  197 #endif
  198 
  199 /*********************************************************************
  200  *  FreeBSD Device Interface Entry Points
  201  *********************************************************************/
  202 
  203 static device_method_t ixgbe_methods[] = {
  204         /* Device interface */
  205         DEVMETHOD(device_probe, ixgbe_probe),
  206         DEVMETHOD(device_attach, ixgbe_attach),
  207         DEVMETHOD(device_detach, ixgbe_detach),
  208         DEVMETHOD(device_shutdown, ixgbe_shutdown),
  209         {0, 0}
  210 };
  211 
  212 static driver_t ixgbe_driver = {
  213         "ix", ixgbe_methods, sizeof(struct adapter),
  214 };
  215 
  216 devclass_t ixgbe_devclass;
  217 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
  218 
  219 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
  220 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
  221 
  222 /*
  223 ** TUNEABLE PARAMETERS:
  224 */
  225 
  226 /*
  227 ** AIM: Adaptive Interrupt Moderation
  228 ** which means that the interrupt rate
  229 ** is varied over time based on the
  230 ** traffic for that interrupt vector
  231 */
  232 static int ixgbe_enable_aim = TRUE;
  233 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
  234 
  235 static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
  236 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
  237 
  238 /* How many packets rxeof tries to clean at a time */
  239 static int ixgbe_rx_process_limit = 128;
  240 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
  241 
  242 /* Flow control setting, default to full */
  243 static int ixgbe_flow_control = ixgbe_fc_full;
  244 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
  245 
  246 /*
  247 ** Smart speed setting, default to on
  248 ** this only works as a compile option
  249 ** right now as its during attach, set
  250 ** this to 'ixgbe_smart_speed_off' to
  251 ** disable.
  252 */
  253 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
  254 
  255 /*
  256  * MSIX should be the default for best performance,
  257  * but this allows it to be forced off for testing.
  258  */
  259 static int ixgbe_enable_msix = 1;
  260 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
  261 
  262 /*
  263  * Header split: this causes the hardware to DMA
  264  * the header into a separate mbuf from the payload,
  265  * it can be a performance win in some workloads, but
  266  * in others it actually hurts, its off by default. 
  267  */
  268 static bool ixgbe_header_split = FALSE;
  269 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
  270 
  271 /*
  272  * Number of Queues, can be set to 0,
  273  * it then autoconfigures based on the
  274  * number of cpus with a max of 8. This
  275  * can be overriden manually here.
  276  */
  277 static int ixgbe_num_queues = 0;
  278 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
  279 
  280 /*
  281 ** Number of TX descriptors per ring,
  282 ** setting higher than RX as this seems
  283 ** the better performing choice.
  284 */
  285 static int ixgbe_txd = PERFORM_TXD;
  286 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
  287 
  288 /* Number of RX descriptors per ring */
  289 static int ixgbe_rxd = PERFORM_RXD;
  290 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
  291 
  292 /* Keep running tab on them for sanity check */
  293 static int ixgbe_total_ports;
  294 
  295 #ifdef IXGBE_FDIR
  296 /*
  297 ** For Flow Director: this is the
  298 ** number of TX packets we sample
  299 ** for the filter pool, this means
  300 ** every 20th packet will be probed.
  301 **
  302 ** This feature can be disabled by 
  303 ** setting this to 0.
  304 */
  305 static int atr_sample_rate = 20;
  306 /* 
  307 ** Flow Director actually 'steals'
  308 ** part of the packet buffer as its
  309 ** filter pool, this variable controls
  310 ** how much it uses:
  311 **  0 = 64K, 1 = 128K, 2 = 256K
  312 */
  313 static int fdir_pballoc = 1;
  314 #endif
  315 
  316 /*********************************************************************
  317  *  Device identification routine
  318  *
  319  *  ixgbe_probe determines if the driver should be loaded on
  320  *  adapter based on PCI vendor/device id of the adapter.
  321  *
  322  *  return BUS_PROBE_DEFAULT on success, positive on failure
  323  *********************************************************************/
  324 
  325 static int
  326 ixgbe_probe(device_t dev)
  327 {
  328         ixgbe_vendor_info_t *ent;
  329 
  330         u16     pci_vendor_id = 0;
  331         u16     pci_device_id = 0;
  332         u16     pci_subvendor_id = 0;
  333         u16     pci_subdevice_id = 0;
  334         char    adapter_name[256];
  335 
  336         INIT_DEBUGOUT("ixgbe_probe: begin");
  337 
  338         pci_vendor_id = pci_get_vendor(dev);
  339         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
  340                 return (ENXIO);
  341 
  342         pci_device_id = pci_get_device(dev);
  343         pci_subvendor_id = pci_get_subvendor(dev);
  344         pci_subdevice_id = pci_get_subdevice(dev);
  345 
  346         ent = ixgbe_vendor_info_array;
  347         while (ent->vendor_id != 0) {
  348                 if ((pci_vendor_id == ent->vendor_id) &&
  349                     (pci_device_id == ent->device_id) &&
  350 
  351                     ((pci_subvendor_id == ent->subvendor_id) ||
  352                      (ent->subvendor_id == 0)) &&
  353 
  354                     ((pci_subdevice_id == ent->subdevice_id) ||
  355                      (ent->subdevice_id == 0))) {
  356                         sprintf(adapter_name, "%s, Version - %s",
  357                                 ixgbe_strings[ent->index],
  358                                 ixgbe_driver_version);
  359                         device_set_desc_copy(dev, adapter_name);
  360                         ++ixgbe_total_ports;
  361                         return (BUS_PROBE_DEFAULT);
  362                 }
  363                 ent++;
  364         }
  365         return (ENXIO);
  366 }
  367 
  368 /*********************************************************************
  369  *  Device initialization routine
  370  *
  371  *  The attach entry point is called when the driver is being loaded.
  372  *  This routine identifies the type of hardware, allocates all resources
  373  *  and initializes the hardware.
  374  *
  375  *  return 0 on success, positive on failure
  376  *********************************************************************/
  377 
  378 static int
  379 ixgbe_attach(device_t dev)
  380 {
  381         struct adapter *adapter;
  382         struct ixgbe_hw *hw;
  383         int             error = 0;
  384         u16             csum;
  385         u32             ctrl_ext;
  386 
  387         INIT_DEBUGOUT("ixgbe_attach: begin");
  388 
  389         if (resource_disabled("ixgbe", device_get_unit(dev))) {
  390                 device_printf(dev, "Disabled by device hint\n");
  391                 return (ENXIO);
  392         }
  393 
  394         /* Allocate, clear, and link in our adapter structure */
  395         adapter = device_get_softc(dev);
  396         adapter->dev = adapter->osdep.dev = dev;
  397         hw = &adapter->hw;
  398 
  399         /* Core Lock Init*/
  400         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
  401 
  402         /* SYSCTL APIs */
  403 
  404         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  405                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  406                         OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
  407                         adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
  408 
  409         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  410                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  411                         OID_AUTO, "advertise_gig", CTLTYPE_INT | CTLFLAG_RW,
  412                         adapter, 0, ixgbe_set_advertise, "I", "1G Link");
  413 
  414         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  415                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  416                         OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
  417                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
  418 
  419         /* Set up the timer callout */
  420         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
  421 
  422         /* Determine hardware revision */
  423         ixgbe_identify_hardware(adapter);
  424 
  425         /* Do base PCI setup - map BAR0 */
  426         if (ixgbe_allocate_pci_resources(adapter)) {
  427                 device_printf(dev, "Allocation of PCI resources failed\n");
  428                 error = ENXIO;
  429                 goto err_out;
  430         }
  431 
  432         /* Do descriptor calc and sanity checks */
  433         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
  434             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
  435                 device_printf(dev, "TXD config issue, using default!\n");
  436                 adapter->num_tx_desc = DEFAULT_TXD;
  437         } else
  438                 adapter->num_tx_desc = ixgbe_txd;
  439 
  440         /*
  441         ** With many RX rings it is easy to exceed the
  442         ** system mbuf allocation. Tuning nmbclusters
  443         ** can alleviate this.
  444         */
  445         if (nmbclusters > 0 ) {
  446                 int s;
  447                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
  448                 if (s > nmbclusters) {
  449                         device_printf(dev, "RX Descriptors exceed "
  450                             "system mbuf max, using default instead!\n");
  451                         ixgbe_rxd = DEFAULT_RXD;
  452                 }
  453         }
  454 
  455         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
  456             ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) {
  457                 device_printf(dev, "RXD config issue, using default!\n");
  458                 adapter->num_rx_desc = DEFAULT_RXD;
  459         } else
  460                 adapter->num_rx_desc = ixgbe_rxd;
  461 
  462         /* Allocate our TX/RX Queues */
  463         if (ixgbe_allocate_queues(adapter)) {
  464                 error = ENOMEM;
  465                 goto err_out;
  466         }
  467 
  468         /* Allocate multicast array memory. */
  469         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
  470             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
  471         if (adapter->mta == NULL) {
  472                 device_printf(dev, "Can not allocate multicast setup array\n");
  473                 error = ENOMEM;
  474                 goto err_late;
  475         }
  476 
  477         /* Initialize the shared code */
  478         error = ixgbe_init_shared_code(hw);
  479         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
  480                 /*
  481                 ** No optics in this port, set up
  482                 ** so the timer routine will probe 
  483                 ** for later insertion.
  484                 */
  485                 adapter->sfp_probe = TRUE;
  486                 error = 0;
  487         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  488                 device_printf(dev,"Unsupported SFP+ module detected!\n");
  489                 error = EIO;
  490                 goto err_late;
  491         } else if (error) {
  492                 device_printf(dev,"Unable to initialize the shared code\n");
  493                 error = EIO;
  494                 goto err_late;
  495         }
  496 
  497         /* Make sure we have a good EEPROM before we read from it */
  498         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
  499                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
  500                 error = EIO;
  501                 goto err_late;
  502         }
  503 
  504         /* Get Hardware Flow Control setting */
  505         hw->fc.requested_mode = ixgbe_fc_full;
  506         hw->fc.pause_time = IXGBE_FC_PAUSE;
  507         hw->fc.low_water = IXGBE_FC_LO;
  508         hw->fc.high_water = IXGBE_FC_HI;
  509         hw->fc.send_xon = TRUE;
  510 
  511         error = ixgbe_init_hw(hw);
  512         if (error == IXGBE_ERR_EEPROM_VERSION) {
  513                 device_printf(dev, "This device is a pre-production adapter/"
  514                     "LOM.  Please be aware there may be issues associated "
  515                     "with your hardware.\n If you are experiencing problems "
  516                     "please contact your Intel or hardware representative "
  517                     "who provided you with this hardware.\n");
  518         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
  519                 device_printf(dev,"Unsupported SFP+ Module\n");
  520 
  521         if (error) {
  522                 error = EIO;
  523                 device_printf(dev,"Hardware Initialization Failure\n");
  524                 goto err_late;
  525         }
  526 
  527         /* Detect and set physical type */
  528         ixgbe_setup_optics(adapter);
  529 
  530         if ((adapter->msix > 1) && (ixgbe_enable_msix))
  531                 error = ixgbe_allocate_msix(adapter); 
  532         else
  533                 error = ixgbe_allocate_legacy(adapter); 
  534         if (error) 
  535                 goto err_late;
  536 
  537         /* Setup OS specific network interface */
  538         if (ixgbe_setup_interface(dev, adapter) != 0)
  539                 goto err_late;
  540 
  541         /* Sysctl for limiting the amount of work done in the taskqueue */
  542         ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
  543             "max number of rx packets to process", &adapter->rx_process_limit,
  544             ixgbe_rx_process_limit);
  545 
  546         /* Initialize statistics */
  547         ixgbe_update_stats_counters(adapter);
  548 
  549         /* Register for VLAN events */
  550         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
  551             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
  552         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
  553             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
  554 
  555         /* Print PCIE bus type/speed/width info */
  556         ixgbe_get_bus_info(hw);
  557         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
  558             ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
  559             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
  560             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
  561             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
  562             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
  563             ("Unknown"));
  564 
  565         if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
  566             (hw->bus.speed == ixgbe_bus_speed_2500)) {
  567                 device_printf(dev, "PCI-Express bandwidth available"
  568                     " for this card\n     is not sufficient for"
  569                     " optimal performance.\n");
  570                 device_printf(dev, "For optimal performance a x8 "
  571                     "PCIE, or x4 PCIE 2 slot is required.\n");
  572         }
  573 
  574         /* let hardware know driver is loaded */
  575         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  576         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
  577         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
  578 
  579         ixgbe_add_hw_stats(adapter);
  580 
  581         INIT_DEBUGOUT("ixgbe_attach: end");
  582         return (0);
  583 err_late:
  584         ixgbe_free_transmit_structures(adapter);
  585         ixgbe_free_receive_structures(adapter);
  586 err_out:
  587         if (adapter->ifp != NULL)
  588                 if_free(adapter->ifp);
  589         ixgbe_free_pci_resources(adapter);
  590         free(adapter->mta, M_DEVBUF);
  591         return (error);
  592 
  593 }
  594 
  595 /*********************************************************************
  596  *  Device removal routine
  597  *
  598  *  The detach entry point is called when the driver is being removed.
  599  *  This routine stops the adapter and deallocates all the resources
  600  *  that were allocated for driver operation.
  601  *
  602  *  return 0 on success, positive on failure
  603  *********************************************************************/
  604 
  605 static int
  606 ixgbe_detach(device_t dev)
  607 {
  608         struct adapter *adapter = device_get_softc(dev);
  609         struct ix_queue *que = adapter->queues;
  610         u32     ctrl_ext;
  611 
  612         INIT_DEBUGOUT("ixgbe_detach: begin");
  613 
  614         /* Make sure VLANS are not using driver */
  615         if (adapter->ifp->if_vlantrunk != NULL) {
  616                 device_printf(dev,"Vlan in use, detach first\n");
  617                 return (EBUSY);
  618         }
  619 
  620         IXGBE_CORE_LOCK(adapter);
  621         ixgbe_stop(adapter);
  622         IXGBE_CORE_UNLOCK(adapter);
  623 
  624         for (int i = 0; i < adapter->num_queues; i++, que++) {
  625                 if (que->tq) {
  626                         taskqueue_drain(que->tq, &que->que_task);
  627                         taskqueue_free(que->tq);
  628                 }
  629         }
  630 
  631         /* Drain the Link queue */
  632         if (adapter->tq) {
  633                 taskqueue_drain(adapter->tq, &adapter->link_task);
  634                 taskqueue_drain(adapter->tq, &adapter->mod_task);
  635                 taskqueue_drain(adapter->tq, &adapter->msf_task);
  636 #ifdef IXGBE_FDIR
  637                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
  638 #endif
  639                 taskqueue_free(adapter->tq);
  640         }
  641 
  642         /* let hardware know driver is unloading */
  643         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  644         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
  645         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
  646 
  647         /* Unregister VLAN events */
  648         if (adapter->vlan_attach != NULL)
  649                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
  650         if (adapter->vlan_detach != NULL)
  651                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
  652 
  653         ether_ifdetach(adapter->ifp);
  654         callout_drain(&adapter->timer);
  655         ixgbe_free_pci_resources(adapter);
  656         bus_generic_detach(dev);
  657         if_free(adapter->ifp);
  658 
  659         ixgbe_free_transmit_structures(adapter);
  660         ixgbe_free_receive_structures(adapter);
  661         free(adapter->mta, M_DEVBUF);
  662 
  663         IXGBE_CORE_LOCK_DESTROY(adapter);
  664         return (0);
  665 }
  666 
  667 /*********************************************************************
  668  *
  669  *  Shutdown entry point
  670  *
  671  **********************************************************************/
  672 
  673 static int
  674 ixgbe_shutdown(device_t dev)
  675 {
  676         struct adapter *adapter = device_get_softc(dev);
  677         IXGBE_CORE_LOCK(adapter);
  678         ixgbe_stop(adapter);
  679         IXGBE_CORE_UNLOCK(adapter);
  680         return (0);
  681 }
  682 
  683 
  684 /*********************************************************************
  685  *  Transmit entry point
  686  *
  687  *  ixgbe_start is called by the stack to initiate a transmit.
  688  *  The driver will remain in this routine as long as there are
  689  *  packets to transmit and transmit resources are available.
  690  *  In case resources are not available stack is notified and
  691  *  the packet is requeued.
  692  **********************************************************************/
  693 
  694 static void
  695 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
  696 {
  697         struct mbuf    *m_head;
  698         struct adapter *adapter = txr->adapter;
  699 
  700         IXGBE_TX_LOCK_ASSERT(txr);
  701 
  702         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
  703             IFF_DRV_RUNNING)
  704                 return;
  705         if (!adapter->link_active)
  706                 return;
  707 
  708         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
  709 
  710                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
  711                 if (m_head == NULL)
  712                         break;
  713 
  714                 if (ixgbe_xmit(txr, &m_head)) {
  715                         if (m_head == NULL)
  716                                 break;
  717                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  718                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
  719                         break;
  720                 }
  721                 /* Send a copy of the frame to the BPF listener */
  722                 ETHER_BPF_MTAP(ifp, m_head);
  723 
  724                 /* Set watchdog on */
  725                 txr->watchdog_time = ticks;
  726                 txr->queue_status = IXGBE_QUEUE_WORKING;
  727 
  728         }
  729         return;
  730 }
  731 
  732 /*
  733  * Legacy TX start - called by the stack, this
  734  * always uses the first tx ring, and should
  735  * not be used with multiqueue tx enabled.
  736  */
  737 static void
  738 ixgbe_start(struct ifnet *ifp)
  739 {
  740         struct adapter *adapter = ifp->if_softc;
  741         struct tx_ring  *txr = adapter->tx_rings;
  742 
  743         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  744                 IXGBE_TX_LOCK(txr);
  745                 ixgbe_start_locked(txr, ifp);
  746                 IXGBE_TX_UNLOCK(txr);
  747         }
  748         return;
  749 }
  750 
  751 #if __FreeBSD_version >= 800000
  752 /*
  753 ** Multiqueue Transmit driver
  754 **
  755 */
  756 static int
  757 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
  758 {
  759         struct adapter  *adapter = ifp->if_softc;
  760         struct ix_queue *que;
  761         struct tx_ring  *txr;
  762         int             i = 0, err = 0;
  763 
  764         /* Which queue to use */
  765         if ((m->m_flags & M_FLOWID) != 0)
  766                 i = m->m_pkthdr.flowid % adapter->num_queues;
  767 
  768         txr = &adapter->tx_rings[i];
  769         que = &adapter->queues[i];
  770 
  771         if (IXGBE_TX_TRYLOCK(txr)) {
  772                 err = ixgbe_mq_start_locked(ifp, txr, m);
  773                 IXGBE_TX_UNLOCK(txr);
  774         } else {
  775                 err = drbr_enqueue(ifp, txr->br, m);
  776                 taskqueue_enqueue(que->tq, &que->que_task);
  777         }
  778 
  779         return (err);
  780 }
  781 
  782 static int
  783 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
  784 {
  785         struct adapter  *adapter = txr->adapter;
  786         struct mbuf     *next;
  787         int             enqueued, err = 0;
  788 
  789         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
  790             IFF_DRV_RUNNING || adapter->link_active == 0) {
  791                 if (m != NULL)
  792                         err = drbr_enqueue(ifp, txr->br, m);
  793                 return (err);
  794         }
  795 
  796         enqueued = 0;
  797         if (m == NULL) {
  798                 next = drbr_dequeue(ifp, txr->br);
  799         } else if (drbr_needs_enqueue(ifp, txr->br)) {
  800                 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
  801                         return (err);
  802                 next = drbr_dequeue(ifp, txr->br);
  803         } else
  804                 next = m;
  805 
  806         /* Process the queue */
  807         while (next != NULL) {
  808                 if ((err = ixgbe_xmit(txr, &next)) != 0) {
  809                         if (next != NULL)
  810                                 err = drbr_enqueue(ifp, txr->br, next);
  811                         break;
  812                 }
  813                 enqueued++;
  814                 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
  815                 /* Send a copy of the frame to the BPF listener */
  816                 ETHER_BPF_MTAP(ifp, next);
  817                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  818                         break;
  819                 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
  820                         ixgbe_txeof(txr);
  821                 if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
  822                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  823                         break;
  824                 }
  825                 next = drbr_dequeue(ifp, txr->br);
  826         }
  827 
  828         if (enqueued > 0) {
  829                 /* Set watchdog on */
  830                 txr->queue_status = IXGBE_QUEUE_WORKING;
  831                 txr->watchdog_time = ticks;
  832         }
  833 
  834         return (err);
  835 }
  836 
  837 /*
  838 ** Flush all ring buffers
  839 */
  840 static void
  841 ixgbe_qflush(struct ifnet *ifp)
  842 {
  843         struct adapter  *adapter = ifp->if_softc;
  844         struct tx_ring  *txr = adapter->tx_rings;
  845         struct mbuf     *m;
  846 
  847         for (int i = 0; i < adapter->num_queues; i++, txr++) {
  848                 IXGBE_TX_LOCK(txr);
  849                 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
  850                         m_freem(m);
  851                 IXGBE_TX_UNLOCK(txr);
  852         }
  853         if_qflush(ifp);
  854 }
  855 #endif /* __FreeBSD_version >= 800000 */
  856 
  857 /*********************************************************************
  858  *  Ioctl entry point
  859  *
  860  *  ixgbe_ioctl is called when the user wants to configure the
  861  *  interface.
  862  *
  863  *  return 0 on success, positive on failure
  864  **********************************************************************/
  865 
  866 static int
  867 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
  868 {
  869         struct adapter  *adapter = ifp->if_softc;
  870         struct ifreq    *ifr = (struct ifreq *) data;
  871 #if defined(INET) || defined(INET6)
  872         struct ifaddr *ifa = (struct ifaddr *)data;
  873         bool            avoid_reset = FALSE;
  874 #endif
  875         int             error = 0;
  876 
  877         switch (command) {
  878 
  879         case SIOCSIFADDR:
  880 #ifdef INET
  881                 if (ifa->ifa_addr->sa_family == AF_INET)
  882                         avoid_reset = TRUE;
  883 #endif
  884 #ifdef INET6
  885                 if (ifa->ifa_addr->sa_family == AF_INET6)
  886                         avoid_reset = TRUE;
  887 #endif
  888 #if defined(INET) || defined(INET6)
  889                 /*
  890                 ** Calling init results in link renegotiation,
  891                 ** so we avoid doing it when possible.
  892                 */
  893                 if (avoid_reset) {
  894                         ifp->if_flags |= IFF_UP;
  895                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
  896                                 ixgbe_init(adapter);
  897                         if (!(ifp->if_flags & IFF_NOARP))
  898                                 arp_ifinit(ifp, ifa);
  899                 } else
  900                         error = ether_ioctl(ifp, command, data);
  901                 break;
  902 #endif
  903         case SIOCSIFMTU:
  904                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
  905                 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
  906                         error = EINVAL;
  907                 } else {
  908                         IXGBE_CORE_LOCK(adapter);
  909                         ifp->if_mtu = ifr->ifr_mtu;
  910                         adapter->max_frame_size =
  911                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
  912                         ixgbe_init_locked(adapter);
  913                         IXGBE_CORE_UNLOCK(adapter);
  914                 }
  915                 break;
  916         case SIOCSIFFLAGS:
  917                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
  918                 IXGBE_CORE_LOCK(adapter);
  919                 if (ifp->if_flags & IFF_UP) {
  920                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  921                                 if ((ifp->if_flags ^ adapter->if_flags) &
  922                                     (IFF_PROMISC | IFF_ALLMULTI)) {
  923                                         ixgbe_set_promisc(adapter);
  924                                 }
  925                         } else
  926                                 ixgbe_init_locked(adapter);
  927                 } else
  928                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  929                                 ixgbe_stop(adapter);
  930                 adapter->if_flags = ifp->if_flags;
  931                 IXGBE_CORE_UNLOCK(adapter);
  932                 break;
  933         case SIOCADDMULTI:
  934         case SIOCDELMULTI:
  935                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
  936                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  937                         IXGBE_CORE_LOCK(adapter);
  938                         ixgbe_disable_intr(adapter);
  939                         ixgbe_set_multi(adapter);
  940                         ixgbe_enable_intr(adapter);
  941                         IXGBE_CORE_UNLOCK(adapter);
  942                 }
  943                 break;
  944         case SIOCSIFMEDIA:
  945         case SIOCGIFMEDIA:
  946                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
  947                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
  948                 break;
  949         case SIOCSIFCAP:
  950         {
  951                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
  952                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
  953                 if (mask & IFCAP_HWCSUM)
  954                         ifp->if_capenable ^= IFCAP_HWCSUM;
  955                 if (mask & IFCAP_TSO4)
  956                         ifp->if_capenable ^= IFCAP_TSO4;
  957                 if (mask & IFCAP_LRO)
  958                         ifp->if_capenable ^= IFCAP_LRO;
  959                 if (mask & IFCAP_VLAN_HWTAGGING)
  960                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
  961                 if (mask & IFCAP_VLAN_HWFILTER)
  962                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
  963                 if (mask & IFCAP_VLAN_HWTSO)
  964                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
  965                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  966                         IXGBE_CORE_LOCK(adapter);
  967                         ixgbe_init_locked(adapter);
  968                         IXGBE_CORE_UNLOCK(adapter);
  969                 }
  970                 VLAN_CAPABILITIES(ifp);
  971                 break;
  972         }
  973 
  974         default:
  975                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
  976                 error = ether_ioctl(ifp, command, data);
  977                 break;
  978         }
  979 
  980         return (error);
  981 }
  982 
  983 /*********************************************************************
  984  *  Init entry point
  985  *
  986  *  This routine is used in two ways. It is used by the stack as
  987  *  init entry point in network interface structure. It is also used
  988  *  by the driver as a hw/sw initialization routine to get to a
  989  *  consistent state.
  990  *
  991  *  return 0 on success, positive on failure
  992  **********************************************************************/
  993 #define IXGBE_MHADD_MFS_SHIFT 16
  994 
  995 static void
  996 ixgbe_init_locked(struct adapter *adapter)
  997 {
  998         struct ifnet   *ifp = adapter->ifp;
  999         device_t        dev = adapter->dev;
 1000         struct ixgbe_hw *hw = &adapter->hw;
 1001         u32             k, txdctl, mhadd, gpie;
 1002         u32             rxdctl, rxctrl;
 1003 
 1004         mtx_assert(&adapter->core_mtx, MA_OWNED);
 1005         INIT_DEBUGOUT("ixgbe_init: begin");
 1006         hw->adapter_stopped = FALSE;
 1007         ixgbe_stop_adapter(hw);
 1008         callout_stop(&adapter->timer);
 1009 
 1010         /* reprogram the RAR[0] in case user changed it. */
 1011         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
 1012 
 1013         /* Get the latest mac address, User can use a LAA */
 1014         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
 1015               IXGBE_ETH_LENGTH_OF_ADDRESS);
 1016         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
 1017         hw->addr_ctrl.rar_used_count = 1;
 1018 
 1019         /* Set the various hardware offload abilities */
 1020         ifp->if_hwassist = 0;
 1021         if (ifp->if_capenable & IFCAP_TSO4)
 1022                 ifp->if_hwassist |= CSUM_TSO;
 1023         if (ifp->if_capenable & IFCAP_TXCSUM) {
 1024                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
 1025 #if __FreeBSD_version >= 800000
 1026                 if (hw->mac.type != ixgbe_mac_82598EB)
 1027                         ifp->if_hwassist |= CSUM_SCTP;
 1028 #endif
 1029         }
 1030 
 1031         /* Prepare transmit descriptors and buffers */
 1032         if (ixgbe_setup_transmit_structures(adapter)) {
 1033                 device_printf(dev,"Could not setup transmit structures\n");
 1034                 ixgbe_stop(adapter);
 1035                 return;
 1036         }
 1037 
 1038         ixgbe_init_hw(hw);
 1039         ixgbe_initialize_transmit_units(adapter);
 1040 
 1041         /* Setup Multicast table */
 1042         ixgbe_set_multi(adapter);
 1043 
 1044         /*
 1045         ** Determine the correct mbuf pool
 1046         ** for doing jumbo/headersplit
 1047         */
 1048         if (adapter->max_frame_size <= 2048)
 1049                 adapter->rx_mbuf_sz = MCLBYTES;
 1050         else if (adapter->max_frame_size <= 4096)
 1051                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
 1052         else if (adapter->max_frame_size <= 9216)
 1053                 adapter->rx_mbuf_sz = MJUM9BYTES;
 1054         else
 1055                 adapter->rx_mbuf_sz = MJUM16BYTES;
 1056 
 1057         /* Prepare receive descriptors and buffers */
 1058         if (ixgbe_setup_receive_structures(adapter)) {
 1059                 device_printf(dev,"Could not setup receive structures\n");
 1060                 ixgbe_stop(adapter);
 1061                 return;
 1062         }
 1063 
 1064         /* Configure RX settings */
 1065         ixgbe_initialize_receive_units(adapter);
 1066 
 1067         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
 1068 
 1069         /* Enable Fan Failure Interrupt */
 1070         gpie |= IXGBE_SDP1_GPIEN;
 1071 
 1072         /* Add for Thermal detection */
 1073         if (hw->mac.type == ixgbe_mac_82599EB)
 1074                 gpie |= IXGBE_SDP2_GPIEN;
 1075 
 1076         if (adapter->msix > 1) {
 1077                 /* Enable Enhanced MSIX mode */
 1078                 gpie |= IXGBE_GPIE_MSIX_MODE;
 1079                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
 1080                     IXGBE_GPIE_OCD;
 1081         }
 1082         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 1083 
 1084         /* Set MTU size */
 1085         if (ifp->if_mtu > ETHERMTU) {
 1086                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 1087                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
 1088                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
 1089                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 1090         }
 1091         
 1092         /* Now enable all the queues */
 1093 
 1094         for (int i = 0; i < adapter->num_queues; i++) {
 1095                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 1096                 txdctl |= IXGBE_TXDCTL_ENABLE;
 1097                 /* Set WTHRESH to 8, burst writeback */
 1098                 txdctl |= (8 << 16);
 1099                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
 1100         }
 1101 
 1102         for (int i = 0; i < adapter->num_queues; i++) {
 1103                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 1104                 if (hw->mac.type == ixgbe_mac_82598EB) {
 1105                         /*
 1106                         ** PTHRESH = 21
 1107                         ** HTHRESH = 4
 1108                         ** WTHRESH = 8
 1109                         */
 1110                         rxdctl &= ~0x3FFFFF;
 1111                         rxdctl |= 0x080420;
 1112                 }
 1113                 rxdctl |= IXGBE_RXDCTL_ENABLE;
 1114                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
 1115                 for (k = 0; k < 10; k++) {
 1116                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
 1117                             IXGBE_RXDCTL_ENABLE)
 1118                                 break;
 1119                         else
 1120                                 msec_delay(1);
 1121                 }
 1122                 wmb();
 1123                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
 1124         }
 1125 
 1126         /* Set up VLAN support and filter */
 1127         ixgbe_setup_vlan_hw_support(adapter);
 1128 
 1129         /* Enable Receive engine */
 1130         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 1131         if (hw->mac.type == ixgbe_mac_82598EB)
 1132                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
 1133         rxctrl |= IXGBE_RXCTRL_RXEN;
 1134         ixgbe_enable_rx_dma(hw, rxctrl);
 1135 
 1136         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
 1137 
 1138         /* Set up MSI/X routing */
 1139         if (ixgbe_enable_msix)  {
 1140                 ixgbe_configure_ivars(adapter);
 1141                 /* Set up auto-mask */
 1142                 if (hw->mac.type == ixgbe_mac_82598EB)
 1143                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
 1144                 else {
 1145                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
 1146                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
 1147                 }
 1148         } else {  /* Simple settings for Legacy/MSI */
 1149                 ixgbe_set_ivar(adapter, 0, 0, 0);
 1150                 ixgbe_set_ivar(adapter, 0, 0, 1);
 1151                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
 1152         }
 1153 
 1154 #ifdef IXGBE_FDIR
 1155         /* Init Flow director */
 1156         if (hw->mac.type != ixgbe_mac_82598EB)
 1157                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
 1158 #endif
 1159 
 1160         /*
 1161         ** Check on any SFP devices that
 1162         ** need to be kick-started
 1163         */
 1164         if (hw->phy.type == ixgbe_phy_none) {
 1165                 int err = hw->phy.ops.identify(hw);
 1166                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 1167                         device_printf(dev,
 1168                             "Unsupported SFP+ module type was detected.\n");
 1169                         return;
 1170                 }
 1171         }
 1172 
 1173         /* Set moderation on the Link interrupt */
 1174         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
 1175 
 1176         /* Config/Enable Link */
 1177         ixgbe_config_link(adapter);
 1178 
 1179         /* And now turn on interrupts */
 1180         ixgbe_enable_intr(adapter);
 1181 
 1182         /* Now inform the stack we're ready */
 1183         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1184         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1185 
 1186         return;
 1187 }
 1188 
 1189 static void
 1190 ixgbe_init(void *arg)
 1191 {
 1192         struct adapter *adapter = arg;
 1193 
 1194         IXGBE_CORE_LOCK(adapter);
 1195         ixgbe_init_locked(adapter);
 1196         IXGBE_CORE_UNLOCK(adapter);
 1197         return;
 1198 }
 1199 
 1200 
 1201 /*
 1202 **
 1203 ** MSIX Interrupt Handlers and Tasklets
 1204 **
 1205 */
 1206 
 1207 static inline void
 1208 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
 1209 {
 1210         struct ixgbe_hw *hw = &adapter->hw;
 1211         u64     queue = (u64)(1 << vector);
 1212         u32     mask;
 1213 
 1214         if (hw->mac.type == ixgbe_mac_82598EB) {
 1215                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
 1216                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 1217         } else {
 1218                 mask = (queue & 0xFFFFFFFF);
 1219                 if (mask)
 1220                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
 1221                 mask = (queue >> 32);
 1222                 if (mask)
 1223                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
 1224         }
 1225 }
 1226 
 1227 static inline void
 1228 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
 1229 {
 1230         struct ixgbe_hw *hw = &adapter->hw;
 1231         u64     queue = (u64)(1 << vector);
 1232         u32     mask;
 1233 
 1234         if (hw->mac.type == ixgbe_mac_82598EB) {
 1235                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
 1236                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
 1237         } else {
 1238                 mask = (queue & 0xFFFFFFFF);
 1239                 if (mask)
 1240                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
 1241                 mask = (queue >> 32);
 1242                 if (mask)
 1243                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
 1244         }
 1245 }
 1246 
 1247 static inline void
 1248 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
 1249 {
 1250         u32 mask;
 1251 
 1252         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 1253                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
 1254                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
 1255         } else {
 1256                 mask = (queues & 0xFFFFFFFF);
 1257                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
 1258                 mask = (queues >> 32);
 1259                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
 1260         }
 1261 }
 1262 
 1263 
 1264 static void
 1265 ixgbe_handle_que(void *context, int pending)
 1266 {
 1267         struct ix_queue *que = context;
 1268         struct adapter  *adapter = que->adapter;
 1269         struct tx_ring  *txr = que->txr;
 1270         struct ifnet    *ifp = adapter->ifp;
 1271         bool            more;
 1272 
 1273         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1274                 more = ixgbe_rxeof(que, adapter->rx_process_limit);
 1275                 IXGBE_TX_LOCK(txr);
 1276                 ixgbe_txeof(txr);
 1277 #if __FreeBSD_version >= 800000
 1278                 if (!drbr_empty(ifp, txr->br))
 1279                         ixgbe_mq_start_locked(ifp, txr, NULL);
 1280 #else
 1281                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1282                         ixgbe_start_locked(txr, ifp);
 1283 #endif
 1284                 IXGBE_TX_UNLOCK(txr);
 1285                 if (more) {
 1286                         taskqueue_enqueue(que->tq, &que->que_task);
 1287                         return;
 1288                 }
 1289         }
 1290 
 1291         /* Reenable this interrupt */
 1292         ixgbe_enable_queue(adapter, que->msix);
 1293         return;
 1294 }
 1295 
 1296 
 1297 /*********************************************************************
 1298  *
 1299  *  Legacy Interrupt Service routine
 1300  *
 1301  **********************************************************************/
 1302 
 1303 static void
 1304 ixgbe_legacy_irq(void *arg)
 1305 {
 1306         struct ix_queue *que = arg;
 1307         struct adapter  *adapter = que->adapter;
 1308         struct ixgbe_hw *hw = &adapter->hw;
 1309         struct          tx_ring *txr = adapter->tx_rings;
 1310         bool            more_tx, more_rx;
 1311         u32             reg_eicr, loop = MAX_LOOP;
 1312 
 1313 
 1314         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 1315 
 1316         ++que->irqs;
 1317         if (reg_eicr == 0) {
 1318                 ixgbe_enable_intr(adapter);
 1319                 return;
 1320         }
 1321 
 1322         more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
 1323 
 1324         IXGBE_TX_LOCK(txr);
 1325         do {
 1326                 more_tx = ixgbe_txeof(txr);
 1327         } while (loop-- && more_tx);
 1328         IXGBE_TX_UNLOCK(txr);
 1329 
 1330         if (more_rx || more_tx)
 1331                 taskqueue_enqueue(que->tq, &que->que_task);
 1332 
 1333         /* Check for fan failure */
 1334         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
 1335             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
 1336                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
 1337                     "REPLACE IMMEDIATELY!!\n");
 1338                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
 1339         }
 1340 
 1341         /* Link status change */
 1342         if (reg_eicr & IXGBE_EICR_LSC)
 1343                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
 1344 
 1345         ixgbe_enable_intr(adapter);
 1346         return;
 1347 }
 1348 
 1349 
 1350 /*********************************************************************
 1351  *
 1352  *  MSIX Queue Interrupt Service routine
 1353  *
 1354  **********************************************************************/
 1355 void
 1356 ixgbe_msix_que(void *arg)
 1357 {
 1358         struct ix_queue *que = arg;
 1359         struct adapter  *adapter = que->adapter;
 1360         struct tx_ring  *txr = que->txr;
 1361         struct rx_ring  *rxr = que->rxr;
 1362         bool            more_tx, more_rx;
 1363         u32             newitr = 0;
 1364 
 1365         ++que->irqs;
 1366 
 1367         more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
 1368 
 1369         IXGBE_TX_LOCK(txr);
 1370         more_tx = ixgbe_txeof(txr);
 1371         /*
 1372         ** Make certain that if the stack 
 1373         ** has anything queued the task gets
 1374         ** scheduled to handle it.
 1375         */
 1376 #if __FreeBSD_version < 800000
 1377         if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
 1378 #else
 1379         if (!drbr_empty(adapter->ifp, txr->br))
 1380 #endif
 1381                 more_tx = 1;
 1382         IXGBE_TX_UNLOCK(txr);
 1383 
 1384         /* Do AIM now? */
 1385 
 1386         if (ixgbe_enable_aim == FALSE)
 1387                 goto no_calc;
 1388         /*
 1389         ** Do Adaptive Interrupt Moderation:
 1390         **  - Write out last calculated setting
 1391         **  - Calculate based on average size over
 1392         **    the last interval.
 1393         */
 1394         if (que->eitr_setting)
 1395                 IXGBE_WRITE_REG(&adapter->hw,
 1396                     IXGBE_EITR(que->msix), que->eitr_setting);
 1397  
 1398         que->eitr_setting = 0;
 1399 
 1400         /* Idle, do nothing */
 1401         if ((txr->bytes == 0) && (rxr->bytes == 0))
 1402                 goto no_calc;
 1403                                 
 1404         if ((txr->bytes) && (txr->packets))
 1405                 newitr = txr->bytes/txr->packets;
 1406         if ((rxr->bytes) && (rxr->packets))
 1407                 newitr = max(newitr,
 1408                     (rxr->bytes / rxr->packets));
 1409         newitr += 24; /* account for hardware frame, crc */
 1410 
 1411         /* set an upper boundary */
 1412         newitr = min(newitr, 3000);
 1413 
 1414         /* Be nice to the mid range */
 1415         if ((newitr > 300) && (newitr < 1200))
 1416                 newitr = (newitr / 3);
 1417         else
 1418                 newitr = (newitr / 2);
 1419 
 1420         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 1421                 newitr |= newitr << 16;
 1422         else
 1423                 newitr |= IXGBE_EITR_CNT_WDIS;
 1424                  
 1425         /* save for next interrupt */
 1426         que->eitr_setting = newitr;
 1427 
 1428         /* Reset state */
 1429         txr->bytes = 0;
 1430         txr->packets = 0;
 1431         rxr->bytes = 0;
 1432         rxr->packets = 0;
 1433 
 1434 no_calc:
 1435         if (more_tx || more_rx)
 1436                 taskqueue_enqueue(que->tq, &que->que_task);
 1437         else /* Reenable this interrupt */
 1438                 ixgbe_enable_queue(adapter, que->msix);
 1439         return;
 1440 }
 1441 
 1442 
 1443 static void
 1444 ixgbe_msix_link(void *arg)
 1445 {
 1446         struct adapter  *adapter = arg;
 1447         struct ixgbe_hw *hw = &adapter->hw;
 1448         u32             reg_eicr;
 1449 
 1450         ++adapter->link_irq;
 1451 
 1452         /* First get the cause */
 1453         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
 1454         /* Clear interrupt with write */
 1455         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
 1456 
 1457         /* Link status change */
 1458         if (reg_eicr & IXGBE_EICR_LSC)
 1459                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
 1460 
 1461         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
 1462 #ifdef IXGBE_FDIR
 1463                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
 1464                         /* This is probably overkill :) */
 1465                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
 1466                                 return;
 1467                         /* Clear the interrupt */
 1468                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
 1469                         /* Turn off the interface */
 1470                         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1471                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
 1472                 } else
 1473 #endif
 1474                 if (reg_eicr & IXGBE_EICR_ECC) {
 1475                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
 1476                             "Please Reboot!!\n");
 1477                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
 1478                 } else
 1479 
 1480                 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
 1481                         /* Clear the interrupt */
 1482                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 1483                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
 1484                 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
 1485                         /* Clear the interrupt */
 1486                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
 1487                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 1488                 }
 1489         } 
 1490 
 1491         /* Check for fan failure */
 1492         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
 1493             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
 1494                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
 1495                     "REPLACE IMMEDIATELY!!\n");
 1496                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 1497         }
 1498 
 1499         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 1500         return;
 1501 }
 1502 
 1503 /*********************************************************************
 1504  *
 1505  *  Media Ioctl callback
 1506  *
 1507  *  This routine is called whenever the user queries the status of
 1508  *  the interface using ifconfig.
 1509  *
 1510  **********************************************************************/
 1511 static void
 1512 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
 1513 {
 1514         struct adapter *adapter = ifp->if_softc;
 1515 
 1516         INIT_DEBUGOUT("ixgbe_media_status: begin");
 1517         IXGBE_CORE_LOCK(adapter);
 1518         ixgbe_update_link_status(adapter);
 1519 
 1520         ifmr->ifm_status = IFM_AVALID;
 1521         ifmr->ifm_active = IFM_ETHER;
 1522 
 1523         if (!adapter->link_active) {
 1524                 IXGBE_CORE_UNLOCK(adapter);
 1525                 return;
 1526         }
 1527 
 1528         ifmr->ifm_status |= IFM_ACTIVE;
 1529 
 1530         switch (adapter->link_speed) {
 1531                 case IXGBE_LINK_SPEED_1GB_FULL:
 1532                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
 1533                         break;
 1534                 case IXGBE_LINK_SPEED_10GB_FULL:
 1535                         ifmr->ifm_active |= adapter->optics | IFM_FDX;
 1536                         break;
 1537         }
 1538 
 1539         IXGBE_CORE_UNLOCK(adapter);
 1540 
 1541         return;
 1542 }
 1543 
 1544 /*********************************************************************
 1545  *
 1546  *  Media Ioctl callback
 1547  *
 1548  *  This routine is called when the user changes speed/duplex using
 1549  *  media/mediopt option with ifconfig.
 1550  *
 1551  **********************************************************************/
 1552 static int
 1553 ixgbe_media_change(struct ifnet * ifp)
 1554 {
 1555         struct adapter *adapter = ifp->if_softc;
 1556         struct ifmedia *ifm = &adapter->media;
 1557 
 1558         INIT_DEBUGOUT("ixgbe_media_change: begin");
 1559 
 1560         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1561                 return (EINVAL);
 1562 
 1563         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 1564         case IFM_AUTO:
 1565                 adapter->hw.phy.autoneg_advertised =
 1566                     IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
 1567                 break;
 1568         default:
 1569                 device_printf(adapter->dev, "Only auto media type\n");
 1570                 return (EINVAL);
 1571         }
 1572 
 1573         return (0);
 1574 }
 1575 
 1576 /*********************************************************************
 1577  *
 1578  *  This routine maps the mbufs to tx descriptors, allowing the
 1579  *  TX engine to transmit the packets. 
 1580  *      - return 0 on success, positive on failure
 1581  *
 1582  **********************************************************************/
 1583 
 1584 static int
 1585 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
 1586 {
 1587         struct adapter  *adapter = txr->adapter;
 1588         u32             olinfo_status = 0, cmd_type_len;
 1589         u32             paylen = 0;
 1590         int             i, j, error, nsegs;
 1591         int             first, last = 0;
 1592         struct mbuf     *m_head;
 1593         bus_dma_segment_t segs[adapter->num_segs];
 1594         bus_dmamap_t    map;
 1595         struct ixgbe_tx_buf *txbuf;
 1596         union ixgbe_adv_tx_desc *txd = NULL;
 1597 
 1598         m_head = *m_headp;
 1599 
 1600         /* Basic descriptor defines */
 1601         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
 1602             IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
 1603 
 1604         if (m_head->m_flags & M_VLANTAG)
 1605                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
 1606 
 1607         /*
 1608          * Important to capture the first descriptor
 1609          * used because it will contain the index of
 1610          * the one we tell the hardware to report back
 1611          */
 1612         first = txr->next_avail_desc;
 1613         txbuf = &txr->tx_buffers[first];
 1614         map = txbuf->map;
 1615 
 1616         /*
 1617          * Map the packet for DMA.
 1618          */
 1619         error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 1620             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1621 
 1622         if (error == EFBIG) {
 1623                 struct mbuf *m;
 1624 
 1625                 m = m_defrag(*m_headp, M_DONTWAIT);
 1626                 if (m == NULL) {
 1627                         adapter->mbuf_defrag_failed++;
 1628                         m_freem(*m_headp);
 1629                         *m_headp = NULL;
 1630                         return (ENOBUFS);
 1631                 }
 1632                 *m_headp = m;
 1633 
 1634                 /* Try it again */
 1635                 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 1636                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1637 
 1638                 if (error == ENOMEM) {
 1639                         adapter->no_tx_dma_setup++;
 1640                         return (error);
 1641                 } else if (error != 0) {
 1642                         adapter->no_tx_dma_setup++;
 1643                         m_freem(*m_headp);
 1644                         *m_headp = NULL;
 1645                         return (error);
 1646                 }
 1647         } else if (error == ENOMEM) {
 1648                 adapter->no_tx_dma_setup++;
 1649                 return (error);
 1650         } else if (error != 0) {
 1651                 adapter->no_tx_dma_setup++;
 1652                 m_freem(*m_headp);
 1653                 *m_headp = NULL;
 1654                 return (error);
 1655         }
 1656 
 1657         /* Make certain there are enough descriptors */
 1658         if (nsegs > txr->tx_avail - 2) {
 1659                 txr->no_desc_avail++;
 1660                 error = ENOBUFS;
 1661                 goto xmit_fail;
 1662         }
 1663         m_head = *m_headp;
 1664 
 1665         /*
 1666         ** Set up the appropriate offload context
 1667         ** this becomes the first descriptor of 
 1668         ** a packet.
 1669         */
 1670         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
 1671                 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
 1672                         cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 1673                         olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
 1674                         olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
 1675                         olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
 1676                         ++adapter->tso_tx;
 1677                 } else
 1678                         return (ENXIO);
 1679         } else if (ixgbe_tx_ctx_setup(txr, m_head))
 1680                 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
 1681 
 1682 #ifdef IXGBE_IEEE1588
 1683         /* This is changing soon to an mtag detection */
 1684         if (we detect this mbuf has a TSTAMP mtag)
 1685                 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
 1686 #endif
 1687 
 1688 #ifdef IXGBE_FDIR
 1689         /* Do the flow director magic */
 1690         if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
 1691                 ++txr->atr_count;
 1692                 if (txr->atr_count >= atr_sample_rate) {
 1693                         ixgbe_atr(txr, m_head);
 1694                         txr->atr_count = 0;
 1695                 }
 1696         }
 1697 #endif
 1698         /* Record payload length */
 1699         if (paylen == 0)
 1700                 olinfo_status |= m_head->m_pkthdr.len <<
 1701                     IXGBE_ADVTXD_PAYLEN_SHIFT;
 1702 
 1703         i = txr->next_avail_desc;
 1704         for (j = 0; j < nsegs; j++) {
 1705                 bus_size_t seglen;
 1706                 bus_addr_t segaddr;
 1707 
 1708                 txbuf = &txr->tx_buffers[i];
 1709                 txd = &txr->tx_base[i];
 1710                 seglen = segs[j].ds_len;
 1711                 segaddr = htole64(segs[j].ds_addr);
 1712 
 1713                 txd->read.buffer_addr = segaddr;
 1714                 txd->read.cmd_type_len = htole32(txr->txd_cmd |
 1715                     cmd_type_len |seglen);
 1716                 txd->read.olinfo_status = htole32(olinfo_status);
 1717                 last = i; /* descriptor that will get completion IRQ */
 1718 
 1719                 if (++i == adapter->num_tx_desc)
 1720                         i = 0;
 1721 
 1722                 txbuf->m_head = NULL;
 1723                 txbuf->eop_index = -1;
 1724         }
 1725 
 1726         txd->read.cmd_type_len |=
 1727             htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
 1728         txr->tx_avail -= nsegs;
 1729         txr->next_avail_desc = i;
 1730 
 1731         txbuf->m_head = m_head;
 1732         /* Swap the dma map between the first and last descriptor */
 1733         txr->tx_buffers[first].map = txbuf->map;
 1734         txbuf->map = map;
 1735         bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
 1736 
 1737         /* Set the index of the descriptor that will be marked done */
 1738         txbuf = &txr->tx_buffers[first];
 1739         txbuf->eop_index = last;
 1740 
 1741         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 1742             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1743         /*
 1744          * Advance the Transmit Descriptor Tail (Tdt), this tells the
 1745          * hardware that this frame is available to transmit.
 1746          */
 1747         ++txr->total_packets;
 1748         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
 1749 
 1750         return (0);
 1751 
 1752 xmit_fail:
 1753         bus_dmamap_unload(txr->txtag, txbuf->map);
 1754         return (error);
 1755 
 1756 }
 1757 
 1758 static void
 1759 ixgbe_set_promisc(struct adapter *adapter)
 1760 {
 1761         u_int32_t       reg_rctl;
 1762         struct ifnet   *ifp = adapter->ifp;
 1763 
 1764         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 1765         reg_rctl &= (~IXGBE_FCTRL_UPE);
 1766         reg_rctl &= (~IXGBE_FCTRL_MPE);
 1767         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
 1768 
 1769         if (ifp->if_flags & IFF_PROMISC) {
 1770                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 1771                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
 1772         } else if (ifp->if_flags & IFF_ALLMULTI) {
 1773                 reg_rctl |= IXGBE_FCTRL_MPE;
 1774                 reg_rctl &= ~IXGBE_FCTRL_UPE;
 1775                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
 1776         }
 1777         return;
 1778 }
 1779 
 1780 
 1781 /*********************************************************************
 1782  *  Multicast Update
 1783  *
 1784  *  This routine is called whenever multicast address list is updated.
 1785  *
 1786  **********************************************************************/
 1787 #define IXGBE_RAR_ENTRIES 16
 1788 
 1789 static void
 1790 ixgbe_set_multi(struct adapter *adapter)
 1791 {
 1792         u32     fctrl;
 1793         u8      *mta;
 1794         u8      *update_ptr;
 1795         struct  ifmultiaddr *ifma;
 1796         int     mcnt = 0;
 1797         struct ifnet   *ifp = adapter->ifp;
 1798 
 1799         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
 1800 
 1801         mta = adapter->mta;
 1802         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
 1803             MAX_NUM_MULTICAST_ADDRESSES);
 1804 
 1805         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 1806         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 1807         if (ifp->if_flags & IFF_PROMISC)
 1808                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 1809         else if (ifp->if_flags & IFF_ALLMULTI) {
 1810                 fctrl |= IXGBE_FCTRL_MPE;
 1811                 fctrl &= ~IXGBE_FCTRL_UPE;
 1812         } else
 1813                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 1814         
 1815         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 1816 
 1817 #if __FreeBSD_version < 800000
 1818         IF_ADDR_LOCK(ifp);
 1819 #else
 1820         if_maddr_rlock(ifp);
 1821 #endif
 1822         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1823                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1824                         continue;
 1825                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
 1826                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
 1827                     IXGBE_ETH_LENGTH_OF_ADDRESS);
 1828                 mcnt++;
 1829         }
 1830 #if __FreeBSD_version < 800000
 1831         IF_ADDR_UNLOCK(ifp);
 1832 #else
 1833         if_maddr_runlock(ifp);
 1834 #endif
 1835 
 1836         update_ptr = mta;
 1837         ixgbe_update_mc_addr_list(&adapter->hw,
 1838             update_ptr, mcnt, ixgbe_mc_array_itr);
 1839 
 1840         return;
 1841 }
 1842 
 1843 /*
 1844  * This is an iterator function now needed by the multicast
 1845  * shared code. It simply feeds the shared code routine the
 1846  * addresses in the array of ixgbe_set_multi() one by one.
 1847  */
 1848 static u8 *
 1849 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
 1850 {
 1851         u8 *addr = *update_ptr;
 1852         u8 *newptr;
 1853         *vmdq = 0;
 1854 
 1855         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
 1856         *update_ptr = newptr;
 1857         return addr;
 1858 }
 1859 
 1860 
 1861 /*********************************************************************
 1862  *  Timer routine
 1863  *
 1864  *  This routine checks for link status,updates statistics,
 1865  *  and runs the watchdog check.
 1866  *
 1867  **********************************************************************/
 1868 
 1869 static void
 1870 ixgbe_local_timer(void *arg)
 1871 {
 1872         struct adapter *adapter = arg;
 1873         device_t        dev = adapter->dev;
 1874         struct tx_ring *txr = adapter->tx_rings;
 1875 
 1876         mtx_assert(&adapter->core_mtx, MA_OWNED);
 1877 
 1878         /* Check for pluggable optics */
 1879         if (adapter->sfp_probe)
 1880                 if (!ixgbe_sfp_probe(adapter))
 1881                         goto out; /* Nothing to do */
 1882 
 1883         ixgbe_update_link_status(adapter);
 1884         ixgbe_update_stats_counters(adapter);
 1885 
 1886         /*
 1887          * If the interface has been paused
 1888          * then don't do the watchdog check
 1889          */
 1890         if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
 1891                 goto out;
 1892 
 1893         /*
 1894         ** Check status on the TX queues for a hang
 1895         */
 1896         for (int i = 0; i < adapter->num_queues; i++, txr++)
 1897                 if (txr->queue_status == IXGBE_QUEUE_HUNG)
 1898                         goto hung;
 1899 
 1900 out:
 1901         ixgbe_rearm_queues(adapter, adapter->que_mask);
 1902         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
 1903         return;
 1904 
 1905 hung:
 1906         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
 1907         device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
 1908             IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
 1909             IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
 1910         device_printf(dev,"TX(%d) desc avail = %d,"
 1911             "Next TX to Clean = %d\n",
 1912             txr->me, txr->tx_avail, txr->next_to_clean);
 1913         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1914         adapter->watchdog_events++;
 1915         ixgbe_init_locked(adapter);
 1916 }
 1917 
 1918 /*
 1919 ** Note: this routine updates the OS on the link state
 1920 **      the real check of the hardware only happens with
 1921 **      a link interrupt.
 1922 */
 1923 static void
 1924 ixgbe_update_link_status(struct adapter *adapter)
 1925 {
 1926         struct ifnet    *ifp = adapter->ifp;
 1927         struct tx_ring *txr = adapter->tx_rings;
 1928         device_t dev = adapter->dev;
 1929 
 1930 
 1931         if (adapter->link_up){ 
 1932                 if (adapter->link_active == FALSE) {
 1933                         if (bootverbose)
 1934                                 device_printf(dev,"Link is up %d Gbps %s \n",
 1935                                     ((adapter->link_speed == 128)? 10:1),
 1936                                     "Full Duplex");
 1937                         adapter->link_active = TRUE;
 1938                         if_link_state_change(ifp, LINK_STATE_UP);
 1939                 }
 1940         } else { /* Link down */
 1941                 if (adapter->link_active == TRUE) {
 1942                         if (bootverbose)
 1943                                 device_printf(dev,"Link is Down\n");
 1944                         if_link_state_change(ifp, LINK_STATE_DOWN);
 1945                         adapter->link_active = FALSE;
 1946                         for (int i = 0; i < adapter->num_queues;
 1947                             i++, txr++)
 1948                                 txr->queue_status = IXGBE_QUEUE_IDLE;
 1949                 }
 1950         }
 1951 
 1952         return;
 1953 }
 1954 
 1955 
 1956 /*********************************************************************
 1957  *
 1958  *  This routine disables all traffic on the adapter by issuing a
 1959  *  global reset on the MAC and deallocates TX/RX buffers.
 1960  *
 1961  **********************************************************************/
 1962 
 1963 static void
 1964 ixgbe_stop(void *arg)
 1965 {
 1966         struct ifnet   *ifp;
 1967         struct adapter *adapter = arg;
 1968         struct ixgbe_hw *hw = &adapter->hw;
 1969         ifp = adapter->ifp;
 1970 
 1971         mtx_assert(&adapter->core_mtx, MA_OWNED);
 1972 
 1973         INIT_DEBUGOUT("ixgbe_stop: begin\n");
 1974         ixgbe_disable_intr(adapter);
 1975 
 1976         /* Tell the stack that the interface is no longer active */
 1977         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1978 
 1979         ixgbe_reset_hw(hw);
 1980         hw->adapter_stopped = FALSE;
 1981         ixgbe_stop_adapter(hw);
 1982         /* Turn off the laser */
 1983         if (hw->phy.multispeed_fiber)
 1984                 ixgbe_disable_tx_laser(hw);
 1985         callout_stop(&adapter->timer);
 1986 
 1987         /* reprogram the RAR[0] in case user changed it. */
 1988         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
 1989 
 1990         return;
 1991 }
 1992 
 1993 
 1994 /*********************************************************************
 1995  *
 1996  *  Determine hardware revision.
 1997  *
 1998  **********************************************************************/
 1999 static void
 2000 ixgbe_identify_hardware(struct adapter *adapter)
 2001 {
 2002         device_t        dev = adapter->dev;
 2003         struct ixgbe_hw *hw = &adapter->hw;
 2004 
 2005         /* Save off the information about this board */
 2006         hw->vendor_id = pci_get_vendor(dev);
 2007         hw->device_id = pci_get_device(dev);
 2008         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
 2009         hw->subsystem_vendor_id =
 2010             pci_read_config(dev, PCIR_SUBVEND_0, 2);
 2011         hw->subsystem_device_id =
 2012             pci_read_config(dev, PCIR_SUBDEV_0, 2);
 2013 
 2014         /* We need this here to set the num_segs below */
 2015         ixgbe_set_mac_type(hw);
 2016 
 2017         /* Pick up the 82599 and VF settings */
 2018         if (hw->mac.type != ixgbe_mac_82598EB) {
 2019                 hw->phy.smart_speed = ixgbe_smart_speed;
 2020                 adapter->num_segs = IXGBE_82599_SCATTER;
 2021         } else
 2022                 adapter->num_segs = IXGBE_82598_SCATTER;
 2023 
 2024         return;
 2025 }
 2026 
 2027 /*********************************************************************
 2028  *
 2029  *  Determine optic type
 2030  *
 2031  **********************************************************************/
 2032 static void
 2033 ixgbe_setup_optics(struct adapter *adapter)
 2034 {
 2035         struct ixgbe_hw *hw = &adapter->hw;
 2036         int             layer;
 2037         
 2038         layer = ixgbe_get_supported_physical_layer(hw);
 2039         switch (layer) {
 2040                 case IXGBE_PHYSICAL_LAYER_10GBASE_T:
 2041                         adapter->optics = IFM_10G_T;
 2042                         break;
 2043                 case IXGBE_PHYSICAL_LAYER_1000BASE_T:
 2044                         adapter->optics = IFM_1000_T;
 2045                         break;
 2046                 case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
 2047                 case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
 2048                         adapter->optics = IFM_10G_LR;
 2049                         break;
 2050                 case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
 2051                         adapter->optics = IFM_10G_SR;
 2052                         break;
 2053                 case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
 2054                 case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
 2055                         adapter->optics = IFM_10G_CX4;
 2056                         break;
 2057                 case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
 2058                         adapter->optics = IFM_10G_TWINAX;
 2059                         break;
 2060                 case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
 2061                 case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
 2062                 case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
 2063                 case IXGBE_PHYSICAL_LAYER_UNKNOWN:
 2064                 default:
 2065                         adapter->optics = IFM_ETHER | IFM_AUTO;
 2066                         break;
 2067         }
 2068         return;
 2069 }
 2070 
 2071 /*********************************************************************
 2072  *
 2073  *  Setup the Legacy or MSI Interrupt handler
 2074  *
 2075  **********************************************************************/
 2076 static int
 2077 ixgbe_allocate_legacy(struct adapter *adapter)
 2078 {
 2079         device_t dev = adapter->dev;
 2080         struct          ix_queue *que = adapter->queues;
 2081         int error, rid = 0;
 2082 
 2083         /* MSI RID at 1 */
 2084         if (adapter->msix == 1)
 2085                 rid = 1;
 2086 
 2087         /* We allocate a single interrupt resource */
 2088         adapter->res = bus_alloc_resource_any(dev,
 2089             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
 2090         if (adapter->res == NULL) {
 2091                 device_printf(dev, "Unable to allocate bus resource: "
 2092                     "interrupt\n");
 2093                 return (ENXIO);
 2094         }
 2095 
 2096         /*
 2097          * Try allocating a fast interrupt and the associated deferred
 2098          * processing contexts.
 2099          */
 2100         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
 2101         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
 2102             taskqueue_thread_enqueue, &que->tq);
 2103         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
 2104             device_get_nameunit(adapter->dev));
 2105 
 2106         /* Tasklets for Link, SFP and Multispeed Fiber */
 2107         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
 2108         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
 2109         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
 2110 #ifdef IXGBE_FDIR
 2111         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
 2112 #endif
 2113         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
 2114             taskqueue_thread_enqueue, &adapter->tq);
 2115         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
 2116             device_get_nameunit(adapter->dev));
 2117 
 2118         if ((error = bus_setup_intr(dev, adapter->res,
 2119             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
 2120             que, &adapter->tag)) != 0) {
 2121                 device_printf(dev, "Failed to register fast interrupt "
 2122                     "handler: %d\n", error);
 2123                 taskqueue_free(que->tq);
 2124                 taskqueue_free(adapter->tq);
 2125                 que->tq = NULL;
 2126                 adapter->tq = NULL;
 2127                 return (error);
 2128         }
 2129         /* For simplicity in the handlers */
 2130         adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
 2131 
 2132         return (0);
 2133 }
 2134 
 2135 
 2136 /*********************************************************************
 2137  *
 2138  *  Setup MSIX Interrupt resources and handlers 
 2139  *
 2140  **********************************************************************/
 2141 static int
 2142 ixgbe_allocate_msix(struct adapter *adapter)
 2143 {
 2144         device_t        dev = adapter->dev;
 2145         struct          ix_queue *que = adapter->queues;
 2146         int             error, rid, vector = 0;
 2147 
 2148         for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
 2149                 rid = vector + 1;
 2150                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2151                     RF_SHAREABLE | RF_ACTIVE);
 2152                 if (que->res == NULL) {
 2153                         device_printf(dev,"Unable to allocate"
 2154                             " bus resource: que interrupt [%d]\n", vector);
 2155                         return (ENXIO);
 2156                 }
 2157                 /* Set the handler function */
 2158                 error = bus_setup_intr(dev, que->res,
 2159                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
 2160                     ixgbe_msix_que, que, &que->tag);
 2161                 if (error) {
 2162                         que->res = NULL;
 2163                         device_printf(dev, "Failed to register QUE handler");
 2164                         return (error);
 2165                 }
 2166 #if __FreeBSD_version >= 800504
 2167                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
 2168 #endif
 2169                 que->msix = vector;
 2170                 adapter->que_mask |= (u64)(1 << que->msix);
 2171                 /*
 2172                 ** Bind the msix vector, and thus the
 2173                 ** ring to the corresponding cpu.
 2174                 */
 2175                 if (adapter->num_queues > 1)
 2176                         bus_bind_intr(dev, que->res, i);
 2177 
 2178                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
 2179                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
 2180                     taskqueue_thread_enqueue, &que->tq);
 2181                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
 2182                     device_get_nameunit(adapter->dev));
 2183         }
 2184 
 2185         /* and Link */
 2186         rid = vector + 1;
 2187         adapter->res = bus_alloc_resource_any(dev,
 2188             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
 2189         if (!adapter->res) {
 2190                 device_printf(dev,"Unable to allocate"
 2191             " bus resource: Link interrupt [%d]\n", rid);
 2192                 return (ENXIO);
 2193         }
 2194         /* Set the link handler function */
 2195         error = bus_setup_intr(dev, adapter->res,
 2196             INTR_TYPE_NET | INTR_MPSAFE, NULL,
 2197             ixgbe_msix_link, adapter, &adapter->tag);
 2198         if (error) {
 2199                 adapter->res = NULL;
 2200                 device_printf(dev, "Failed to register LINK handler");
 2201                 return (error);
 2202         }
 2203 #if __FreeBSD_version >= 800504
 2204         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
 2205 #endif
 2206         adapter->linkvec = vector;
 2207         /* Tasklets for Link, SFP and Multispeed Fiber */
 2208         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
 2209         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
 2210         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
 2211 #ifdef IXGBE_FDIR
 2212         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
 2213 #endif
 2214         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
 2215             taskqueue_thread_enqueue, &adapter->tq);
 2216         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
 2217             device_get_nameunit(adapter->dev));
 2218 
 2219         return (0);
 2220 }
 2221 
 2222 /*
 2223  * Setup Either MSI/X or MSI
 2224  */
 2225 static int
 2226 ixgbe_setup_msix(struct adapter *adapter)
 2227 {
 2228         device_t dev = adapter->dev;
 2229         int rid, want, queues, msgs;
 2230 
 2231         /* Override by tuneable */
 2232         if (ixgbe_enable_msix == 0)
 2233                 goto msi;
 2234 
 2235         /* First try MSI/X */
 2236         rid = PCIR_BAR(MSIX_82598_BAR);
 2237         adapter->msix_mem = bus_alloc_resource_any(dev,
 2238             SYS_RES_MEMORY, &rid, RF_ACTIVE);
 2239         if (!adapter->msix_mem) {
 2240                 rid += 4;       /* 82599 maps in higher BAR */
 2241                 adapter->msix_mem = bus_alloc_resource_any(dev,
 2242                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
 2243         }
 2244         if (!adapter->msix_mem) {
 2245                 /* May not be enabled */
 2246                 device_printf(adapter->dev,
 2247                     "Unable to map MSIX table \n");
 2248                 goto msi;
 2249         }
 2250 
 2251         msgs = pci_msix_count(dev); 
 2252         if (msgs == 0) { /* system has msix disabled */
 2253                 bus_release_resource(dev, SYS_RES_MEMORY,
 2254                     rid, adapter->msix_mem);
 2255                 adapter->msix_mem = NULL;
 2256                 goto msi;
 2257         }
 2258 
 2259         /* Figure out a reasonable auto config value */
 2260         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
 2261 
 2262         if (ixgbe_num_queues != 0)
 2263                 queues = ixgbe_num_queues;
 2264         /* Set max queues to 8 when autoconfiguring */
 2265         else if ((ixgbe_num_queues == 0) && (queues > 8))
 2266                 queues = 8;
 2267 
 2268         /*
 2269         ** Want one vector (RX/TX pair) per queue
 2270         ** plus an additional for Link.
 2271         */
 2272         want = queues + 1;
 2273         if (msgs >= want)
 2274                 msgs = want;
 2275         else {
 2276                 device_printf(adapter->dev,
 2277                     "MSIX Configuration Problem, "
 2278                     "%d vectors but %d queues wanted!\n",
 2279                     msgs, want);
 2280                 return (0); /* Will go to Legacy setup */
 2281         }
 2282         if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
 2283                 device_printf(adapter->dev,
 2284                     "Using MSIX interrupts with %d vectors\n", msgs);
 2285                 adapter->num_queues = queues;
 2286                 return (msgs);
 2287         }
 2288 msi:
 2289         msgs = pci_msi_count(dev);
 2290         if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
 2291                 device_printf(adapter->dev,"Using an MSI interrupt\n");
 2292         else
 2293                 device_printf(adapter->dev,"Using a Legacy interrupt\n");
 2294         return (msgs);
 2295 }
 2296 
 2297 
 2298 static int
 2299 ixgbe_allocate_pci_resources(struct adapter *adapter)
 2300 {
 2301         int             rid;
 2302         device_t        dev = adapter->dev;
 2303 
 2304         rid = PCIR_BAR(0);
 2305         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 2306             &rid, RF_ACTIVE);
 2307 
 2308         if (!(adapter->pci_mem)) {
 2309                 device_printf(dev,"Unable to allocate bus resource: memory\n");
 2310                 return (ENXIO);
 2311         }
 2312 
 2313         adapter->osdep.mem_bus_space_tag =
 2314                 rman_get_bustag(adapter->pci_mem);
 2315         adapter->osdep.mem_bus_space_handle =
 2316                 rman_get_bushandle(adapter->pci_mem);
 2317         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
 2318 
 2319         /* Legacy defaults */
 2320         adapter->num_queues = 1;
 2321         adapter->hw.back = &adapter->osdep;
 2322 
 2323         /*
 2324         ** Now setup MSI or MSI/X, should
 2325         ** return us the number of supported
 2326         ** vectors. (Will be 1 for MSI)
 2327         */
 2328         adapter->msix = ixgbe_setup_msix(adapter);
 2329         return (0);
 2330 }
 2331 
 2332 static void
 2333 ixgbe_free_pci_resources(struct adapter * adapter)
 2334 {
 2335         struct          ix_queue *que = adapter->queues;
 2336         device_t        dev = adapter->dev;
 2337         int             rid, memrid;
 2338 
 2339         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 2340                 memrid = PCIR_BAR(MSIX_82598_BAR);
 2341         else
 2342                 memrid = PCIR_BAR(MSIX_82599_BAR);
 2343 
 2344         /*
 2345         ** There is a slight possibility of a failure mode
 2346         ** in attach that will result in entering this function
 2347         ** before interrupt resources have been initialized, and
 2348         ** in that case we do not want to execute the loops below
 2349         ** We can detect this reliably by the state of the adapter
 2350         ** res pointer.
 2351         */
 2352         if (adapter->res == NULL)
 2353                 goto mem;
 2354 
 2355         /*
 2356         **  Release all msix queue resources:
 2357         */
 2358         for (int i = 0; i < adapter->num_queues; i++, que++) {
 2359                 rid = que->msix + 1;
 2360                 if (que->tag != NULL) {
 2361                         bus_teardown_intr(dev, que->res, que->tag);
 2362                         que->tag = NULL;
 2363                 }
 2364                 if (que->res != NULL)
 2365                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
 2366         }
 2367 
 2368 
 2369         /* Clean the Legacy or Link interrupt last */
 2370         if (adapter->linkvec) /* we are doing MSIX */
 2371                 rid = adapter->linkvec + 1;
 2372         else
 2373                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
 2374 
 2375         if (adapter->tag != NULL) {
 2376                 bus_teardown_intr(dev, adapter->res, adapter->tag);
 2377                 adapter->tag = NULL;
 2378         }
 2379         if (adapter->res != NULL)
 2380                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
 2381 
 2382 mem:
 2383         if (adapter->msix)
 2384                 pci_release_msi(dev);
 2385 
 2386         if (adapter->msix_mem != NULL)
 2387                 bus_release_resource(dev, SYS_RES_MEMORY,
 2388                     memrid, adapter->msix_mem);
 2389 
 2390         if (adapter->pci_mem != NULL)
 2391                 bus_release_resource(dev, SYS_RES_MEMORY,
 2392                     PCIR_BAR(0), adapter->pci_mem);
 2393 
 2394         return;
 2395 }
 2396 
 2397 /*********************************************************************
 2398  *
 2399  *  Setup networking device structure and register an interface.
 2400  *
 2401  **********************************************************************/
 2402 static int
 2403 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
 2404 {
 2405         struct ixgbe_hw *hw = &adapter->hw;
 2406         struct ifnet   *ifp;
 2407 
 2408         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
 2409 
 2410         ifp = adapter->ifp = if_alloc(IFT_ETHER);
 2411         if (ifp == NULL) {
 2412                 device_printf(dev, "can not allocate ifnet structure\n");
 2413                 return (-1);
 2414         }
 2415         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2416         ifp->if_mtu = ETHERMTU;
 2417         ifp->if_baudrate = 1000000000;
 2418         ifp->if_init = ixgbe_init;
 2419         ifp->if_softc = adapter;
 2420         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2421         ifp->if_ioctl = ixgbe_ioctl;
 2422         ifp->if_start = ixgbe_start;
 2423 #if __FreeBSD_version >= 800000
 2424         ifp->if_transmit = ixgbe_mq_start;
 2425         ifp->if_qflush = ixgbe_qflush;
 2426 #endif
 2427         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
 2428 
 2429         ether_ifattach(ifp, adapter->hw.mac.addr);
 2430 
 2431         adapter->max_frame_size =
 2432             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 2433 
 2434         /*
 2435          * Tell the upper layer(s) we support long frames.
 2436          */
 2437         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 2438 
 2439         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
 2440         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
 2441         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
 2442                              |  IFCAP_VLAN_HWTSO
 2443                              |  IFCAP_VLAN_MTU;
 2444         ifp->if_capenable = ifp->if_capabilities;
 2445 
 2446         /* Don't enable LRO by default */
 2447         ifp->if_capabilities |= IFCAP_LRO;
 2448 
 2449         /*
 2450         ** Don't turn this on by default, if vlans are
 2451         ** created on another pseudo device (eg. lagg)
 2452         ** then vlan events are not passed thru, breaking
 2453         ** operation, but with HW FILTER off it works. If
 2454         ** using vlans directly on the ixgbe driver you can
 2455         ** enable this and get full hardware tag filtering.
 2456         */
 2457         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
 2458 
 2459         /*
 2460          * Specify the media types supported by this adapter and register
 2461          * callbacks to update media and link information
 2462          */
 2463         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
 2464                      ixgbe_media_status);
 2465         ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
 2466         ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
 2467         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
 2468                 ifmedia_add(&adapter->media,
 2469                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
 2470                 ifmedia_add(&adapter->media,
 2471                     IFM_ETHER | IFM_1000_T, 0, NULL);
 2472         }
 2473         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 2474         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 2475 
 2476         return (0);
 2477 }
 2478 
 2479 static void
 2480 ixgbe_config_link(struct adapter *adapter)
 2481 {
 2482         struct ixgbe_hw *hw = &adapter->hw;
 2483         u32     autoneg, err = 0;
 2484         bool    sfp, negotiate;
 2485 
 2486         sfp = ixgbe_is_sfp(hw);
 2487 
 2488         if (sfp) { 
 2489                 if (hw->phy.multispeed_fiber) {
 2490                         hw->mac.ops.setup_sfp(hw);
 2491                         ixgbe_enable_tx_laser(hw);
 2492                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
 2493                 } else
 2494                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 2495         } else {
 2496                 if (hw->mac.ops.check_link)
 2497                         err = ixgbe_check_link(hw, &autoneg,
 2498                             &adapter->link_up, FALSE);
 2499                 if (err)
 2500                         goto out;
 2501                 autoneg = hw->phy.autoneg_advertised;
 2502                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
 2503                         err  = hw->mac.ops.get_link_capabilities(hw,
 2504                             &autoneg, &negotiate);
 2505                 if (err)
 2506                         goto out;
 2507                 if (hw->mac.ops.setup_link)
 2508                         err = hw->mac.ops.setup_link(hw, autoneg,
 2509                             negotiate, adapter->link_up);
 2510         }
 2511 out:
 2512         return;
 2513 }
 2514 
 2515 /********************************************************************
 2516  * Manage DMA'able memory.
 2517  *******************************************************************/
 2518 static void
 2519 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
 2520 {
 2521         if (error)
 2522                 return;
 2523         *(bus_addr_t *) arg = segs->ds_addr;
 2524         return;
 2525 }
 2526 
 2527 static int
 2528 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
 2529                 struct ixgbe_dma_alloc *dma, int mapflags)
 2530 {
 2531         device_t dev = adapter->dev;
 2532         int             r;
 2533 
 2534         r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
 2535                                DBA_ALIGN, 0,    /* alignment, bounds */
 2536                                BUS_SPACE_MAXADDR,       /* lowaddr */
 2537                                BUS_SPACE_MAXADDR,       /* highaddr */
 2538                                NULL, NULL,      /* filter, filterarg */
 2539                                size,    /* maxsize */
 2540                                1,       /* nsegments */
 2541                                size,    /* maxsegsize */
 2542                                BUS_DMA_ALLOCNOW,        /* flags */
 2543                                NULL,    /* lockfunc */
 2544                                NULL,    /* lockfuncarg */
 2545                                &dma->dma_tag);
 2546         if (r != 0) {
 2547                 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
 2548                        "error %u\n", r);
 2549                 goto fail_0;
 2550         }
 2551         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
 2552                              BUS_DMA_NOWAIT, &dma->dma_map);
 2553         if (r != 0) {
 2554                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
 2555                        "error %u\n", r);
 2556                 goto fail_1;
 2557         }
 2558         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
 2559                             size,
 2560                             ixgbe_dmamap_cb,
 2561                             &dma->dma_paddr,
 2562                             mapflags | BUS_DMA_NOWAIT);
 2563         if (r != 0) {
 2564                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
 2565                        "error %u\n", r);
 2566                 goto fail_2;
 2567         }
 2568         dma->dma_size = size;
 2569         return (0);
 2570 fail_2:
 2571         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 2572 fail_1:
 2573         bus_dma_tag_destroy(dma->dma_tag);
 2574 fail_0:
 2575         dma->dma_map = NULL;
 2576         dma->dma_tag = NULL;
 2577         return (r);
 2578 }
 2579 
 2580 static void
 2581 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
 2582 {
 2583         bus_dmamap_sync(dma->dma_tag, dma->dma_map,
 2584             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2585         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 2586         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 2587         bus_dma_tag_destroy(dma->dma_tag);
 2588 }
 2589 
 2590 
 2591 /*********************************************************************
 2592  *
 2593  *  Allocate memory for the transmit and receive rings, and then
 2594  *  the descriptors associated with each, called only once at attach.
 2595  *
 2596  **********************************************************************/
 2597 static int
 2598 ixgbe_allocate_queues(struct adapter *adapter)
 2599 {
 2600         device_t        dev = adapter->dev;
 2601         struct ix_queue *que;
 2602         struct tx_ring  *txr;
 2603         struct rx_ring  *rxr;
 2604         int rsize, tsize, error = IXGBE_SUCCESS;
 2605         int txconf = 0, rxconf = 0;
 2606 
 2607         /* First allocate the top level queue structs */
 2608         if (!(adapter->queues =
 2609             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
 2610             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2611                 device_printf(dev, "Unable to allocate queue memory\n");
 2612                 error = ENOMEM;
 2613                 goto fail;
 2614         }
 2615 
 2616         /* First allocate the TX ring struct memory */
 2617         if (!(adapter->tx_rings =
 2618             (struct tx_ring *) malloc(sizeof(struct tx_ring) *
 2619             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2620                 device_printf(dev, "Unable to allocate TX ring memory\n");
 2621                 error = ENOMEM;
 2622                 goto tx_fail;
 2623         }
 2624 
 2625         /* Next allocate the RX */
 2626         if (!(adapter->rx_rings =
 2627             (struct rx_ring *) malloc(sizeof(struct rx_ring) *
 2628             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2629                 device_printf(dev, "Unable to allocate RX ring memory\n");
 2630                 error = ENOMEM;
 2631                 goto rx_fail;
 2632         }
 2633 
 2634         /* For the ring itself */
 2635         tsize = roundup2(adapter->num_tx_desc *
 2636             sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
 2637 
 2638         /*
 2639          * Now set up the TX queues, txconf is needed to handle the
 2640          * possibility that things fail midcourse and we need to
 2641          * undo memory gracefully
 2642          */ 
 2643         for (int i = 0; i < adapter->num_queues; i++, txconf++) {
 2644                 /* Set up some basics */
 2645                 txr = &adapter->tx_rings[i];
 2646                 txr->adapter = adapter;
 2647                 txr->me = i;
 2648 
 2649                 /* Initialize the TX side lock */
 2650                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
 2651                     device_get_nameunit(dev), txr->me);
 2652                 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
 2653 
 2654                 if (ixgbe_dma_malloc(adapter, tsize,
 2655                         &txr->txdma, BUS_DMA_NOWAIT)) {
 2656                         device_printf(dev,
 2657                             "Unable to allocate TX Descriptor memory\n");
 2658                         error = ENOMEM;
 2659                         goto err_tx_desc;
 2660                 }
 2661                 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
 2662                 bzero((void *)txr->tx_base, tsize);
 2663 
 2664                 /* Now allocate transmit buffers for the ring */
 2665                 if (ixgbe_allocate_transmit_buffers(txr)) {
 2666                         device_printf(dev,
 2667                             "Critical Failure setting up transmit buffers\n");
 2668                         error = ENOMEM;
 2669                         goto err_tx_desc;
 2670                 }
 2671 #if __FreeBSD_version >= 800000
 2672                 /* Allocate a buf ring */
 2673                 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
 2674                     M_WAITOK, &txr->tx_mtx);
 2675                 if (txr->br == NULL) {
 2676                         device_printf(dev,
 2677                             "Critical Failure setting up buf ring\n");
 2678                         error = ENOMEM;
 2679                         goto err_tx_desc;
 2680                 }
 2681 #endif
 2682         }
 2683 
 2684         /*
 2685          * Next the RX queues...
 2686          */ 
 2687         rsize = roundup2(adapter->num_rx_desc *
 2688             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
 2689         for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
 2690                 rxr = &adapter->rx_rings[i];
 2691                 /* Set up some basics */
 2692                 rxr->adapter = adapter;
 2693                 rxr->me = i;
 2694 
 2695                 /* Initialize the RX side lock */
 2696                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
 2697                     device_get_nameunit(dev), rxr->me);
 2698                 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
 2699 
 2700                 if (ixgbe_dma_malloc(adapter, rsize,
 2701                         &rxr->rxdma, BUS_DMA_NOWAIT)) {
 2702                         device_printf(dev,
 2703                             "Unable to allocate RxDescriptor memory\n");
 2704                         error = ENOMEM;
 2705                         goto err_rx_desc;
 2706                 }
 2707                 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
 2708                 bzero((void *)rxr->rx_base, rsize);
 2709 
 2710                 /* Allocate receive buffers for the ring*/
 2711                 if (ixgbe_allocate_receive_buffers(rxr)) {
 2712                         device_printf(dev,
 2713                             "Critical Failure setting up receive buffers\n");
 2714                         error = ENOMEM;
 2715                         goto err_rx_desc;
 2716                 }
 2717         }
 2718 
 2719         /*
 2720         ** Finally set up the queue holding structs
 2721         */
 2722         for (int i = 0; i < adapter->num_queues; i++) {
 2723                 que = &adapter->queues[i];
 2724                 que->adapter = adapter;
 2725                 que->txr = &adapter->tx_rings[i];
 2726                 que->rxr = &adapter->rx_rings[i];
 2727         }
 2728 
 2729         return (0);
 2730 
 2731 err_rx_desc:
 2732         for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
 2733                 ixgbe_dma_free(adapter, &rxr->rxdma);
 2734 err_tx_desc:
 2735         for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
 2736                 ixgbe_dma_free(adapter, &txr->txdma);
 2737         free(adapter->rx_rings, M_DEVBUF);
 2738 rx_fail:
 2739         free(adapter->tx_rings, M_DEVBUF);
 2740 tx_fail:
 2741         free(adapter->queues, M_DEVBUF);
 2742 fail:
 2743         return (error);
 2744 }
 2745 
 2746 /*********************************************************************
 2747  *
 2748  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 2749  *  the information needed to transmit a packet on the wire. This is
 2750  *  called only once at attach, setup is done every reset.
 2751  *
 2752  **********************************************************************/
 2753 static int
 2754 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
 2755 {
 2756         struct adapter *adapter = txr->adapter;
 2757         device_t dev = adapter->dev;
 2758         struct ixgbe_tx_buf *txbuf;
 2759         int error, i;
 2760 
 2761         /*
 2762          * Setup DMA descriptor areas.
 2763          */
 2764         if ((error = bus_dma_tag_create(NULL,           /* parent */
 2765                                1, 0,            /* alignment, bounds */
 2766                                BUS_SPACE_MAXADDR,       /* lowaddr */
 2767                                BUS_SPACE_MAXADDR,       /* highaddr */
 2768                                NULL, NULL,              /* filter, filterarg */
 2769                                IXGBE_TSO_SIZE,          /* maxsize */
 2770                                adapter->num_segs,       /* nsegments */
 2771                                PAGE_SIZE,               /* maxsegsize */
 2772                                0,                       /* flags */
 2773                                NULL,                    /* lockfunc */
 2774                                NULL,                    /* lockfuncarg */
 2775                                &txr->txtag))) {
 2776                 device_printf(dev,"Unable to allocate TX DMA tag\n");
 2777                 goto fail;
 2778         }
 2779 
 2780         if (!(txr->tx_buffers =
 2781             (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
 2782             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2783                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
 2784                 error = ENOMEM;
 2785                 goto fail;
 2786         }
 2787 
 2788         /* Create the descriptor buffer dma maps */
 2789         txbuf = txr->tx_buffers;
 2790         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
 2791                 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
 2792                 if (error != 0) {
 2793                         device_printf(dev, "Unable to create TX DMA map\n");
 2794                         goto fail;
 2795                 }
 2796         }
 2797 
 2798         return 0;
 2799 fail:
 2800         /* We free all, it handles case where we are in the middle */
 2801         ixgbe_free_transmit_structures(adapter);
 2802         return (error);
 2803 }
 2804 
 2805 /*********************************************************************
 2806  *
 2807  *  Initialize a transmit ring.
 2808  *
 2809  **********************************************************************/
 2810 static void
 2811 ixgbe_setup_transmit_ring(struct tx_ring *txr)
 2812 {
 2813         struct adapter *adapter = txr->adapter;
 2814         struct ixgbe_tx_buf *txbuf;
 2815         int i;
 2816 
 2817         /* Clear the old ring contents */
 2818         IXGBE_TX_LOCK(txr);
 2819         bzero((void *)txr->tx_base,
 2820               (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
 2821         /* Reset indices */
 2822         txr->next_avail_desc = 0;
 2823         txr->next_to_clean = 0;
 2824 
 2825         /* Free any existing tx buffers. */
 2826         txbuf = txr->tx_buffers;
 2827         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
 2828                 if (txbuf->m_head != NULL) {
 2829                         bus_dmamap_sync(txr->txtag, txbuf->map,
 2830                             BUS_DMASYNC_POSTWRITE);
 2831                         bus_dmamap_unload(txr->txtag, txbuf->map);
 2832                         m_freem(txbuf->m_head);
 2833                         txbuf->m_head = NULL;
 2834                 }
 2835                 /* Clear the EOP index */
 2836                 txbuf->eop_index = -1;
 2837         }
 2838 
 2839 #ifdef IXGBE_FDIR
 2840         /* Set the rate at which we sample packets */
 2841         if (adapter->hw.mac.type != ixgbe_mac_82598EB)
 2842                 txr->atr_sample = atr_sample_rate;
 2843 #endif
 2844 
 2845         /* Set number of descriptors available */
 2846         txr->tx_avail = adapter->num_tx_desc;
 2847 
 2848         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 2849             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2850         IXGBE_TX_UNLOCK(txr);
 2851 }
 2852 
 2853 /*********************************************************************
 2854  *
 2855  *  Initialize all transmit rings.
 2856  *
 2857  **********************************************************************/
 2858 static int
 2859 ixgbe_setup_transmit_structures(struct adapter *adapter)
 2860 {
 2861         struct tx_ring *txr = adapter->tx_rings;
 2862 
 2863         for (int i = 0; i < adapter->num_queues; i++, txr++)
 2864                 ixgbe_setup_transmit_ring(txr);
 2865 
 2866         return (0);
 2867 }
 2868 
 2869 /*********************************************************************
 2870  *
 2871  *  Enable transmit unit.
 2872  *
 2873  **********************************************************************/
 2874 static void
 2875 ixgbe_initialize_transmit_units(struct adapter *adapter)
 2876 {
 2877         struct tx_ring  *txr = adapter->tx_rings;
 2878         struct ixgbe_hw *hw = &adapter->hw;
 2879 
 2880         /* Setup the Base and Length of the Tx Descriptor Ring */
 2881 
 2882         for (int i = 0; i < adapter->num_queues; i++, txr++) {
 2883                 u64     tdba = txr->txdma.dma_paddr;
 2884                 u32     txctrl;
 2885 
 2886                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
 2887                        (tdba & 0x00000000ffffffffULL));
 2888                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
 2889                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
 2890                     adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
 2891 
 2892                 /* Setup the HW Tx Head and Tail descriptor pointers */
 2893                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
 2894                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
 2895 
 2896                 /* Setup Transmit Descriptor Cmd Settings */
 2897                 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
 2898                 txr->queue_status = IXGBE_QUEUE_IDLE;
 2899 
 2900                 /* Disable Head Writeback */
 2901                 switch (hw->mac.type) {
 2902                 case ixgbe_mac_82598EB:
 2903                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
 2904                         break;
 2905                 case ixgbe_mac_82599EB:
 2906                 default:
 2907                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
 2908                         break;
 2909                 }
 2910                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
 2911                 switch (hw->mac.type) {
 2912                 case ixgbe_mac_82598EB:
 2913                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
 2914                         break;
 2915                 case ixgbe_mac_82599EB:
 2916                 default:
 2917                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
 2918                         break;
 2919                 }
 2920 
 2921         }
 2922 
 2923         if (hw->mac.type != ixgbe_mac_82598EB) {
 2924                 u32 dmatxctl, rttdcs;
 2925                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 2926                 dmatxctl |= IXGBE_DMATXCTL_TE;
 2927                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
 2928                 /* Disable arbiter to set MTQC */
 2929                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 2930                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
 2931                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 2932                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
 2933                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
 2934                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 2935         }
 2936 
 2937         return;
 2938 }
 2939 
 2940 /*********************************************************************
 2941  *
 2942  *  Free all transmit rings.
 2943  *
 2944  **********************************************************************/
 2945 static void
 2946 ixgbe_free_transmit_structures(struct adapter *adapter)
 2947 {
 2948         struct tx_ring *txr = adapter->tx_rings;
 2949 
 2950         for (int i = 0; i < adapter->num_queues; i++, txr++) {
 2951                 IXGBE_TX_LOCK(txr);
 2952                 ixgbe_free_transmit_buffers(txr);
 2953                 ixgbe_dma_free(adapter, &txr->txdma);
 2954                 IXGBE_TX_UNLOCK(txr);
 2955                 IXGBE_TX_LOCK_DESTROY(txr);
 2956         }
 2957         free(adapter->tx_rings, M_DEVBUF);
 2958 }
 2959 
 2960 /*********************************************************************
 2961  *
 2962  *  Free transmit ring related data structures.
 2963  *
 2964  **********************************************************************/
 2965 static void
 2966 ixgbe_free_transmit_buffers(struct tx_ring *txr)
 2967 {
 2968         struct adapter *adapter = txr->adapter;
 2969         struct ixgbe_tx_buf *tx_buffer;
 2970         int             i;
 2971 
 2972         INIT_DEBUGOUT("free_transmit_ring: begin");
 2973 
 2974         if (txr->tx_buffers == NULL)
 2975                 return;
 2976 
 2977         tx_buffer = txr->tx_buffers;
 2978         for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
 2979                 if (tx_buffer->m_head != NULL) {
 2980                         bus_dmamap_sync(txr->txtag, tx_buffer->map,
 2981                             BUS_DMASYNC_POSTWRITE);
 2982                         bus_dmamap_unload(txr->txtag,
 2983                             tx_buffer->map);
 2984                         m_freem(tx_buffer->m_head);
 2985                         tx_buffer->m_head = NULL;
 2986                         if (tx_buffer->map != NULL) {
 2987                                 bus_dmamap_destroy(txr->txtag,
 2988                                     tx_buffer->map);
 2989                                 tx_buffer->map = NULL;
 2990                         }
 2991                 } else if (tx_buffer->map != NULL) {
 2992                         bus_dmamap_unload(txr->txtag,
 2993                             tx_buffer->map);
 2994                         bus_dmamap_destroy(txr->txtag,
 2995                             tx_buffer->map);
 2996                         tx_buffer->map = NULL;
 2997                 }
 2998         }
 2999 #if __FreeBSD_version >= 800000
 3000         if (txr->br != NULL)
 3001                 buf_ring_free(txr->br, M_DEVBUF);
 3002 #endif
 3003         if (txr->tx_buffers != NULL) {
 3004                 free(txr->tx_buffers, M_DEVBUF);
 3005                 txr->tx_buffers = NULL;
 3006         }
 3007         if (txr->txtag != NULL) {
 3008                 bus_dma_tag_destroy(txr->txtag);
 3009                 txr->txtag = NULL;
 3010         }
 3011         return;
 3012 }
 3013 
 3014 /*********************************************************************
 3015  *
 3016  *  Advanced Context Descriptor setup for VLAN or CSUM
 3017  *
 3018  **********************************************************************/
 3019 
 3020 static boolean_t
 3021 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
 3022 {
 3023         struct adapter *adapter = txr->adapter;
 3024         struct ixgbe_adv_tx_context_desc *TXD;
 3025         struct ixgbe_tx_buf        *tx_buffer;
 3026         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 3027         struct ether_vlan_header *eh;
 3028         struct ip *ip;
 3029         struct ip6_hdr *ip6;
 3030         int  ehdrlen, ip_hlen = 0;
 3031         u16     etype;
 3032         u8      ipproto = 0;
 3033         bool    offload = TRUE;
 3034         int ctxd = txr->next_avail_desc;
 3035         u16 vtag = 0;
 3036 
 3037 
 3038         if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
 3039                 offload = FALSE;
 3040 
 3041         tx_buffer = &txr->tx_buffers[ctxd];
 3042         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 3043 
 3044         /*
 3045         ** In advanced descriptors the vlan tag must 
 3046         ** be placed into the descriptor itself.
 3047         */
 3048         if (mp->m_flags & M_VLANTAG) {
 3049                 vtag = htole16(mp->m_pkthdr.ether_vtag);
 3050                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
 3051         } else if (offload == FALSE)
 3052                 return FALSE;
 3053 
 3054         /*
 3055          * Determine where frame payload starts.
 3056          * Jump over vlan headers if already present,
 3057          * helpful for QinQ too.
 3058          */
 3059         eh = mtod(mp, struct ether_vlan_header *);
 3060         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 3061                 etype = ntohs(eh->evl_proto);
 3062                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3063         } else {
 3064                 etype = ntohs(eh->evl_encap_proto);
 3065                 ehdrlen = ETHER_HDR_LEN;
 3066         }
 3067 
 3068         /* Set the ether header length */
 3069         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
 3070 
 3071         switch (etype) {
 3072                 case ETHERTYPE_IP:
 3073                         ip = (struct ip *)(mp->m_data + ehdrlen);
 3074                         ip_hlen = ip->ip_hl << 2;
 3075                         ipproto = ip->ip_p;
 3076                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 3077                         break;
 3078                 case ETHERTYPE_IPV6:
 3079                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 3080                         ip_hlen = sizeof(struct ip6_hdr);
 3081                         ipproto = ip6->ip6_nxt;
 3082                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
 3083                         break;
 3084                 default:
 3085                         offload = FALSE;
 3086                         break;
 3087         }
 3088 
 3089         vlan_macip_lens |= ip_hlen;
 3090         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 3091 
 3092         switch (ipproto) {
 3093                 case IPPROTO_TCP:
 3094                         if (mp->m_pkthdr.csum_flags & CSUM_TCP)
 3095                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 3096                         break;
 3097 
 3098                 case IPPROTO_UDP:
 3099                         if (mp->m_pkthdr.csum_flags & CSUM_UDP)
 3100                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
 3101                         break;
 3102 
 3103 #if __FreeBSD_version >= 800000
 3104                 case IPPROTO_SCTP:
 3105                         if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
 3106                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 3107                         break;
 3108 #endif
 3109                 default:
 3110                         offload = FALSE;
 3111                         break;
 3112         }
 3113 
 3114         /* Now copy bits into descriptor */
 3115         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
 3116         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
 3117         TXD->seqnum_seed = htole32(0);
 3118         TXD->mss_l4len_idx = htole32(0);
 3119 
 3120         tx_buffer->m_head = NULL;
 3121         tx_buffer->eop_index = -1;
 3122 
 3123         /* We've consumed the first desc, adjust counters */
 3124         if (++ctxd == adapter->num_tx_desc)
 3125                 ctxd = 0;
 3126         txr->next_avail_desc = ctxd;
 3127         --txr->tx_avail;
 3128 
 3129         return (offload);
 3130 }
 3131 
 3132 /**********************************************************************
 3133  *
 3134  *  Setup work for hardware segmentation offload (TSO) on
 3135  *  adapters using advanced tx descriptors
 3136  *
 3137  **********************************************************************/
 3138 static boolean_t
 3139 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
 3140 {
 3141         struct adapter *adapter = txr->adapter;
 3142         struct ixgbe_adv_tx_context_desc *TXD;
 3143         struct ixgbe_tx_buf        *tx_buffer;
 3144         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 3145         u32 mss_l4len_idx = 0;
 3146         u16 vtag = 0;
 3147         int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
 3148         struct ether_vlan_header *eh;
 3149         struct ip *ip;
 3150         struct tcphdr *th;
 3151 
 3152 
 3153         /*
 3154          * Determine where frame payload starts.
 3155          * Jump over vlan headers if already present
 3156          */
 3157         eh = mtod(mp, struct ether_vlan_header *);
 3158         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
 3159                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3160         else
 3161                 ehdrlen = ETHER_HDR_LEN;
 3162 
 3163         /* Ensure we have at least the IP+TCP header in the first mbuf. */
 3164         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
 3165                 return FALSE;
 3166 
 3167         ctxd = txr->next_avail_desc;
 3168         tx_buffer = &txr->tx_buffers[ctxd];
 3169         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 3170 
 3171         ip = (struct ip *)(mp->m_data + ehdrlen);
 3172         if (ip->ip_p != IPPROTO_TCP)
 3173                 return FALSE;   /* 0 */
 3174         ip->ip_sum = 0;
 3175         ip_hlen = ip->ip_hl << 2;
 3176         th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
 3177         th->th_sum = in_pseudo(ip->ip_src.s_addr,
 3178             ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 3179         tcp_hlen = th->th_off << 2;
 3180         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
 3181 
 3182         /* This is used in the transmit desc in encap */
 3183         *paylen = mp->m_pkthdr.len - hdrlen;
 3184 
 3185         /* VLAN MACLEN IPLEN */
 3186         if (mp->m_flags & M_VLANTAG) {
 3187                 vtag = htole16(mp->m_pkthdr.ether_vtag);
 3188                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
 3189         }
 3190 
 3191         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
 3192         vlan_macip_lens |= ip_hlen;
 3193         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
 3194 
 3195         /* ADV DTYPE TUCMD */
 3196         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 3197         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 3198         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 3199         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
 3200 
 3201 
 3202         /* MSS L4LEN IDX */
 3203         mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
 3204         mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
 3205         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
 3206 
 3207         TXD->seqnum_seed = htole32(0);
 3208         tx_buffer->m_head = NULL;
 3209         tx_buffer->eop_index = -1;
 3210 
 3211         if (++ctxd == adapter->num_tx_desc)
 3212                 ctxd = 0;
 3213 
 3214         txr->tx_avail--;
 3215         txr->next_avail_desc = ctxd;
 3216         return TRUE;
 3217 }
 3218 
 3219 #ifdef IXGBE_FDIR
 3220 /*
 3221 ** This routine parses packet headers so that Flow
 3222 ** Director can make a hashed filter table entry 
 3223 ** allowing traffic flows to be identified and kept
 3224 ** on the same cpu.  This would be a performance
 3225 ** hit, but we only do it at IXGBE_FDIR_RATE of
 3226 ** packets.
 3227 */
 3228 static void
 3229 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
 3230 {
 3231         struct adapter                  *adapter = txr->adapter;
 3232         struct ix_queue                 *que;
 3233         struct ip                       *ip;
 3234         struct tcphdr                   *th;
 3235         struct udphdr                   *uh;
 3236         struct ether_vlan_header        *eh;
 3237         union ixgbe_atr_hash_dword      input = {.dword = 0}; 
 3238         union ixgbe_atr_hash_dword      common = {.dword = 0}; 
 3239         int                             ehdrlen, ip_hlen;
 3240         u16                             etype;
 3241 
 3242         eh = mtod(mp, struct ether_vlan_header *);
 3243         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 3244                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3245                 etype = eh->evl_proto;
 3246         } else {
 3247                 ehdrlen = ETHER_HDR_LEN;
 3248                 etype = eh->evl_encap_proto;
 3249         }
 3250 
 3251         /* Only handling IPv4 */
 3252         if (etype != htons(ETHERTYPE_IP))
 3253                 return;
 3254 
 3255         ip = (struct ip *)(mp->m_data + ehdrlen);
 3256         ip_hlen = ip->ip_hl << 2;
 3257 
 3258         /* check if we're UDP or TCP */
 3259         switch (ip->ip_p) {
 3260         case IPPROTO_TCP:
 3261                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
 3262                 /* src and dst are inverted */
 3263                 common.port.dst ^= th->th_sport;
 3264                 common.port.src ^= th->th_dport;
 3265                 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
 3266                 break;
 3267         case IPPROTO_UDP:
 3268                 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
 3269                 /* src and dst are inverted */
 3270                 common.port.dst ^= uh->uh_sport;
 3271                 common.port.src ^= uh->uh_dport;
 3272                 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
 3273                 break;
 3274         default:
 3275                 return;
 3276         }
 3277 
 3278         input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
 3279         if (mp->m_pkthdr.ether_vtag)
 3280                 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
 3281         else
 3282                 common.flex_bytes ^= etype;
 3283         common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
 3284 
 3285         que = &adapter->queues[txr->me];
 3286         /*
 3287         ** This assumes the Rx queue and Tx
 3288         ** queue are bound to the same CPU
 3289         */
 3290         ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
 3291             input, common, que->msix);
 3292 }
 3293 #endif /* IXGBE_FDIR */
 3294 
 3295 /**********************************************************************
 3296  *
 3297  *  Examine each tx_buffer in the used queue. If the hardware is done
 3298  *  processing the packet then free associated resources. The
 3299  *  tx_buffer is put back on the free queue.
 3300  *
 3301  **********************************************************************/
 3302 static boolean_t
 3303 ixgbe_txeof(struct tx_ring *txr)
 3304 {
 3305         struct adapter  *adapter = txr->adapter;
 3306         struct ifnet    *ifp = adapter->ifp;
 3307         u32     first, last, done, processed;
 3308         struct ixgbe_tx_buf *tx_buffer;
 3309         struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
 3310 
 3311         mtx_assert(&txr->tx_mtx, MA_OWNED);
 3312 
 3313         if (txr->tx_avail == adapter->num_tx_desc) {
 3314                 txr->queue_status = IXGBE_QUEUE_IDLE;
 3315                 return FALSE;
 3316         }
 3317 
 3318         processed = 0;
 3319         first = txr->next_to_clean;
 3320         tx_buffer = &txr->tx_buffers[first];
 3321         /* For cleanup we just use legacy struct */
 3322         tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
 3323         last = tx_buffer->eop_index;
 3324         if (last == -1)
 3325                 return FALSE;
 3326         eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
 3327 
 3328         /*
 3329         ** Get the index of the first descriptor
 3330         ** BEYOND the EOP and call that 'done'.
 3331         ** I do this so the comparison in the
 3332         ** inner while loop below can be simple
 3333         */
 3334         if (++last == adapter->num_tx_desc) last = 0;
 3335         done = last;
 3336 
 3337         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 3338             BUS_DMASYNC_POSTREAD);
 3339         /*
 3340         ** Only the EOP descriptor of a packet now has the DD
 3341         ** bit set, this is what we look for...
 3342         */
 3343         while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
 3344                 /* We clean the range of the packet */
 3345                 while (first != done) {
 3346                         tx_desc->upper.data = 0;
 3347                         tx_desc->lower.data = 0;
 3348                         tx_desc->buffer_addr = 0;
 3349                         ++txr->tx_avail;
 3350                         ++processed;
 3351 
 3352                         if (tx_buffer->m_head) {
 3353                                 txr->bytes +=
 3354                                     tx_buffer->m_head->m_pkthdr.len;
 3355                                 bus_dmamap_sync(txr->txtag,
 3356                                     tx_buffer->map,
 3357                                     BUS_DMASYNC_POSTWRITE);
 3358                                 bus_dmamap_unload(txr->txtag,
 3359                                     tx_buffer->map);
 3360                                 m_freem(tx_buffer->m_head);
 3361                                 tx_buffer->m_head = NULL;
 3362                                 tx_buffer->map = NULL;
 3363                         }
 3364                         tx_buffer->eop_index = -1;
 3365                         txr->watchdog_time = ticks;
 3366 
 3367                         if (++first == adapter->num_tx_desc)
 3368                                 first = 0;
 3369 
 3370                         tx_buffer = &txr->tx_buffers[first];
 3371                         tx_desc =
 3372                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
 3373                 }
 3374                 ++txr->packets;
 3375                 ++ifp->if_opackets;
 3376                 /* See if there is more work now */
 3377                 last = tx_buffer->eop_index;
 3378                 if (last != -1) {
 3379                         eop_desc =
 3380                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
 3381                         /* Get next done point */
 3382                         if (++last == adapter->num_tx_desc) last = 0;
 3383                         done = last;
 3384                 } else
 3385                         break;
 3386         }
 3387         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 3388             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3389 
 3390         txr->next_to_clean = first;
 3391 
 3392         /*
 3393         ** Watchdog calculation, we know there's
 3394         ** work outstanding or the first return
 3395         ** would have been taken, so none processed
 3396         ** for too long indicates a hang.
 3397         */
 3398         if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
 3399                 txr->queue_status = IXGBE_QUEUE_HUNG;
 3400 
 3401         /*
 3402          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
 3403          * it is OK to send packets. If there are no pending descriptors,
 3404          * clear the timeout. Otherwise, if some descriptors have been freed,
 3405          * restart the timeout.
 3406          */
 3407         if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
 3408                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3409                 if (txr->tx_avail == adapter->num_tx_desc) {
 3410                         txr->queue_status = IXGBE_QUEUE_IDLE;
 3411                         return FALSE;
 3412                 }
 3413         }
 3414 
 3415         return TRUE;
 3416 }
 3417 
 3418 /*********************************************************************
 3419  *
 3420  *  Refresh mbuf buffers for RX descriptor rings
 3421  *   - now keeps its own state so discards due to resource
 3422  *     exhaustion are unnecessary, if an mbuf cannot be obtained
 3423  *     it just returns, keeping its placeholder, thus it can simply
 3424  *     be recalled to try again.
 3425  *
 3426  **********************************************************************/
 3427 static void
 3428 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
 3429 {
 3430         struct adapter          *adapter = rxr->adapter;
 3431         bus_dma_segment_t       hseg[1];
 3432         bus_dma_segment_t       pseg[1];
 3433         struct ixgbe_rx_buf     *rxbuf;
 3434         struct mbuf             *mh, *mp;
 3435         int                     i, j, nsegs, error;
 3436         bool                    refreshed = FALSE;
 3437 
 3438         i = j = rxr->next_to_refresh;
 3439         /* Control the loop with one beyond */
 3440         if (++j == adapter->num_rx_desc)
 3441                 j = 0;
 3442 
 3443         while (j != limit) {
 3444                 rxbuf = &rxr->rx_buffers[i];
 3445                 if (rxr->hdr_split == FALSE)
 3446                         goto no_split;
 3447 
 3448                 if (rxbuf->m_head == NULL) {
 3449                         mh = m_gethdr(M_DONTWAIT, MT_DATA);
 3450                         if (mh == NULL)
 3451                                 goto update;
 3452                 } else
 3453                         mh = rxbuf->m_head;
 3454 
 3455                 mh->m_pkthdr.len = mh->m_len = MHLEN;
 3456                 mh->m_len = MHLEN;
 3457                 mh->m_flags |= M_PKTHDR;
 3458                 /* Get the memory mapping */
 3459                 error = bus_dmamap_load_mbuf_sg(rxr->htag,
 3460                     rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
 3461                 if (error != 0) {
 3462                         printf("Refresh mbufs: hdr dmamap load"
 3463                             " failure - %d\n", error);
 3464                         m_free(mh);
 3465                         rxbuf->m_head = NULL;
 3466                         goto update;
 3467                 }
 3468                 rxbuf->m_head = mh;
 3469                 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 3470                     BUS_DMASYNC_PREREAD);
 3471                 rxr->rx_base[i].read.hdr_addr =
 3472                     htole64(hseg[0].ds_addr);
 3473 
 3474 no_split:
 3475                 if (rxbuf->m_pack == NULL) {
 3476                         mp = m_getjcl(M_DONTWAIT, MT_DATA,
 3477                             M_PKTHDR, adapter->rx_mbuf_sz);
 3478                         if (mp == NULL)
 3479                                 goto update;
 3480                 } else
 3481                         mp = rxbuf->m_pack;
 3482 
 3483                 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
 3484                 /* Get the memory mapping */
 3485                 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
 3486                     rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
 3487                 if (error != 0) {
 3488                         printf("Refresh mbufs: payload dmamap load"
 3489                             " failure - %d\n", error);
 3490                         m_free(mp);
 3491                         rxbuf->m_pack = NULL;
 3492                         goto update;
 3493                 }
 3494                 rxbuf->m_pack = mp;
 3495                 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 3496                     BUS_DMASYNC_PREREAD);
 3497                 rxr->rx_base[i].read.pkt_addr =
 3498                     htole64(pseg[0].ds_addr);
 3499 
 3500                 refreshed = TRUE;
 3501                 /* Next is precalculated */
 3502                 i = j;
 3503                 rxr->next_to_refresh = i;
 3504                 if (++j == adapter->num_rx_desc)
 3505                         j = 0;
 3506         }
 3507 update:
 3508         if (refreshed) /* Update hardware tail index */
 3509                 IXGBE_WRITE_REG(&adapter->hw,
 3510                     IXGBE_RDT(rxr->me), rxr->next_to_refresh);
 3511         return;
 3512 }
 3513 
 3514 /*********************************************************************
 3515  *
 3516  *  Allocate memory for rx_buffer structures. Since we use one
 3517  *  rx_buffer per received packet, the maximum number of rx_buffer's
 3518  *  that we'll need is equal to the number of receive descriptors
 3519  *  that we've allocated.
 3520  *
 3521  **********************************************************************/
 3522 static int
 3523 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
 3524 {
 3525         struct  adapter         *adapter = rxr->adapter;
 3526         device_t                dev = adapter->dev;
 3527         struct ixgbe_rx_buf     *rxbuf;
 3528         int                     i, bsize, error;
 3529 
 3530         bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
 3531         if (!(rxr->rx_buffers =
 3532             (struct ixgbe_rx_buf *) malloc(bsize,
 3533             M_DEVBUF, M_NOWAIT | M_ZERO))) {
 3534                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
 3535                 error = ENOMEM;
 3536                 goto fail;
 3537         }
 3538 
 3539         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
 3540                                    1, 0,        /* alignment, bounds */
 3541                                    BUS_SPACE_MAXADDR,   /* lowaddr */
 3542                                    BUS_SPACE_MAXADDR,   /* highaddr */
 3543                                    NULL, NULL,          /* filter, filterarg */
 3544                                    MSIZE,               /* maxsize */
 3545                                    1,                   /* nsegments */
 3546                                    MSIZE,               /* maxsegsize */
 3547                                    0,                   /* flags */
 3548                                    NULL,                /* lockfunc */
 3549                                    NULL,                /* lockfuncarg */
 3550                                    &rxr->htag))) {
 3551                 device_printf(dev, "Unable to create RX DMA tag\n");
 3552                 goto fail;
 3553         }
 3554 
 3555         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
 3556                                    1, 0,        /* alignment, bounds */
 3557                                    BUS_SPACE_MAXADDR,   /* lowaddr */
 3558                                    BUS_SPACE_MAXADDR,   /* highaddr */
 3559                                    NULL, NULL,          /* filter, filterarg */
 3560                                    MJUM16BYTES,         /* maxsize */
 3561                                    1,                   /* nsegments */
 3562                                    MJUM16BYTES,         /* maxsegsize */
 3563                                    0,                   /* flags */
 3564                                    NULL,                /* lockfunc */
 3565                                    NULL,                /* lockfuncarg */
 3566                                    &rxr->ptag))) {
 3567                 device_printf(dev, "Unable to create RX DMA tag\n");
 3568                 goto fail;
 3569         }
 3570 
 3571         for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
 3572                 rxbuf = &rxr->rx_buffers[i];
 3573                 error = bus_dmamap_create(rxr->htag,
 3574                     BUS_DMA_NOWAIT, &rxbuf->hmap);
 3575                 if (error) {
 3576                         device_printf(dev, "Unable to create RX head map\n");
 3577                         goto fail;
 3578                 }
 3579                 error = bus_dmamap_create(rxr->ptag,
 3580                     BUS_DMA_NOWAIT, &rxbuf->pmap);
 3581                 if (error) {
 3582                         device_printf(dev, "Unable to create RX pkt map\n");
 3583                         goto fail;
 3584                 }
 3585         }
 3586 
 3587         return (0);
 3588 
 3589 fail:
 3590         /* Frees all, but can handle partial completion */
 3591         ixgbe_free_receive_structures(adapter);
 3592         return (error);
 3593 }
 3594 
 3595 /*
 3596 ** Used to detect a descriptor that has
 3597 ** been merged by Hardware RSC.
 3598 */
 3599 static inline u32
 3600 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
 3601 {
 3602         return (le32toh(rx->wb.lower.lo_dword.data) &
 3603             IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
 3604 }
 3605 
 3606 /*********************************************************************
 3607  *
 3608  *  Initialize Hardware RSC (LRO) feature on 82599
 3609  *  for an RX ring, this is toggled by the LRO capability
 3610  *  even though it is transparent to the stack.
 3611  *
 3612  **********************************************************************/
 3613 static void
 3614 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
 3615 {
 3616         struct  adapter         *adapter = rxr->adapter;
 3617         struct  ixgbe_hw        *hw = &adapter->hw;
 3618         u32                     rscctrl, rdrxctl;
 3619 
 3620         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 3621         rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
 3622         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 3623         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
 3624         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 3625 
 3626         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
 3627         rscctrl |= IXGBE_RSCCTL_RSCEN;
 3628         /*
 3629         ** Limit the total number of descriptors that
 3630         ** can be combined, so it does not exceed 64K
 3631         */
 3632         if (adapter->rx_mbuf_sz == MCLBYTES)
 3633                 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 3634         else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
 3635                 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
 3636         else if (adapter->rx_mbuf_sz == MJUM9BYTES)
 3637                 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
 3638         else  /* Using 16K cluster */
 3639                 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
 3640 
 3641         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
 3642 
 3643         /* Enable TCP header recognition */
 3644         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
 3645             (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
 3646             IXGBE_PSRTYPE_TCPHDR));
 3647 
 3648         /* Disable RSC for ACK packets */
 3649         IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
 3650             (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
 3651 
 3652         rxr->hw_rsc = TRUE;
 3653 }
 3654 
 3655 
 3656 static void     
 3657 ixgbe_free_receive_ring(struct rx_ring *rxr)
 3658 { 
 3659         struct  adapter         *adapter;
 3660         struct ixgbe_rx_buf       *rxbuf;
 3661         int i;
 3662 
 3663         adapter = rxr->adapter;
 3664         for (i = 0; i < adapter->num_rx_desc; i++) {
 3665                 rxbuf = &rxr->rx_buffers[i];
 3666                 if (rxbuf->m_head != NULL) {
 3667                         bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 3668                             BUS_DMASYNC_POSTREAD);
 3669                         bus_dmamap_unload(rxr->htag, rxbuf->hmap);
 3670                         rxbuf->m_head->m_flags |= M_PKTHDR;
 3671                         m_freem(rxbuf->m_head);
 3672                 }
 3673                 if (rxbuf->m_pack != NULL) {
 3674                         bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 3675                             BUS_DMASYNC_POSTREAD);
 3676                         bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
 3677                         rxbuf->m_pack->m_flags |= M_PKTHDR;
 3678                         m_freem(rxbuf->m_pack);
 3679                 }
 3680                 rxbuf->m_head = NULL;
 3681                 rxbuf->m_pack = NULL;
 3682         }
 3683 }
 3684 
 3685 
 3686 /*********************************************************************
 3687  *
 3688  *  Initialize a receive ring and its buffers.
 3689  *
 3690  **********************************************************************/
 3691 static int
 3692 ixgbe_setup_receive_ring(struct rx_ring *rxr)
 3693 {
 3694         struct  adapter         *adapter;
 3695         struct ifnet            *ifp;
 3696         device_t                dev;
 3697         struct ixgbe_rx_buf     *rxbuf;
 3698         bus_dma_segment_t       pseg[1], hseg[1];
 3699         struct lro_ctrl         *lro = &rxr->lro;
 3700         int                     rsize, nsegs, error = 0;
 3701 
 3702         adapter = rxr->adapter;
 3703         ifp = adapter->ifp;
 3704         dev = adapter->dev;
 3705 
 3706         /* Clear the ring contents */
 3707         IXGBE_RX_LOCK(rxr);
 3708         rsize = roundup2(adapter->num_rx_desc *
 3709             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
 3710         bzero((void *)rxr->rx_base, rsize);
 3711 
 3712         /* Free current RX buffer structs and their mbufs */
 3713         ixgbe_free_receive_ring(rxr);
 3714 
 3715         /* Configure header split? */
 3716         if (ixgbe_header_split)
 3717                 rxr->hdr_split = TRUE;
 3718 
 3719         /* Now replenish the mbufs */
 3720         for (int j = 0; j != adapter->num_rx_desc; ++j) {
 3721                 struct mbuf     *mh, *mp;
 3722 
 3723                 rxbuf = &rxr->rx_buffers[j];
 3724                 /*
 3725                 ** Don't allocate mbufs if not
 3726                 ** doing header split, its wasteful
 3727                 */ 
 3728                 if (rxr->hdr_split == FALSE)
 3729                         goto skip_head;
 3730 
 3731                 /* First the header */
 3732                 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
 3733                 if (rxbuf->m_head == NULL) {
 3734                         error = ENOBUFS;
 3735                         goto fail;
 3736                 }
 3737                 m_adj(rxbuf->m_head, ETHER_ALIGN);
 3738                 mh = rxbuf->m_head;
 3739                 mh->m_len = mh->m_pkthdr.len = MHLEN;
 3740                 mh->m_flags |= M_PKTHDR;
 3741                 /* Get the memory mapping */
 3742                 error = bus_dmamap_load_mbuf_sg(rxr->htag,
 3743                     rxbuf->hmap, rxbuf->m_head, hseg,
 3744                     &nsegs, BUS_DMA_NOWAIT);
 3745                 if (error != 0) /* Nothing elegant to do here */
 3746                         goto fail;
 3747                 bus_dmamap_sync(rxr->htag,
 3748                     rxbuf->hmap, BUS_DMASYNC_PREREAD);
 3749                 /* Update descriptor */
 3750                 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
 3751 
 3752 skip_head:
 3753                 /* Now the payload cluster */
 3754                 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
 3755                     M_PKTHDR, adapter->rx_mbuf_sz);
 3756                 if (rxbuf->m_pack == NULL) {
 3757                         error = ENOBUFS;
 3758                         goto fail;
 3759                 }
 3760                 mp = rxbuf->m_pack;
 3761                 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
 3762                 /* Get the memory mapping */
 3763                 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
 3764                     rxbuf->pmap, mp, pseg,
 3765                     &nsegs, BUS_DMA_NOWAIT);
 3766                 if (error != 0)
 3767                         goto fail;
 3768                 bus_dmamap_sync(rxr->ptag,
 3769                     rxbuf->pmap, BUS_DMASYNC_PREREAD);
 3770                 /* Update descriptor */
 3771                 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
 3772         }
 3773 
 3774 
 3775         /* Setup our descriptor indices */
 3776         rxr->next_to_check = 0;
 3777         rxr->next_to_refresh = 0;
 3778         rxr->lro_enabled = FALSE;
 3779         rxr->rx_split_packets = 0;
 3780         rxr->rx_bytes = 0;
 3781         rxr->discard = FALSE;
 3782 
 3783         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 3784             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3785 
 3786         /*
 3787         ** Now set up the LRO interface:
 3788         ** 82598 uses software LRO, the
 3789         ** 82599 uses a hardware assist.
 3790         */
 3791         if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
 3792             (ifp->if_capenable & IFCAP_RXCSUM) &&
 3793             (ifp->if_capenable & IFCAP_LRO))
 3794                 ixgbe_setup_hw_rsc(rxr);
 3795         else if (ifp->if_capenable & IFCAP_LRO) {
 3796                 int err = tcp_lro_init(lro);
 3797                 if (err) {
 3798                         device_printf(dev, "LRO Initialization failed!\n");
 3799                         goto fail;
 3800                 }
 3801                 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
 3802                 rxr->lro_enabled = TRUE;
 3803                 lro->ifp = adapter->ifp;
 3804         }
 3805 
 3806         IXGBE_RX_UNLOCK(rxr);
 3807         return (0);
 3808 
 3809 fail:
 3810         ixgbe_free_receive_ring(rxr);
 3811         IXGBE_RX_UNLOCK(rxr);
 3812         return (error);
 3813 }
 3814 
 3815 /*********************************************************************
 3816  *
 3817  *  Initialize all receive rings.
 3818  *
 3819  **********************************************************************/
 3820 static int
 3821 ixgbe_setup_receive_structures(struct adapter *adapter)
 3822 {
 3823         struct rx_ring *rxr = adapter->rx_rings;
 3824         int j;
 3825 
 3826         for (j = 0; j < adapter->num_queues; j++, rxr++)
 3827                 if (ixgbe_setup_receive_ring(rxr))
 3828                         goto fail;
 3829 
 3830         return (0);
 3831 fail:
 3832         /*
 3833          * Free RX buffers allocated so far, we will only handle
 3834          * the rings that completed, the failing case will have
 3835          * cleaned up for itself. 'j' failed, so its the terminus.
 3836          */
 3837         for (int i = 0; i < j; ++i) {
 3838                 rxr = &adapter->rx_rings[i];
 3839                 ixgbe_free_receive_ring(rxr);
 3840         }
 3841 
 3842         return (ENOBUFS);
 3843 }
 3844 
 3845 /*********************************************************************
 3846  *
 3847  *  Setup receive registers and features.
 3848  *
 3849  **********************************************************************/
 3850 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 3851 
 3852 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
 3853         
 3854 static void
 3855 ixgbe_initialize_receive_units(struct adapter *adapter)
 3856 {
 3857         struct  rx_ring *rxr = adapter->rx_rings;
 3858         struct ixgbe_hw *hw = &adapter->hw;
 3859         struct ifnet   *ifp = adapter->ifp;
 3860         u32             bufsz, rxctrl, fctrl, srrctl, rxcsum;
 3861         u32             reta, mrqc = 0, hlreg, random[10];
 3862 
 3863 
 3864         /*
 3865          * Make sure receives are disabled while
 3866          * setting up the descriptor ring
 3867          */
 3868         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 3869         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
 3870             rxctrl & ~IXGBE_RXCTRL_RXEN);
 3871 
 3872         /* Enable broadcasts */
 3873         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 3874         fctrl |= IXGBE_FCTRL_BAM;
 3875         fctrl |= IXGBE_FCTRL_DPF;
 3876         fctrl |= IXGBE_FCTRL_PMCF;
 3877         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 3878 
 3879         /* Set for Jumbo Frames? */
 3880         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 3881         if (ifp->if_mtu > ETHERMTU)
 3882                 hlreg |= IXGBE_HLREG0_JUMBOEN;
 3883         else
 3884                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
 3885         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
 3886 
 3887         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 3888 
 3889         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 3890                 u64 rdba = rxr->rxdma.dma_paddr;
 3891 
 3892                 /* Setup the Base and Length of the Rx Descriptor Ring */
 3893                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
 3894                                (rdba & 0x00000000ffffffffULL));
 3895                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
 3896                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
 3897                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 3898 
 3899                 /* Set up the SRRCTL register */
 3900                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
 3901                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 3902                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 3903                 srrctl |= bufsz;
 3904                 if (rxr->hdr_split) {
 3905                         /* Use a standard mbuf for the header */
 3906                         srrctl |= ((IXGBE_RX_HDR <<
 3907                             IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
 3908                             & IXGBE_SRRCTL_BSIZEHDR_MASK);
 3909                         srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 3910                 } else
 3911                         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 3912                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
 3913 
 3914                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
 3915                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
 3916                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
 3917         }
 3918 
 3919         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
 3920                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 3921                               IXGBE_PSRTYPE_UDPHDR |
 3922                               IXGBE_PSRTYPE_IPV4HDR |
 3923                               IXGBE_PSRTYPE_IPV6HDR;
 3924                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
 3925         }
 3926 
 3927         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 3928 
 3929         /* Setup RSS */
 3930         if (adapter->num_queues > 1) {
 3931                 int i, j;
 3932                 reta = 0;
 3933 
 3934                 /* set up random bits */
 3935                 arc4rand(&random, sizeof(random), 0);
 3936 
 3937                 /* Set up the redirection table */
 3938                 for (i = 0, j = 0; i < 128; i++, j++) {
 3939                         if (j == adapter->num_queues) j = 0;
 3940                         reta = (reta << 8) | (j * 0x11);
 3941                         if ((i & 3) == 3)
 3942                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
 3943                 }
 3944 
 3945                 /* Now fill our hash function seeds */
 3946                 for (int i = 0; i < 10; i++)
 3947                         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
 3948 
 3949                 /* Perform hash on these packet types */
 3950                 mrqc = IXGBE_MRQC_RSSEN
 3951                      | IXGBE_MRQC_RSS_FIELD_IPV4
 3952                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
 3953                      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
 3954                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
 3955                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX
 3956                      | IXGBE_MRQC_RSS_FIELD_IPV6
 3957                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
 3958                      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
 3959                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 3960                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 3961 
 3962                 /* RSS and RX IPP Checksum are mutually exclusive */
 3963                 rxcsum |= IXGBE_RXCSUM_PCSD;
 3964         }
 3965 
 3966         if (ifp->if_capenable & IFCAP_RXCSUM)
 3967                 rxcsum |= IXGBE_RXCSUM_PCSD;
 3968 
 3969         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
 3970                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
 3971 
 3972         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 3973 
 3974         return;
 3975 }
 3976 
 3977 /*********************************************************************
 3978  *
 3979  *  Free all receive rings.
 3980  *
 3981  **********************************************************************/
 3982 static void
 3983 ixgbe_free_receive_structures(struct adapter *adapter)
 3984 {
 3985         struct rx_ring *rxr = adapter->rx_rings;
 3986 
 3987         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 3988                 struct lro_ctrl         *lro = &rxr->lro;
 3989                 ixgbe_free_receive_buffers(rxr);
 3990                 /* Free LRO memory */
 3991                 tcp_lro_free(lro);
 3992                 /* Free the ring memory as well */
 3993                 ixgbe_dma_free(adapter, &rxr->rxdma);
 3994         }
 3995 
 3996         free(adapter->rx_rings, M_DEVBUF);
 3997 }
 3998 
 3999 
 4000 /*********************************************************************
 4001  *
 4002  *  Free receive ring data structures
 4003  *
 4004  **********************************************************************/
 4005 static void
 4006 ixgbe_free_receive_buffers(struct rx_ring *rxr)
 4007 {
 4008         struct adapter          *adapter = rxr->adapter;
 4009         struct ixgbe_rx_buf     *rxbuf;
 4010 
 4011         INIT_DEBUGOUT("free_receive_structures: begin");
 4012 
 4013         /* Cleanup any existing buffers */
 4014         if (rxr->rx_buffers != NULL) {
 4015                 for (int i = 0; i < adapter->num_rx_desc; i++) {
 4016                         rxbuf = &rxr->rx_buffers[i];
 4017                         if (rxbuf->m_head != NULL) {
 4018                                 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 4019                                     BUS_DMASYNC_POSTREAD);
 4020                                 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
 4021                                 rxbuf->m_head->m_flags |= M_PKTHDR;
 4022                                 m_freem(rxbuf->m_head);
 4023                         }
 4024                         if (rxbuf->m_pack != NULL) {
 4025                                 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 4026                                     BUS_DMASYNC_POSTREAD);
 4027                                 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
 4028                                 rxbuf->m_pack->m_flags |= M_PKTHDR;
 4029                                 m_freem(rxbuf->m_pack);
 4030                         }
 4031                         rxbuf->m_head = NULL;
 4032                         rxbuf->m_pack = NULL;
 4033                         if (rxbuf->hmap != NULL) {
 4034                                 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
 4035                                 rxbuf->hmap = NULL;
 4036                         }
 4037                         if (rxbuf->pmap != NULL) {
 4038                                 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
 4039                                 rxbuf->pmap = NULL;
 4040                         }
 4041                 }
 4042                 if (rxr->rx_buffers != NULL) {
 4043                         free(rxr->rx_buffers, M_DEVBUF);
 4044                         rxr->rx_buffers = NULL;
 4045                 }
 4046         }
 4047 
 4048         if (rxr->htag != NULL) {
 4049                 bus_dma_tag_destroy(rxr->htag);
 4050                 rxr->htag = NULL;
 4051         }
 4052         if (rxr->ptag != NULL) {
 4053                 bus_dma_tag_destroy(rxr->ptag);
 4054                 rxr->ptag = NULL;
 4055         }
 4056 
 4057         return;
 4058 }
 4059 
 4060 static __inline void
 4061 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
 4062 {
 4063                  
 4064         /*
 4065          * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
 4066          * should be computed by hardware. Also it should not have VLAN tag in
 4067          * ethernet header.
 4068          */
 4069         if (rxr->lro_enabled &&
 4070             (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 4071             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
 4072             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
 4073             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
 4074             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 4075             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 4076                 /*
 4077                  * Send to the stack if:
 4078                  **  - LRO not enabled, or
 4079                  **  - no LRO resources, or
 4080                  **  - lro enqueue fails
 4081                  */
 4082                 if (rxr->lro.lro_cnt != 0)
 4083                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
 4084                                 return;
 4085         }
 4086         IXGBE_RX_UNLOCK(rxr);
 4087         (*ifp->if_input)(ifp, m);
 4088         IXGBE_RX_LOCK(rxr);
 4089 }
 4090 
 4091 static __inline void
 4092 ixgbe_rx_discard(struct rx_ring *rxr, int i)
 4093 {
 4094         struct ixgbe_rx_buf     *rbuf;
 4095 
 4096         rbuf = &rxr->rx_buffers[i];
 4097 
 4098         if (rbuf->fmp != NULL) {/* Partial chain ? */
 4099                 rbuf->fmp->m_flags |= M_PKTHDR;
 4100                 m_freem(rbuf->fmp);
 4101                 rbuf->fmp = NULL;
 4102         }
 4103 
 4104         /*
 4105         ** With advanced descriptors the writeback
 4106         ** clobbers the buffer addrs, so its easier
 4107         ** to just free the existing mbufs and take
 4108         ** the normal refresh path to get new buffers
 4109         ** and mapping.
 4110         */
 4111         if (rbuf->m_head) {
 4112                 m_free(rbuf->m_head);
 4113                 rbuf->m_head = NULL;
 4114         }
 4115  
 4116         if (rbuf->m_pack) {
 4117                 m_free(rbuf->m_pack);
 4118                 rbuf->m_pack = NULL;
 4119         }
 4120 
 4121         return;
 4122 }
 4123 
 4124 
 4125 /*********************************************************************
 4126  *
 4127  *  This routine executes in interrupt context. It replenishes
 4128  *  the mbufs in the descriptor and sends data which has been
 4129  *  dma'ed into host memory to upper layer.
 4130  *
 4131  *  We loop at most count times if count is > 0, or until done if
 4132  *  count < 0.
 4133  *
 4134  *  Return TRUE for more work, FALSE for all clean.
 4135  *********************************************************************/
 4136 static bool
 4137 ixgbe_rxeof(struct ix_queue *que, int count)
 4138 {
 4139         struct adapter          *adapter = que->adapter;
 4140         struct rx_ring          *rxr = que->rxr;
 4141         struct ifnet            *ifp = adapter->ifp;
 4142         struct lro_ctrl         *lro = &rxr->lro;
 4143         struct lro_entry        *queued;
 4144         int                     i, nextp, processed = 0;
 4145         u32                     staterr = 0;
 4146         union ixgbe_adv_rx_desc *cur;
 4147         struct ixgbe_rx_buf     *rbuf, *nbuf;
 4148 
 4149         IXGBE_RX_LOCK(rxr);
 4150 
 4151         for (i = rxr->next_to_check; count != 0;) {
 4152                 struct mbuf     *sendmp, *mh, *mp;
 4153                 u32             rsc, ptype;
 4154                 u16             hlen, plen, hdr, vtag;
 4155                 bool            eop;
 4156  
 4157                 /* Sync the ring. */
 4158                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 4159                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 4160 
 4161                 cur = &rxr->rx_base[i];
 4162                 staterr = le32toh(cur->wb.upper.status_error);
 4163 
 4164                 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
 4165                         break;
 4166                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4167                         break;
 4168 
 4169                 count--;
 4170                 sendmp = NULL;
 4171                 nbuf = NULL;
 4172                 rsc = 0;
 4173                 cur->wb.upper.status_error = 0;
 4174                 rbuf = &rxr->rx_buffers[i];
 4175                 mh = rbuf->m_head;
 4176                 mp = rbuf->m_pack;
 4177 
 4178                 plen = le16toh(cur->wb.upper.length);
 4179                 ptype = le32toh(cur->wb.lower.lo_dword.data) &
 4180                     IXGBE_RXDADV_PKTTYPE_MASK;
 4181                 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
 4182                 vtag = le16toh(cur->wb.upper.vlan);
 4183                 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
 4184 
 4185                 /* Make sure bad packets are discarded */
 4186                 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
 4187                     (rxr->discard)) {
 4188                         ifp->if_ierrors++;
 4189                         rxr->rx_discarded++;
 4190                         if (eop)
 4191                                 rxr->discard = FALSE;
 4192                         else
 4193                                 rxr->discard = TRUE;
 4194                         ixgbe_rx_discard(rxr, i);
 4195                         goto next_desc;
 4196                 }
 4197 
 4198                 /*
 4199                 ** On 82599 which supports a hardware
 4200                 ** LRO (called HW RSC), packets need
 4201                 ** not be fragmented across sequential
 4202                 ** descriptors, rather the next descriptor
 4203                 ** is indicated in bits of the descriptor.
 4204                 ** This also means that we might proceses
 4205                 ** more than one packet at a time, something
 4206                 ** that has never been true before, it
 4207                 ** required eliminating global chain pointers
 4208                 ** in favor of what we are doing here.  -jfv
 4209                 */
 4210                 if (!eop) {
 4211                         /*
 4212                         ** Figure out the next descriptor
 4213                         ** of this frame.
 4214                         */
 4215                         if (rxr->hw_rsc == TRUE) {
 4216                                 rsc = ixgbe_rsc_count(cur);
 4217                                 rxr->rsc_num += (rsc - 1);
 4218                         }
 4219                         if (rsc) { /* Get hardware index */
 4220                                 nextp = ((staterr &
 4221                                     IXGBE_RXDADV_NEXTP_MASK) >>
 4222                                     IXGBE_RXDADV_NEXTP_SHIFT);
 4223                         } else { /* Just sequential */
 4224                                 nextp = i + 1;
 4225                                 if (nextp == adapter->num_rx_desc)
 4226                                         nextp = 0;
 4227                         }
 4228                         nbuf = &rxr->rx_buffers[nextp];
 4229                         prefetch(nbuf);
 4230                 }
 4231                 /*
 4232                 ** The header mbuf is ONLY used when header 
 4233                 ** split is enabled, otherwise we get normal 
 4234                 ** behavior, ie, both header and payload
 4235                 ** are DMA'd into the payload buffer.
 4236                 **
 4237                 ** Rather than using the fmp/lmp global pointers
 4238                 ** we now keep the head of a packet chain in the
 4239                 ** buffer struct and pass this along from one
 4240                 ** descriptor to the next, until we get EOP.
 4241                 */
 4242                 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
 4243                         /* This must be an initial descriptor */
 4244                         hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 4245                             IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 4246                         if (hlen > IXGBE_RX_HDR)
 4247                                 hlen = IXGBE_RX_HDR;
 4248                         mh->m_len = hlen;
 4249                         mh->m_flags |= M_PKTHDR;
 4250                         mh->m_next = NULL;
 4251                         mh->m_pkthdr.len = mh->m_len;
 4252                         /* Null buf pointer so it is refreshed */
 4253                         rbuf->m_head = NULL;
 4254                         /*
 4255                         ** Check the payload length, this
 4256                         ** could be zero if its a small
 4257                         ** packet.
 4258                         */
 4259                         if (plen > 0) {
 4260                                 mp->m_len = plen;
 4261                                 mp->m_next = NULL;
 4262                                 mp->m_flags &= ~M_PKTHDR;
 4263                                 mh->m_next = mp;
 4264                                 mh->m_pkthdr.len += mp->m_len;
 4265                                 /* Null buf pointer so it is refreshed */
 4266                                 rbuf->m_pack = NULL;
 4267                                 rxr->rx_split_packets++;
 4268                         }
 4269                         /*
 4270                         ** Now create the forward
 4271                         ** chain so when complete 
 4272                         ** we wont have to.
 4273                         */
 4274                         if (eop == 0) {
 4275                                 /* stash the chain head */
 4276                                 nbuf->fmp = mh;
 4277                                 /* Make forward chain */
 4278                                 if (plen)
 4279                                         mp->m_next = nbuf->m_pack;
 4280                                 else
 4281                                         mh->m_next = nbuf->m_pack;
 4282                         } else {
 4283                                 /* Singlet, prepare to send */
 4284                                 sendmp = mh;
 4285                                 if ((adapter->num_vlans) &&
 4286                                   (staterr & IXGBE_RXD_STAT_VP)) {
 4287                                         sendmp->m_pkthdr.ether_vtag = vtag;
 4288                                         sendmp->m_flags |= M_VLANTAG;
 4289                                 }
 4290                         }
 4291                 } else {
 4292                         /*
 4293                         ** Either no header split, or a
 4294                         ** secondary piece of a fragmented
 4295                         ** split packet.
 4296                         */
 4297                         mp->m_len = plen;
 4298                         /*
 4299                         ** See if there is a stored head
 4300                         ** that determines what we are
 4301                         */
 4302                         sendmp = rbuf->fmp;
 4303                         rbuf->m_pack = rbuf->fmp = NULL;
 4304 
 4305                         if (sendmp != NULL) {  /* secondary frag */
 4306                                 mp->m_flags &= ~M_PKTHDR;
 4307                                 sendmp->m_pkthdr.len += mp->m_len;
 4308                         } else {
 4309                                 /* first desc of a non-ps chain */
 4310                                 sendmp = mp;
 4311                                 sendmp->m_flags |= M_PKTHDR;
 4312                                 sendmp->m_pkthdr.len = mp->m_len;
 4313                                 if (staterr & IXGBE_RXD_STAT_VP) {
 4314                                         sendmp->m_pkthdr.ether_vtag = vtag;
 4315                                         sendmp->m_flags |= M_VLANTAG;
 4316                                 }
 4317                         }
 4318                         /* Pass the head pointer on */
 4319                         if (eop == 0) {
 4320                                 nbuf->fmp = sendmp;
 4321                                 sendmp = NULL;
 4322                                 mp->m_next = nbuf->m_pack;
 4323                         }
 4324                 }
 4325                 ++processed;
 4326                 /* Sending this frame? */
 4327                 if (eop) {
 4328                         sendmp->m_pkthdr.rcvif = ifp;
 4329                         ifp->if_ipackets++;
 4330                         rxr->rx_packets++;
 4331                         /* capture data for AIM */
 4332                         rxr->bytes += sendmp->m_pkthdr.len;
 4333                         rxr->rx_bytes += sendmp->m_pkthdr.len;
 4334                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 4335                                 ixgbe_rx_checksum(staterr, sendmp, ptype);
 4336 #if __FreeBSD_version >= 800000
 4337                         sendmp->m_pkthdr.flowid = que->msix;
 4338                         sendmp->m_flags |= M_FLOWID;
 4339 #endif
 4340                 }
 4341 next_desc:
 4342                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 4343                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 4344 
 4345                 /* Advance our pointers to the next descriptor. */
 4346                 if (++i == adapter->num_rx_desc)
 4347                         i = 0;
 4348 
 4349                 /* Now send to the stack or do LRO */
 4350                 if (sendmp != NULL) {
 4351                         rxr->next_to_check = i;
 4352                         ixgbe_rx_input(rxr, ifp, sendmp, ptype);
 4353                         i = rxr->next_to_check;
 4354                 }
 4355 
 4356                /* Every 8 descriptors we go to refresh mbufs */
 4357                 if (processed == 8) {
 4358                         ixgbe_refresh_mbufs(rxr, i);
 4359                         processed = 0;
 4360                 }
 4361         }
 4362 
 4363         /* Refresh any remaining buf structs */
 4364         if (ixgbe_rx_unrefreshed(rxr))
 4365                 ixgbe_refresh_mbufs(rxr, i);
 4366 
 4367         rxr->next_to_check = i;
 4368 
 4369         /*
 4370          * Flush any outstanding LRO work
 4371          */
 4372         while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
 4373                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
 4374                 tcp_lro_flush(lro, queued);
 4375         }
 4376 
 4377         IXGBE_RX_UNLOCK(rxr);
 4378 
 4379         /*
 4380         ** We still have cleaning to do?
 4381         ** Schedule another interrupt if so.
 4382         */
 4383         if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
 4384                 ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
 4385                 return (TRUE);
 4386         }
 4387 
 4388         return (FALSE);
 4389 }
 4390 
 4391 
 4392 /*********************************************************************
 4393  *
 4394  *  Verify that the hardware indicated that the checksum is valid.
 4395  *  Inform the stack about the status of checksum so that stack
 4396  *  doesn't spend time verifying the checksum.
 4397  *
 4398  *********************************************************************/
 4399 static void
 4400 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
 4401 {
 4402         u16     status = (u16) staterr;
 4403         u8      errors = (u8) (staterr >> 24);
 4404         bool    sctp = FALSE;
 4405 
 4406         if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
 4407             (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
 4408                 sctp = TRUE;
 4409 
 4410         if (status & IXGBE_RXD_STAT_IPCS) {
 4411                 if (!(errors & IXGBE_RXD_ERR_IPE)) {
 4412                         /* IP Checksum Good */
 4413                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
 4414                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 4415 
 4416                 } else
 4417                         mp->m_pkthdr.csum_flags = 0;
 4418         }
 4419         if (status & IXGBE_RXD_STAT_L4CS) {
 4420                 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 4421 #if __FreeBSD_version >= 800000
 4422                 if (sctp)
 4423                         type = CSUM_SCTP_VALID;
 4424 #endif
 4425                 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
 4426                         mp->m_pkthdr.csum_flags |= type;
 4427                         if (!sctp)
 4428                                 mp->m_pkthdr.csum_data = htons(0xffff);
 4429                 } 
 4430         }
 4431         return;
 4432 }
 4433 
 4434 
 4435 /*
 4436 ** This routine is run via an vlan config EVENT,
 4437 ** it enables us to use the HW Filter table since
 4438 ** we can get the vlan id. This just creates the
 4439 ** entry in the soft version of the VFTA, init will
 4440 ** repopulate the real table.
 4441 */
 4442 static void
 4443 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 4444 {
 4445         struct adapter  *adapter = ifp->if_softc;
 4446         u16             index, bit;
 4447 
 4448         if (ifp->if_softc !=  arg)   /* Not our event */
 4449                 return;
 4450 
 4451         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
 4452                 return;
 4453 
 4454         IXGBE_CORE_LOCK(adapter);
 4455         index = (vtag >> 5) & 0x7F;
 4456         bit = vtag & 0x1F;
 4457         adapter->shadow_vfta[index] |= (1 << bit);
 4458         ++adapter->num_vlans;
 4459         ixgbe_init_locked(adapter);
 4460         IXGBE_CORE_UNLOCK(adapter);
 4461 }
 4462 
 4463 /*
 4464 ** This routine is run via an vlan
 4465 ** unconfig EVENT, remove our entry
 4466 ** in the soft vfta.
 4467 */
 4468 static void
 4469 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 4470 {
 4471         struct adapter  *adapter = ifp->if_softc;
 4472         u16             index, bit;
 4473 
 4474         if (ifp->if_softc !=  arg)
 4475                 return;
 4476 
 4477         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
 4478                 return;
 4479 
 4480         IXGBE_CORE_LOCK(adapter);
 4481         index = (vtag >> 5) & 0x7F;
 4482         bit = vtag & 0x1F;
 4483         adapter->shadow_vfta[index] &= ~(1 << bit);
 4484         --adapter->num_vlans;
 4485         /* Re-init to load the changes */
 4486         ixgbe_init_locked(adapter);
 4487         IXGBE_CORE_UNLOCK(adapter);
 4488 }
 4489 
 4490 static void
 4491 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
 4492 {
 4493         struct ifnet    *ifp = adapter->ifp;
 4494         struct ixgbe_hw *hw = &adapter->hw;
 4495         u32             ctrl;
 4496 
 4497 
 4498         /*
 4499         ** We get here thru init_locked, meaning
 4500         ** a soft reset, this has already cleared
 4501         ** the VFTA and other state, so if there
 4502         ** have been no vlan's registered do nothing.
 4503         */
 4504         if (adapter->num_vlans == 0)
 4505                 return;
 4506 
 4507         /*
 4508         ** A soft reset zero's out the VFTA, so
 4509         ** we need to repopulate it now.
 4510         */
 4511         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
 4512                 if (adapter->shadow_vfta[i] != 0)
 4513                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
 4514                             adapter->shadow_vfta[i]);
 4515 
 4516         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 4517         /* Enable the Filter Table if enabled */
 4518         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
 4519                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
 4520                 ctrl |= IXGBE_VLNCTRL_VFE;
 4521         }
 4522         if (hw->mac.type == ixgbe_mac_82598EB)
 4523                 ctrl |= IXGBE_VLNCTRL_VME;
 4524         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
 4525 
 4526         /* On 82599 the VLAN enable is per/queue in RXDCTL */
 4527         if (hw->mac.type != ixgbe_mac_82598EB)
 4528                 for (int i = 0; i < adapter->num_queues; i++) {
 4529                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 4530                                 ctrl |= IXGBE_RXDCTL_VME;
 4531                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
 4532                 }
 4533 }
 4534 
 4535 static void
 4536 ixgbe_enable_intr(struct adapter *adapter)
 4537 {
 4538         struct ixgbe_hw *hw = &adapter->hw;
 4539         struct ix_queue *que = adapter->queues;
 4540         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 4541 
 4542 
 4543         /* Enable Fan Failure detection */
 4544         if (hw->device_id == IXGBE_DEV_ID_82598AT)
 4545                     mask |= IXGBE_EIMS_GPI_SDP1;
 4546         else {
 4547                     mask |= IXGBE_EIMS_ECC;
 4548                     mask |= IXGBE_EIMS_GPI_SDP1;
 4549                     mask |= IXGBE_EIMS_GPI_SDP2;
 4550 #ifdef IXGBE_FDIR
 4551                     mask |= IXGBE_EIMS_FLOW_DIR;
 4552 #endif
 4553         }
 4554 
 4555         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 4556 
 4557         /* With RSS we use auto clear */
 4558         if (adapter->msix_mem) {
 4559                 mask = IXGBE_EIMS_ENABLE_MASK;
 4560                 /* Don't autoclear Link */
 4561                 mask &= ~IXGBE_EIMS_OTHER;
 4562                 mask &= ~IXGBE_EIMS_LSC;
 4563                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 4564         }
 4565 
 4566         /*
 4567         ** Now enable all queues, this is done separately to
 4568         ** allow for handling the extended (beyond 32) MSIX
 4569         ** vectors that can be used by 82599
 4570         */
 4571         for (int i = 0; i < adapter->num_queues; i++, que++)
 4572                 ixgbe_enable_queue(adapter, que->msix);
 4573 
 4574         IXGBE_WRITE_FLUSH(hw);
 4575 
 4576         return;
 4577 }
 4578 
 4579 static void
 4580 ixgbe_disable_intr(struct adapter *adapter)
 4581 {
 4582         if (adapter->msix_mem)
 4583                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
 4584         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 4585                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 4586         } else {
 4587                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 4588                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 4589                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
 4590         }
 4591         IXGBE_WRITE_FLUSH(&adapter->hw);
 4592         return;
 4593 }
 4594 
 4595 u16
 4596 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
 4597 {
 4598         u16 value;
 4599 
 4600         value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
 4601             reg, 2);
 4602 
 4603         return (value);
 4604 }
 4605 
 4606 void
 4607 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
 4608 {
 4609         pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
 4610             reg, value, 2);
 4611 
 4612         return;
 4613 }
 4614 
 4615 /*
 4616 ** Setup the correct IVAR register for a particular MSIX interrupt
 4617 **   (yes this is all very magic and confusing :)
 4618 **  - entry is the register array entry
 4619 **  - vector is the MSIX vector for this queue
 4620 **  - type is RX/TX/MISC
 4621 */
 4622 static void
 4623 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
 4624 {
 4625         struct ixgbe_hw *hw = &adapter->hw;
 4626         u32 ivar, index;
 4627 
 4628         vector |= IXGBE_IVAR_ALLOC_VAL;
 4629 
 4630         switch (hw->mac.type) {
 4631 
 4632         case ixgbe_mac_82598EB:
 4633                 if (type == -1)
 4634                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
 4635                 else
 4636                         entry += (type * 64);
 4637                 index = (entry >> 2) & 0x1F;
 4638                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
 4639                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
 4640                 ivar |= (vector << (8 * (entry & 0x3)));
 4641                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
 4642                 break;
 4643 
 4644         case ixgbe_mac_82599EB:
 4645                 if (type == -1) { /* MISC IVAR */
 4646                         index = (entry & 1) * 8;
 4647                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
 4648                         ivar &= ~(0xFF << index);
 4649                         ivar |= (vector << index);
 4650                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
 4651                 } else {        /* RX/TX IVARS */
 4652                         index = (16 * (entry & 1)) + (8 * type);
 4653                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
 4654                         ivar &= ~(0xFF << index);
 4655                         ivar |= (vector << index);
 4656                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
 4657                 }
 4658 
 4659         default:
 4660                 break;
 4661         }
 4662 }
 4663 
 4664 static void
 4665 ixgbe_configure_ivars(struct adapter *adapter)
 4666 {
 4667         struct  ix_queue *que = adapter->queues;
 4668         u32 newitr;
 4669 
 4670         if (ixgbe_max_interrupt_rate > 0)
 4671                 newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
 4672         else
 4673                 newitr = 0;
 4674 
 4675         for (int i = 0; i < adapter->num_queues; i++, que++) {
 4676                 /* First the RX queue entry */
 4677                 ixgbe_set_ivar(adapter, i, que->msix, 0);
 4678                 /* ... and the TX */
 4679                 ixgbe_set_ivar(adapter, i, que->msix, 1);
 4680                 /* Set an Initial EITR value */
 4681                 IXGBE_WRITE_REG(&adapter->hw,
 4682                     IXGBE_EITR(que->msix), newitr);
 4683         }
 4684 
 4685         /* For the Link interrupt */
 4686         ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
 4687 }
 4688 
 4689 /*
 4690 ** ixgbe_sfp_probe - called in the local timer to
 4691 ** determine if a port had optics inserted.
 4692 */  
 4693 static bool ixgbe_sfp_probe(struct adapter *adapter)
 4694 {
 4695         struct ixgbe_hw *hw = &adapter->hw;
 4696         device_t        dev = adapter->dev;
 4697         bool            result = FALSE;
 4698 
 4699         if ((hw->phy.type == ixgbe_phy_nl) &&
 4700             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
 4701                 s32 ret = hw->phy.ops.identify_sfp(hw);
 4702                 if (ret)
 4703                         goto out;
 4704                 ret = hw->phy.ops.reset(hw);
 4705                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 4706                         device_printf(dev,"Unsupported SFP+ module detected!");
 4707                         printf(" Reload driver with supported module.\n");
 4708                         adapter->sfp_probe = FALSE;
 4709                         goto out;
 4710                 } else
 4711                         device_printf(dev,"SFP+ module detected!\n");
 4712                 /* We now have supported optics */
 4713                 adapter->sfp_probe = FALSE;
 4714                 /* Set the optics type so system reports correctly */
 4715                 ixgbe_setup_optics(adapter);
 4716                 result = TRUE;
 4717         }
 4718 out:
 4719         return (result);
 4720 }
 4721 
 4722 /*
 4723 ** Tasklet handler for MSIX Link interrupts
 4724 **  - do outside interrupt since it might sleep
 4725 */
 4726 static void
 4727 ixgbe_handle_link(void *context, int pending)
 4728 {
 4729         struct adapter  *adapter = context;
 4730 
 4731         ixgbe_check_link(&adapter->hw,
 4732             &adapter->link_speed, &adapter->link_up, 0);
 4733         ixgbe_update_link_status(adapter);
 4734 }
 4735 
 4736 /*
 4737 ** Tasklet for handling SFP module interrupts
 4738 */
 4739 static void
 4740 ixgbe_handle_mod(void *context, int pending)
 4741 {
 4742         struct adapter  *adapter = context;
 4743         struct ixgbe_hw *hw = &adapter->hw;
 4744         device_t        dev = adapter->dev;
 4745         u32 err;
 4746 
 4747         err = hw->phy.ops.identify_sfp(hw);
 4748         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 4749                 device_printf(dev,
 4750                     "Unsupported SFP+ module type was detected.\n");
 4751                 return;
 4752         }
 4753         err = hw->mac.ops.setup_sfp(hw);
 4754         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 4755                 device_printf(dev,
 4756                     "Setup failure - unsupported SFP+ module type.\n");
 4757                 return;
 4758         }
 4759         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
 4760         return;
 4761 }
 4762 
 4763 
 4764 /*
 4765 ** Tasklet for handling MSF (multispeed fiber) interrupts
 4766 */
 4767 static void
 4768 ixgbe_handle_msf(void *context, int pending)
 4769 {
 4770         struct adapter  *adapter = context;
 4771         struct ixgbe_hw *hw = &adapter->hw;
 4772         u32 autoneg;
 4773         bool negotiate;
 4774 
 4775         autoneg = hw->phy.autoneg_advertised;
 4776         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
 4777                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
 4778         if (hw->mac.ops.setup_link)
 4779                 hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
 4780         return;
 4781 }
 4782 
 4783 #ifdef IXGBE_FDIR
 4784 /*
 4785 ** Tasklet for reinitializing the Flow Director filter table
 4786 */
 4787 static void
 4788 ixgbe_reinit_fdir(void *context, int pending)
 4789 {
 4790         struct adapter  *adapter = context;
 4791         struct ifnet   *ifp = adapter->ifp;
 4792 
 4793         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
 4794                 return;
 4795         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
 4796         adapter->fdir_reinit = 0;
 4797         /* Restart the interface */
 4798         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 4799         return;
 4800 }
 4801 #endif
 4802 
 4803 /**********************************************************************
 4804  *
 4805  *  Update the board statistics counters.
 4806  *
 4807  **********************************************************************/
 4808 static void
 4809 ixgbe_update_stats_counters(struct adapter *adapter)
 4810 {
 4811         struct ifnet   *ifp = adapter->ifp;
 4812         struct ixgbe_hw *hw = &adapter->hw;
 4813         u32  missed_rx = 0, bprc, lxon, lxoff, total;
 4814         u64  total_missed_rx = 0;
 4815 
 4816         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 4817         adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
 4818         adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
 4819         adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
 4820 
 4821         for (int i = 0; i < 8; i++) {
 4822                 u32 mp;
 4823                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
 4824                 /* missed_rx tallies misses for the gprc workaround */
 4825                 missed_rx += mp;
 4826                 /* global total per queue */
 4827                 adapter->stats.mpc[i] += mp;
 4828                 /* Running comprehensive total for stats display */
 4829                 total_missed_rx += adapter->stats.mpc[i];
 4830                 if (hw->mac.type == ixgbe_mac_82598EB)
 4831                         adapter->stats.rnbc[i] +=
 4832                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
 4833                 adapter->stats.pxontxc[i] +=
 4834                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
 4835                 adapter->stats.pxonrxc[i] +=
 4836                     IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
 4837                 adapter->stats.pxofftxc[i] +=
 4838                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
 4839                 adapter->stats.pxoffrxc[i] +=
 4840                     IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 4841                 adapter->stats.pxon2offc[i] +=
 4842                     IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
 4843         }
 4844         for (int i = 0; i < 16; i++) {
 4845                 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
 4846                 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
 4847                 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
 4848                 adapter->stats.qbrc[i] += 
 4849                     ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
 4850                 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
 4851                 adapter->stats.qbtc[i] +=
 4852                     ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
 4853                 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 4854         }
 4855         adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
 4856         adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
 4857         adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 4858 
 4859         /* Hardware workaround, gprc counts missed packets */
 4860         adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 4861         adapter->stats.gprc -= missed_rx;
 4862 
 4863         if (hw->mac.type != ixgbe_mac_82598EB) {
 4864                 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
 4865                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
 4866                 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
 4867                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
 4868                 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
 4869                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
 4870                 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 4871                 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 4872         } else {
 4873                 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 4874                 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
 4875                 /* 82598 only has a counter in the high register */
 4876                 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
 4877                 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
 4878                 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 4879         }
 4880 
 4881         /*
 4882          * Workaround: mprc hardware is incorrectly counting
 4883          * broadcasts, so for now we subtract those.
 4884          */
 4885         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
 4886         adapter->stats.bprc += bprc;
 4887         adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
 4888         if (hw->mac.type == ixgbe_mac_82598EB)
 4889                 adapter->stats.mprc -= bprc;
 4890 
 4891         adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
 4892         adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
 4893         adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
 4894         adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
 4895         adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
 4896         adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
 4897 
 4898         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
 4899         adapter->stats.lxontxc += lxon;
 4900         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
 4901         adapter->stats.lxofftxc += lxoff;
 4902         total = lxon + lxoff;
 4903 
 4904         adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
 4905         adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
 4906         adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
 4907         adapter->stats.gptc -= total;
 4908         adapter->stats.mptc -= total;
 4909         adapter->stats.ptc64 -= total;
 4910         adapter->stats.gotc -= total * ETHER_MIN_LEN;
 4911 
 4912         adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
 4913         adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
 4914         adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
 4915         adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
 4916         adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
 4917         adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
 4918         adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
 4919         adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
 4920         adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
 4921         adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
 4922         adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
 4923         adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
 4924         adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
 4925         adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
 4926         adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 4927         adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
 4928         adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
 4929         adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
 4930         /* Only read FCOE on 82599 */
 4931         if (hw->mac.type != ixgbe_mac_82598EB) {
 4932                 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
 4933                 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
 4934                 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
 4935                 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 4936                 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 4937         }
 4938 
 4939         /* Fill out the OS statistics structure */
 4940         ifp->if_ipackets = adapter->stats.gprc;
 4941         ifp->if_opackets = adapter->stats.gptc;
 4942         ifp->if_ibytes = adapter->stats.gorc;
 4943         ifp->if_obytes = adapter->stats.gotc;
 4944         ifp->if_imcasts = adapter->stats.mprc;
 4945         ifp->if_collisions = 0;
 4946 
 4947         /* Rx Errors */
 4948         ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
 4949                 adapter->stats.rlec;
 4950 }
 4951 
 4952 /** ixgbe_sysctl_tdh_handler - Handler function
 4953  *  Retrieves the TDH value from the hardware
 4954  */
 4955 static int 
 4956 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
 4957 {
 4958         int error;
 4959 
 4960         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
 4961         if (!txr) return 0;
 4962 
 4963         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
 4964         error = sysctl_handle_int(oidp, &val, 0, req);
 4965         if (error || !req->newptr)
 4966                 return error;
 4967         return 0;
 4968 }
 4969 
 4970 /** ixgbe_sysctl_tdt_handler - Handler function
 4971  *  Retrieves the TDT value from the hardware
 4972  */
 4973 static int 
 4974 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
 4975 {
 4976         int error;
 4977 
 4978         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
 4979         if (!txr) return 0;
 4980 
 4981         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
 4982         error = sysctl_handle_int(oidp, &val, 0, req);
 4983         if (error || !req->newptr)
 4984                 return error;
 4985         return 0;
 4986 }
 4987 
 4988 /** ixgbe_sysctl_rdh_handler - Handler function
 4989  *  Retrieves the RDH value from the hardware
 4990  */
 4991 static int 
 4992 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
 4993 {
 4994         int error;
 4995 
 4996         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
 4997         if (!rxr) return 0;
 4998 
 4999         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
 5000         error = sysctl_handle_int(oidp, &val, 0, req);
 5001         if (error || !req->newptr)
 5002                 return error;
 5003         return 0;
 5004 }
 5005 
 5006 /** ixgbe_sysctl_rdt_handler - Handler function
 5007  *  Retrieves the RDT value from the hardware
 5008  */
 5009 static int 
 5010 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
 5011 {
 5012         int error;
 5013 
 5014         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
 5015         if (!rxr) return 0;
 5016 
 5017         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
 5018         error = sysctl_handle_int(oidp, &val, 0, req);
 5019         if (error || !req->newptr)
 5020                 return error;
 5021         return 0;
 5022 }
 5023 
 5024 static int
 5025 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
 5026 {
 5027         int error;
 5028         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
 5029         unsigned int reg, usec, rate;
 5030 
 5031         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
 5032         usec = ((reg & 0x0FF8) >> 3);
 5033         if (usec > 0)
 5034                 rate = 1000000 / usec;
 5035         else
 5036                 rate = 0;
 5037         error = sysctl_handle_int(oidp, &rate, 0, req);
 5038         if (error || !req->newptr)
 5039                 return error;
 5040         return 0;
 5041 }
 5042 
 5043 /*
 5044  * Add sysctl variables, one per statistic, to the system.
 5045  */
 5046 static void
 5047 ixgbe_add_hw_stats(struct adapter *adapter)
 5048 {
 5049 
 5050         device_t dev = adapter->dev;
 5051 
 5052         struct tx_ring *txr = adapter->tx_rings;
 5053         struct rx_ring *rxr = adapter->rx_rings;
 5054 
 5055         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
 5056         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
 5057         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
 5058         struct ixgbe_hw_stats *stats = &adapter->stats;
 5059 
 5060         struct sysctl_oid *stat_node, *queue_node;
 5061         struct sysctl_oid_list *stat_list, *queue_list;
 5062 
 5063 #define QUEUE_NAME_LEN 32
 5064         char namebuf[QUEUE_NAME_LEN];
 5065 
 5066         /* Driver Statistics */
 5067         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
 5068                         CTLFLAG_RD, &adapter->dropped_pkts,
 5069                         "Driver dropped packets");
 5070         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
 5071                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
 5072                         "m_defrag() failed");
 5073         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
 5074                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
 5075                         "Driver tx dma failure in xmit");
 5076         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
 5077                         CTLFLAG_RD, &adapter->watchdog_events,
 5078                         "Watchdog timeouts");
 5079         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
 5080                         CTLFLAG_RD, &adapter->tso_tx,
 5081                         "TSO");
 5082         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
 5083                         CTLFLAG_RD, &adapter->link_irq,
 5084                         "Link MSIX IRQ Handled");
 5085 
 5086         for (int i = 0; i < adapter->num_queues; i++, txr++) {
 5087                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
 5088                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
 5089                                             CTLFLAG_RD, NULL, "Queue Name");
 5090                 queue_list = SYSCTL_CHILDREN(queue_node);
 5091 
 5092                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
 5093                                 CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
 5094                                 sizeof(&adapter->queues[i]),
 5095                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
 5096                                 "Interrupt Rate");
 5097                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
 5098                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
 5099                                 ixgbe_sysctl_tdh_handler, "IU",
 5100                                 "Transmit Descriptor Head");
 5101                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
 5102                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
 5103                                 ixgbe_sysctl_tdt_handler, "IU",
 5104                                 "Transmit Descriptor Tail");
 5105                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
 5106                                 CTLFLAG_RD, &txr->no_desc_avail,
 5107                                 "Queue No Descriptor Available");
 5108                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
 5109                                 CTLFLAG_RD, &txr->total_packets,
 5110                                 "Queue Packets Transmitted");
 5111         }
 5112 
 5113         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 5114                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
 5115                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
 5116                                             CTLFLAG_RD, NULL, "Queue Name");
 5117                 queue_list = SYSCTL_CHILDREN(queue_node);
 5118 
 5119                 struct lro_ctrl *lro = &rxr->lro;
 5120 
 5121                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
 5122                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
 5123                                             CTLFLAG_RD, NULL, "Queue Name");
 5124                 queue_list = SYSCTL_CHILDREN(queue_node);
 5125 
 5126                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
 5127                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
 5128                                 ixgbe_sysctl_rdh_handler, "IU",
 5129                                 "Receive Descriptor Head");
 5130                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
 5131                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
 5132                                 ixgbe_sysctl_rdt_handler, "IU",
 5133                                 "Receive Descriptor Tail");
 5134                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
 5135                                 CTLFLAG_RD, &rxr->rx_packets,
 5136                                 "Queue Packets Received");
 5137                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
 5138                                 CTLFLAG_RD, &rxr->rx_bytes,
 5139                                 "Queue Bytes Received");
 5140                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
 5141                                 CTLFLAG_RD, &lro->lro_queued, 0,
 5142                                 "LRO Queued");
 5143                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
 5144                                 CTLFLAG_RD, &lro->lro_flushed, 0,
 5145                                 "LRO Flushed");
 5146         }
 5147 
 5148         /* MAC stats get the own sub node */
 5149 
 5150         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
 5151                                     CTLFLAG_RD, NULL, "MAC Statistics");
 5152         stat_list = SYSCTL_CHILDREN(stat_node);
 5153 
 5154         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
 5155                         CTLFLAG_RD, &stats->crcerrs,
 5156                         "CRC Errors");
 5157         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
 5158                         CTLFLAG_RD, &stats->illerrc,
 5159                         "Illegal Byte Errors");
 5160         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
 5161                         CTLFLAG_RD, &stats->errbc,
 5162                         "Byte Errors");
 5163         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
 5164                         CTLFLAG_RD, &stats->mspdc,
 5165                         "MAC Short Packets Discarded");
 5166         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
 5167                         CTLFLAG_RD, &stats->mlfc,
 5168                         "MAC Local Faults");
 5169         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
 5170                         CTLFLAG_RD, &stats->mrfc,
 5171                         "MAC Remote Faults");
 5172         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
 5173                         CTLFLAG_RD, &stats->rlec,
 5174                         "Receive Length Errors");
 5175         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
 5176                         CTLFLAG_RD, &stats->lxontxc,
 5177                         "Link XON Transmitted");
 5178         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
 5179                         CTLFLAG_RD, &stats->lxonrxc,
 5180                         "Link XON Received");
 5181         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
 5182                         CTLFLAG_RD, &stats->lxofftxc,
 5183                         "Link XOFF Transmitted");
 5184         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
 5185                         CTLFLAG_RD, &stats->lxoffrxc,
 5186                         "Link XOFF Received");
 5187 
 5188         /* Packet Reception Stats */
 5189         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
 5190                         CTLFLAG_RD, &stats->tor, 
 5191                         "Total Octets Received"); 
 5192         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
 5193                         CTLFLAG_RD, &stats->gorc, 
 5194                         "Good Octets Received"); 
 5195         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
 5196                         CTLFLAG_RD, &stats->tpr,
 5197                         "Total Packets Received");
 5198         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
 5199                         CTLFLAG_RD, &stats->gprc,
 5200                         "Good Packets Received");
 5201         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
 5202                         CTLFLAG_RD, &stats->mprc,
 5203                         "Multicast Packets Received");
 5204         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
 5205                         CTLFLAG_RD, &stats->bprc,
 5206                         "Broadcast Packets Received");
 5207         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
 5208                         CTLFLAG_RD, &stats->prc64,
 5209                         "64 byte frames received ");
 5210         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
 5211                         CTLFLAG_RD, &stats->prc127,
 5212                         "65-127 byte frames received");
 5213         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
 5214                         CTLFLAG_RD, &stats->prc255,
 5215                         "128-255 byte frames received");
 5216         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
 5217                         CTLFLAG_RD, &stats->prc511,
 5218                         "256-511 byte frames received");
 5219         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
 5220                         CTLFLAG_RD, &stats->prc1023,
 5221                         "512-1023 byte frames received");
 5222         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
 5223                         CTLFLAG_RD, &stats->prc1522,
 5224                         "1023-1522 byte frames received");
 5225         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
 5226                         CTLFLAG_RD, &stats->ruc,
 5227                         "Receive Undersized");
 5228         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
 5229                         CTLFLAG_RD, &stats->rfc,
 5230                         "Fragmented Packets Received ");
 5231         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
 5232                         CTLFLAG_RD, &stats->roc,
 5233                         "Oversized Packets Received");
 5234         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
 5235                         CTLFLAG_RD, &stats->rjc,
 5236                         "Received Jabber");
 5237         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
 5238                         CTLFLAG_RD, &stats->mngprc,
 5239                         "Management Packets Received");
 5240         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
 5241                         CTLFLAG_RD, &stats->mngptc,
 5242                         "Management Packets Dropped");
 5243         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
 5244                         CTLFLAG_RD, &stats->xec,
 5245                         "Checksum Errors");
 5246 
 5247         /* Packet Transmission Stats */
 5248         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
 5249                         CTLFLAG_RD, &stats->gotc, 
 5250                         "Good Octets Transmitted"); 
 5251         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
 5252                         CTLFLAG_RD, &stats->tpt,
 5253                         "Total Packets Transmitted");
 5254         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
 5255                         CTLFLAG_RD, &stats->gptc,
 5256                         "Good Packets Transmitted");
 5257         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
 5258                         CTLFLAG_RD, &stats->bptc,
 5259                         "Broadcast Packets Transmitted");
 5260         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
 5261                         CTLFLAG_RD, &stats->mptc,
 5262                         "Multicast Packets Transmitted");
 5263         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
 5264                         CTLFLAG_RD, &stats->mngptc,
 5265                         "Management Packets Transmitted");
 5266         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
 5267                         CTLFLAG_RD, &stats->ptc64,
 5268                         "64 byte frames transmitted ");
 5269         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
 5270                         CTLFLAG_RD, &stats->ptc127,
 5271                         "65-127 byte frames transmitted");
 5272         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
 5273                         CTLFLAG_RD, &stats->ptc255,
 5274                         "128-255 byte frames transmitted");
 5275         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
 5276                         CTLFLAG_RD, &stats->ptc511,
 5277                         "256-511 byte frames transmitted");
 5278         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
 5279                         CTLFLAG_RD, &stats->ptc1023,
 5280                         "512-1023 byte frames transmitted");
 5281         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
 5282                         CTLFLAG_RD, &stats->ptc1522,
 5283                         "1024-1522 byte frames transmitted");
 5284 
 5285         /* FC Stats */
 5286         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
 5287                 CTLFLAG_RD, &stats->fccrc,
 5288                 "FC CRC Errors");
 5289         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
 5290                 CTLFLAG_RD, &stats->fclast,
 5291                 "FC Last Error");
 5292         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
 5293                 CTLFLAG_RD, &stats->fcoerpdc,
 5294                 "FCoE Packets Dropped");
 5295         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
 5296                 CTLFLAG_RD, &stats->fcoeprc,
 5297                 "FCoE Packets Received");
 5298         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
 5299                 CTLFLAG_RD, &stats->fcoeptc,
 5300                 "FCoE Packets Transmitted");
 5301         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
 5302                 CTLFLAG_RD, &stats->fcoedwrc,
 5303                 "FCoE DWords Received");
 5304         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
 5305                 CTLFLAG_RD, &stats->fcoedwtc,
 5306                 "FCoE DWords Transmitted");
 5307 }
 5308 
 5309 /*
 5310 ** Set flow control using sysctl:
 5311 ** Flow control values:
 5312 **      0 - off
 5313 **      1 - rx pause
 5314 **      2 - tx pause
 5315 **      3 - full
 5316 */
 5317 static int
 5318 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
 5319 {
 5320         int error;
 5321         int last = ixgbe_flow_control;
 5322         struct adapter *adapter;
 5323 
 5324         error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
 5325         if (error)
 5326                 return (error);
 5327 
 5328         /* Don't bother if it's not changed */
 5329         if (ixgbe_flow_control == last)
 5330                 return (0);
 5331 
 5332         adapter = (struct adapter *) arg1;
 5333         switch (ixgbe_flow_control) {
 5334                 case ixgbe_fc_rx_pause:
 5335                 case ixgbe_fc_tx_pause:
 5336                 case ixgbe_fc_full:
 5337                         adapter->hw.fc.requested_mode = ixgbe_flow_control;
 5338                         break;
 5339                 case ixgbe_fc_none:
 5340                 default:
 5341                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
 5342         }
 5343 
 5344         ixgbe_fc_enable(&adapter->hw, 0);
 5345         return error;
 5346 }
 5347 
 5348 static void
 5349 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
 5350         const char *description, int *limit, int value)
 5351 {
 5352         *limit = value;
 5353         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
 5354             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
 5355             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
 5356 }
 5357 
 5358 /*
 5359 ** Control link advertise speed:
 5360 **      0 - normal
 5361 **      1 - advertise only 1G
 5362 */
 5363 static int
 5364 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
 5365 {
 5366         int                     error = 0;
 5367         struct adapter          *adapter;
 5368         struct ixgbe_hw         *hw;
 5369         ixgbe_link_speed        speed, last;
 5370 
 5371         adapter = (struct adapter *) arg1;
 5372         hw = &adapter->hw;
 5373         last = hw->phy.autoneg_advertised;
 5374 
 5375         error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
 5376 
 5377         if ((error) || (adapter->advertise == -1))
 5378                 return (error);
 5379 
 5380         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
 5381             (hw->phy.multispeed_fiber)))
 5382                 return (error);
 5383 
 5384         if (adapter->advertise == 1)
 5385                 speed = IXGBE_LINK_SPEED_1GB_FULL;
 5386         else
 5387                 speed = IXGBE_LINK_SPEED_1GB_FULL |
 5388                         IXGBE_LINK_SPEED_10GB_FULL;
 5389 
 5390         if (speed == last) /* no change */
 5391                 return (error);
 5392 
 5393         hw->mac.autotry_restart = TRUE;
 5394         hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
 5395 
 5396         return (error);
 5397 }

Cache object: 512cbcee45600101b21da45a33aef8c6


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.