FreeBSD/Linux Kernel Cross Reference
sys/dev/ixgbe/ixv.c


    1 /******************************************************************************
    2 
    3   Copyright (c) 2001-2011, Intel Corporation 
    4   All rights reserved.
    5   
    6   Redistribution and use in source and binary forms, with or without 
    7   modification, are permitted provided that the following conditions are met:
    8   
    9    1. Redistributions of source code must retain the above copyright notice, 
   10       this list of conditions and the following disclaimer.
   11   
   12    2. Redistributions in binary form must reproduce the above copyright 
   13       notice, this list of conditions and the following disclaimer in the 
   14       documentation and/or other materials provided with the distribution.
   15   
   16    3. Neither the name of the Intel Corporation nor the names of its 
   17       contributors may be used to endorse or promote products derived from 
   18       this software without specific prior written permission.
   19   
   20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
   24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30   POSSIBILITY OF SUCH DAMAGE.
   31 
   32 ******************************************************************************/
   33 /*$FreeBSD: releng/9.0/sys/dev/ixgbe/ixv.c 222592 2011-06-02 05:31:54Z jfv $*/
   34 
   35 #ifdef HAVE_KERNEL_OPTION_HEADERS
   36 #include "opt_inet.h"
   37 #include "opt_inet6.h"
   38 #endif
   39 
   40 #include "ixv.h"
   41 
   42 /*********************************************************************
   43  *  Driver version
   44  *********************************************************************/
   45 char ixv_driver_version[] = "1.0.1";
   46 
   47 /*********************************************************************
   48  *  PCI Device ID Table
   49  *
   50  *  Used by probe to select devices to load on
   51  *  Last field stores an index into ixv_strings
   52  *  Last entry must be all 0s
   53  *
   54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
   55  *********************************************************************/
   56 
   57 static ixv_vendor_info_t ixv_vendor_info_array[] =
   58 {
   59         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
   60         /* required last entry */
   61         {0, 0, 0, 0, 0}
   62 };
   63 
   64 /*********************************************************************
   65  *  Table of branding strings
   66  *********************************************************************/
   67 
   68 static char    *ixv_strings[] = {
   69         "Intel(R) PRO/10GbE Virtual Function Network Driver"
   70 };
   71 
   72 /*********************************************************************
   73  *  Function prototypes
   74  *********************************************************************/
   75 static int      ixv_probe(device_t);
   76 static int      ixv_attach(device_t);
   77 static int      ixv_detach(device_t);
   78 static int      ixv_shutdown(device_t);
   79 #if __FreeBSD_version < 800000
   80 static void     ixv_start(struct ifnet *);
   81 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
   82 #else
   83 static int      ixv_mq_start(struct ifnet *, struct mbuf *);
   84 static int      ixv_mq_start_locked(struct ifnet *,
   85                     struct tx_ring *, struct mbuf *);
   86 static void     ixv_qflush(struct ifnet *);
   87 #endif
   88 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
   89 static void     ixv_init(void *);
   90 static void     ixv_init_locked(struct adapter *);
   91 static void     ixv_stop(void *);
   92 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
   93 static int      ixv_media_change(struct ifnet *);
   94 static void     ixv_identify_hardware(struct adapter *);
   95 static int      ixv_allocate_pci_resources(struct adapter *);
   96 static int      ixv_allocate_msix(struct adapter *);
   97 static int      ixv_allocate_queues(struct adapter *);
   98 static int      ixv_setup_msix(struct adapter *);
   99 static void     ixv_free_pci_resources(struct adapter *);
  100 static void     ixv_local_timer(void *);
  101 static void     ixv_setup_interface(device_t, struct adapter *);
  102 static void     ixv_config_link(struct adapter *);
  103 
  104 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
  105 static int      ixv_setup_transmit_structures(struct adapter *);
  106 static void     ixv_setup_transmit_ring(struct tx_ring *);
  107 static void     ixv_initialize_transmit_units(struct adapter *);
  108 static void     ixv_free_transmit_structures(struct adapter *);
  109 static void     ixv_free_transmit_buffers(struct tx_ring *);
  110 
  111 static int      ixv_allocate_receive_buffers(struct rx_ring *);
  112 static int      ixv_setup_receive_structures(struct adapter *);
  113 static int      ixv_setup_receive_ring(struct rx_ring *);
  114 static void     ixv_initialize_receive_units(struct adapter *);
  115 static void     ixv_free_receive_structures(struct adapter *);
  116 static void     ixv_free_receive_buffers(struct rx_ring *);
  117 
  118 static void     ixv_enable_intr(struct adapter *);
  119 static void     ixv_disable_intr(struct adapter *);
  120 static bool     ixv_txeof(struct tx_ring *);
  121 static bool     ixv_rxeof(struct ix_queue *, int);
  122 static void     ixv_rx_checksum(u32, struct mbuf *, u32);
  123 static void     ixv_set_multi(struct adapter *);
  124 static void     ixv_update_link_status(struct adapter *);
  125 static void     ixv_refresh_mbufs(struct rx_ring *, int);
  126 static int      ixv_xmit(struct tx_ring *, struct mbuf **);
  127 static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
  128 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
  129 static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
  130 static int      ixv_dma_malloc(struct adapter *, bus_size_t,
  131                     struct ixv_dma_alloc *, int);
  132 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
  133 static void     ixv_add_rx_process_limit(struct adapter *, const char *,
  134                     const char *, int *, int);
  135 static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
  136 static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
  137 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
  138 static void     ixv_configure_ivars(struct adapter *);
  139 static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
  140 
  141 static void     ixv_setup_vlan_support(struct adapter *);
  142 static void     ixv_register_vlan(void *, struct ifnet *, u16);
  143 static void     ixv_unregister_vlan(void *, struct ifnet *, u16);
  144 
  145 static void     ixv_save_stats(struct adapter *);
  146 static void     ixv_init_stats(struct adapter *);
  147 static void     ixv_update_stats(struct adapter *);
  148 
  149 static __inline void ixv_rx_discard(struct rx_ring *, int);
  150 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
  151                     struct mbuf *, u32);
  152 
  153 /* The MSI/X Interrupt handlers */
  154 static void     ixv_msix_que(void *);
  155 static void     ixv_msix_mbx(void *);
  156 
  157 /* Deferred interrupt tasklets */
  158 static void     ixv_handle_que(void *, int);
  159 static void     ixv_handle_mbx(void *, int);
  160 
  161 /*********************************************************************
  162  *  FreeBSD Device Interface Entry Points
  163  *********************************************************************/
  164 
  165 static device_method_t ixv_methods[] = {
  166         /* Device interface */
  167         DEVMETHOD(device_probe, ixv_probe),
  168         DEVMETHOD(device_attach, ixv_attach),
  169         DEVMETHOD(device_detach, ixv_detach),
  170         DEVMETHOD(device_shutdown, ixv_shutdown),
  171         {0, 0}
  172 };
  173 
  174 static driver_t ixv_driver = {
  175         "ix", ixv_methods, sizeof(struct adapter),
  176 };
  177 
  178 extern devclass_t ixgbe_devclass;
  179 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
  180 MODULE_DEPEND(ixv, pci, 1, 1, 1);
  181 MODULE_DEPEND(ixv, ether, 1, 1, 1);
  182 
  183 /*
  184 ** TUNEABLE PARAMETERS:
  185 */
  186 
  187 /*
  188 ** AIM: Adaptive Interrupt Moderation
  189 ** which means that the interrupt rate
  190 ** is varied over time based on the
  191 ** traffic for that interrupt vector
  192 */
  193 static int ixv_enable_aim = FALSE;
  194 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
  195 
  196 /* How many packets rxeof tries to clean at a time */
  197 static int ixv_rx_process_limit = 128;
  198 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
  199 
  200 /* Flow control setting, default to full */
  201 static int ixv_flow_control = ixgbe_fc_full;
  202 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
  203 
  204 /*
  205  * Header split: this causes the hardware to DMA
  206  * the header into a separate mbuf from the payload.
  207  * It can be a performance win in some workloads, but
  208  * in others it actually hurts; it is off by default.
  209  */
  210 static bool ixv_header_split = FALSE;
  211 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
  212 
  213 /*
  214 ** Number of TX descriptors per ring,
  215 ** setting higher than RX as this seems
  216 ** the better performing choice.
  217 */
  218 static int ixv_txd = DEFAULT_TXD;
  219 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
  220 
  221 /* Number of RX descriptors per ring */
  222 static int ixv_rxd = DEFAULT_RXD;
  223 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
  224 
  225 /*
  226 ** Shadow VFTA table; this is needed because
  227 ** the real filter table gets cleared during
  228 ** a soft reset and we need to repopulate it.
  229 */
  230 static u32 ixv_shadow_vfta[VFTA_SIZE];
  231 
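As a point of reference for the shadow table above: the 4096 possible VLAN IDs are folded into VFTA_SIZE 32-bit words, one bit per VLAN, and the register/unregister handlers later in this file repopulate the hardware table from this copy after a reset. The helper below is only an illustrative sketch of the usual ixgbe index/bit split (it assumes the driver's u32/u16 typedefs) and is not code taken from this file:

static void
shadow_vfta_set(u32 *vfta, u16 vtag)
{
        u16 index = (vtag >> 5) & 0x7F;   /* which 32-bit word in the table */
        u16 bit = vtag & 0x1F;            /* which bit within that word     */

        vfta[index] |= (1 << bit);        /* e.g. VLAN 100 -> word 3, bit 4 */
}
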
  232 /*********************************************************************
  233  *  Device identification routine
  234  *
  235  *  ixv_probe determines if the driver should be loaded on
  236  *  adapter based on PCI vendor/device id of the adapter.
  237  *
  238  *  return BUS_PROBE_DEFAULT on success, positive on failure
  239  *********************************************************************/
  240 
  241 static int
  242 ixv_probe(device_t dev)
  243 {
  244         ixv_vendor_info_t *ent;
  245 
  246         u16     pci_vendor_id = 0;
  247         u16     pci_device_id = 0;
  248         u16     pci_subvendor_id = 0;
  249         u16     pci_subdevice_id = 0;
  250         char    adapter_name[256];
  251 
  252 
  253         pci_vendor_id = pci_get_vendor(dev);
  254         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
  255                 return (ENXIO);
  256 
  257         pci_device_id = pci_get_device(dev);
  258         pci_subvendor_id = pci_get_subvendor(dev);
  259         pci_subdevice_id = pci_get_subdevice(dev);
  260 
  261         ent = ixv_vendor_info_array;
  262         while (ent->vendor_id != 0) {
  263                 if ((pci_vendor_id == ent->vendor_id) &&
  264                     (pci_device_id == ent->device_id) &&
  265 
  266                     ((pci_subvendor_id == ent->subvendor_id) ||
  267                      (ent->subvendor_id == 0)) &&
  268 
  269                     ((pci_subdevice_id == ent->subdevice_id) ||
  270                      (ent->subdevice_id == 0))) {
  271                         sprintf(adapter_name, "%s, Version - %s",
  272                                 ixv_strings[ent->index],
  273                                 ixv_driver_version);
  274                         device_set_desc_copy(dev, adapter_name);
  275                         return (BUS_PROBE_DEFAULT);
  276                 }
  277                 ent++;
  278         }
  279         return (ENXIO);
  280 }
  281 
  282 /*********************************************************************
  283  *  Device initialization routine
  284  *
  285  *  The attach entry point is called when the driver is being loaded.
  286  *  This routine identifies the type of hardware, allocates all resources
  287  *  and initializes the hardware.
  288  *
  289  *  return 0 on success, positive on failure
  290  *********************************************************************/
  291 
  292 static int
  293 ixv_attach(device_t dev)
  294 {
  295         struct adapter *adapter;
  296         struct ixgbe_hw *hw;
  297         int             error = 0;
  298 
  299         INIT_DEBUGOUT("ixv_attach: begin");
  300 
  301         if (resource_disabled("ixgbe", device_get_unit(dev))) {
  302                 device_printf(dev, "Disabled by device hint\n");
  303                 return (ENXIO);
  304         }
  305 
  306         /* Allocate, clear, and link in our adapter structure */
  307         adapter = device_get_softc(dev);
  308         adapter->dev = adapter->osdep.dev = dev;
  309         hw = &adapter->hw;
  310 
  311         /* Core Lock Init*/
  312         IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
  313 
  314         /* SYSCTL APIs */
  315         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  316                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  317                         OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
  318                         adapter, 0, ixv_sysctl_stats, "I", "Statistics");
  319 
  320         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  321                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  322                         OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
  323                         adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
  324 
  325         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  326                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  327                         OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
  328                         adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
  329                 
  330         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  331                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  332                         OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
  333                         &ixv_enable_aim, 1, "Interrupt Moderation");
  334 
  335         /* Set up the timer callout */
  336         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
  337 
  338         /* Determine hardware revision */
  339         ixv_identify_hardware(adapter);
  340 
  341         /* Do base PCI setup - map BAR0 */
  342         if (ixv_allocate_pci_resources(adapter)) {
  343                 device_printf(dev, "Allocation of PCI resources failed\n");
  344                 error = ENXIO;
  345                 goto err_out;
  346         }
  347 
  348         /* Do descriptor calc and sanity checks */
  349         if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
  350             ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
  351                 device_printf(dev, "TXD config issue, using default!\n");
  352                 adapter->num_tx_desc = DEFAULT_TXD;
  353         } else
  354                 adapter->num_tx_desc = ixv_txd;
  355 
  356         if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
  357             ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
  358                 device_printf(dev, "RXD config issue, using default!\n");
  359                 adapter->num_rx_desc = DEFAULT_RXD;
  360         } else
  361                 adapter->num_rx_desc = ixv_rxd;
  362 
  363         /* Allocate our TX/RX Queues */
  364         if (ixv_allocate_queues(adapter)) {
  365                 error = ENOMEM;
  366                 goto err_out;
  367         }
  368 
  369         /*
  370         ** Initialize the shared code: it is
  371         ** at this point that the mac type is set.
  372         */
  373         error = ixgbe_init_shared_code(hw);
  374         if (error) {
  375                 device_printf(dev,"Shared Code Initialization Failure\n");
  376                 error = EIO;
  377                 goto err_late;
  378         }
  379 
  380         /* Setup the mailbox */
  381         ixgbe_init_mbx_params_vf(hw);
  382 
  383         ixgbe_reset_hw(hw);
  384 
  385         /* Get Hardware Flow Control setting */
  386         hw->fc.requested_mode = ixgbe_fc_full;
  387         hw->fc.pause_time = IXV_FC_PAUSE;
  388         hw->fc.low_water = IXV_FC_LO;
  389         hw->fc.high_water = IXV_FC_HI;
  390         hw->fc.send_xon = TRUE;
  391 
  392         error = ixgbe_init_hw(hw);
  393         if (error) {
  394                 device_printf(dev,"Hardware Initialization Failure\n");
  395                 error = EIO;
  396                 goto err_late;
  397         }
  398         
  399         error = ixv_allocate_msix(adapter); 
  400         if (error) 
  401                 goto err_late;
  402 
  403         /* Setup OS specific network interface */
  404         ixv_setup_interface(dev, adapter);
  405 
  406         /* Sysctl for limiting the amount of work done in the taskqueue */
  407         ixv_add_rx_process_limit(adapter, "rx_processing_limit",
  408             "max number of rx packets to process", &adapter->rx_process_limit,
  409             ixv_rx_process_limit);
  410 
  411         /* Do the stats setup */
  412         ixv_save_stats(adapter);
  413         ixv_init_stats(adapter);
  414 
  415         /* Register for VLAN events */
  416         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
  417             ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
  418         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
  419             ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
  420 
  421         INIT_DEBUGOUT("ixv_attach: end");
  422         return (0);
  423 
  424 err_late:
  425         ixv_free_transmit_structures(adapter);
  426         ixv_free_receive_structures(adapter);
  427 err_out:
  428         ixv_free_pci_resources(adapter);
  429         return (error);
  430 
  431 }
  432 
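A note on the descriptor sanity checks in ixv_attach() above: the advanced TX and RX descriptors are each 16 bytes, and DBA_ALIGN is 128 in this driver family, so the modulo test effectively requires the ring length to be a multiple of eight descriptors. A minimal standalone sketch of the same test, with illustrative names that are not part of the driver:

#include <stdbool.h>
#include <stddef.h>

/* Sketch of the ring-size check performed in ixv_attach(). */
static bool
ring_size_ok(int ndesc, size_t desc_size, size_t align, int lo, int hi)
{
        if (((ndesc * desc_size) % align) != 0)  /* total ring bytes must be a multiple of align */
                return (false);
        return (ndesc >= lo && ndesc <= hi);     /* and the count must stay within MIN/MAX       */
}

/* ring_size_ok(1024, 16, 128, lo, hi): 1024 * 16 is a multiple of 128, so the
   alignment test passes; a count such as 1030 would be rejected. */
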
  433 /*********************************************************************
  434  *  Device removal routine
  435  *
  436  *  The detach entry point is called when the driver is being removed.
  437  *  This routine stops the adapter and deallocates all the resources
  438  *  that were allocated for driver operation.
  439  *
  440  *  return 0 on success, positive on failure
  441  *********************************************************************/
  442 
  443 static int
  444 ixv_detach(device_t dev)
  445 {
  446         struct adapter *adapter = device_get_softc(dev);
  447         struct ix_queue *que = adapter->queues;
  448 
  449         INIT_DEBUGOUT("ixv_detach: begin");
  450 
  451         /* Make sure VLANS are not using driver */
  452         if (adapter->ifp->if_vlantrunk != NULL) {
  453                 device_printf(dev,"Vlan in use, detach first\n");
  454                 return (EBUSY);
  455         }
  456 
  457         IXV_CORE_LOCK(adapter);
  458         ixv_stop(adapter);
  459         IXV_CORE_UNLOCK(adapter);
  460 
  461         for (int i = 0; i < adapter->num_queues; i++, que++) {
  462                 if (que->tq) {
  463                         taskqueue_drain(que->tq, &que->que_task);
  464                         taskqueue_free(que->tq);
  465                 }
  466         }
  467 
  468         /* Drain the Link queue */
  469         if (adapter->tq) {
  470                 taskqueue_drain(adapter->tq, &adapter->mbx_task);
  471                 taskqueue_free(adapter->tq);
  472         }
  473 
  474         /* Unregister VLAN events */
  475         if (adapter->vlan_attach != NULL)
  476                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
  477         if (adapter->vlan_detach != NULL)
  478                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
  479 
  480         ether_ifdetach(adapter->ifp);
  481         callout_drain(&adapter->timer);
  482         ixv_free_pci_resources(adapter);
  483         bus_generic_detach(dev);
  484         if_free(adapter->ifp);
  485 
  486         ixv_free_transmit_structures(adapter);
  487         ixv_free_receive_structures(adapter);
  488 
  489         IXV_CORE_LOCK_DESTROY(adapter);
  490         return (0);
  491 }
  492 
  493 /*********************************************************************
  494  *
  495  *  Shutdown entry point
  496  *
  497  **********************************************************************/
  498 static int
  499 ixv_shutdown(device_t dev)
  500 {
  501         struct adapter *adapter = device_get_softc(dev);
  502         IXV_CORE_LOCK(adapter);
  503         ixv_stop(adapter);
  504         IXV_CORE_UNLOCK(adapter);
  505         return (0);
  506 }
  507 
  508 #if __FreeBSD_version < 800000
  509 /*********************************************************************
  510  *  Transmit entry point
  511  *
  512  *  ixv_start is called by the stack to initiate a transmit.
  513  *  The driver will remain in this routine as long as there are
  514  *  packets to transmit and transmit resources are available.
  515  *  In case resources are not available, the stack is notified and
  516  *  the packet is requeued.
  517  **********************************************************************/
  518 static void
  519 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
  520 {
  521         struct mbuf    *m_head;
  522         struct adapter *adapter = txr->adapter;
  523 
  524         IXV_TX_LOCK_ASSERT(txr);
  525 
  526         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
  527             IFF_DRV_RUNNING)
  528                 return;
  529         if (!adapter->link_active)
  530                 return;
  531 
  532         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
  533 
  534                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
  535                 if (m_head == NULL)
  536                         break;
  537 
  538                 if (ixv_xmit(txr, &m_head)) {
  539                         if (m_head == NULL)
  540                                 break;
  541                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  542                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
  543                         break;
  544                 }
  545                 /* Send a copy of the frame to the BPF listener */
  546                 ETHER_BPF_MTAP(ifp, m_head);
  547 
  548                 /* Set watchdog on */
  549                 txr->watchdog_check = TRUE;
  550                 txr->watchdog_time = ticks;
  551 
  552         }
  553         return;
  554 }
  555 
  556 /*
  557  * Legacy TX start - called by the stack, this
  558  * always uses the first tx ring, and should
  559  * not be used with multiqueue tx enabled.
  560  */
  561 static void
  562 ixv_start(struct ifnet *ifp)
  563 {
  564         struct adapter *adapter = ifp->if_softc;
  565         struct tx_ring  *txr = adapter->tx_rings;
  566 
  567         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  568                 IXV_TX_LOCK(txr);
  569                 ixv_start_locked(txr, ifp);
  570                 IXV_TX_UNLOCK(txr);
  571         }
  572         return;
  573 }
  574 
  575 #else
  576 
  577 /*
  578 ** Multiqueue Transmit driver
  579 **
  580 */
  581 static int
  582 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
  583 {
  584         struct adapter  *adapter = ifp->if_softc;
  585         struct ix_queue *que;
  586         struct tx_ring  *txr;
  587         int             i = 0, err = 0;
  588 
  589         /* Which queue to use */
  590         if ((m->m_flags & M_FLOWID) != 0)
  591                 i = m->m_pkthdr.flowid % adapter->num_queues;
  592 
  593         txr = &adapter->tx_rings[i];
  594         que = &adapter->queues[i];
  595 
  596         if (IXV_TX_TRYLOCK(txr)) {
  597                 err = ixv_mq_start_locked(ifp, txr, m);
  598                 IXV_TX_UNLOCK(txr);
  599         } else {
  600                 err = drbr_enqueue(ifp, txr->br, m);
  601                 taskqueue_enqueue(que->tq, &que->que_task);
  602         }
  603 
  604         return (err);
  605 }
  606 
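The queue selection at the top of ixv_mq_start() pins each flow to a single TX ring: when the stack has attached a flow id (M_FLOWID), the ring is chosen by a simple modulo, so with four queues a flowid of 13 always lands on ring 1. A minimal sketch of that selection, assuming kernel headers and using illustrative names only:

#include <sys/param.h>
#include <sys/mbuf.h>

static inline int
pick_tx_ring(const struct mbuf *m, int num_queues)
{
        /* Flows tagged by the stack stay on one ring; untagged traffic uses ring 0. */
        if (m->m_flags & M_FLOWID)
                return (m->m_pkthdr.flowid % num_queues);
        return (0);
}
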
  607 static int
  608 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
  609 {
  610         struct adapter  *adapter = txr->adapter;
  611         struct mbuf     *next;
  612         int             enqueued, err = 0;
  613 
  614         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
  615             IFF_DRV_RUNNING || adapter->link_active == 0) {
  616                 if (m != NULL)
  617                         err = drbr_enqueue(ifp, txr->br, m);
  618                 return (err);
  619         }
  620 
  621         /* Do a clean if descriptors are low */
  622         if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
  623                 ixv_txeof(txr);
  624 
  625         enqueued = 0;
  626         if (m == NULL) {
  627                 next = drbr_dequeue(ifp, txr->br);
  628         } else if (drbr_needs_enqueue(ifp, txr->br)) {
  629                 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
  630                         return (err);
  631                 next = drbr_dequeue(ifp, txr->br);
  632         } else
  633                 next = m;
  634 
  635         /* Process the queue */
  636         while (next != NULL) {
  637                 if ((err = ixv_xmit(txr, &next)) != 0) {
  638                         if (next != NULL)
  639                                 err = drbr_enqueue(ifp, txr->br, next);
  640                         break;
  641                 }
  642                 enqueued++;
  643                 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
  644                 /* Send a copy of the frame to the BPF listener */
  645                 ETHER_BPF_MTAP(ifp, next);
  646                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  647                         break;
  648                 if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
  649                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  650                         break;
  651                 }
  652                 next = drbr_dequeue(ifp, txr->br);
  653         }
  654 
  655         if (enqueued > 0) {
  656                 /* Set watchdog on */
  657                 txr->watchdog_check = TRUE;
  658                 txr->watchdog_time = ticks;
  659         }
  660 
  661         return (err);
  662 }
  663 
  664 /*
  665 ** Flush all ring buffers
  666 */
  667 static void     
  668 ixv_qflush(struct ifnet *ifp)
  669 {
  670         struct adapter  *adapter = ifp->if_softc;
  671         struct tx_ring  *txr = adapter->tx_rings;
  672         struct mbuf     *m;
  673 
  674         for (int i = 0; i < adapter->num_queues; i++, txr++) {
  675                 IXV_TX_LOCK(txr);
  676                 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
  677                         m_freem(m);
  678                 IXV_TX_UNLOCK(txr);
  679         }
  680         if_qflush(ifp);
  681 }
  682 
  683 #endif
  684 
  685 /*********************************************************************
  686  *  Ioctl entry point
  687  *
  688  *  ixv_ioctl is called when the user wants to configure the
  689  *  interface.
  690  *
  691  *  return 0 on success, positive on failure
  692  **********************************************************************/
  693 
  694 static int
  695 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
  696 {
  697         struct adapter  *adapter = ifp->if_softc;
  698         struct ifreq    *ifr = (struct ifreq *) data;
  699 #if defined(INET) || defined(INET6)
  700         struct ifaddr   *ifa = (struct ifaddr *) data;
  701         bool            avoid_reset = FALSE;
  702 #endif
  703         int             error = 0;
  704 
  705         switch (command) {
  706 
  707         case SIOCSIFADDR:
  708 #ifdef INET
  709                 if (ifa->ifa_addr->sa_family == AF_INET)
  710                         avoid_reset = TRUE;
  711 #endif
  712 #ifdef INET6
  713                 if (ifa->ifa_addr->sa_family == AF_INET6)
  714                         avoid_reset = TRUE;
  715 #endif
  716 #if defined(INET) || defined(INET6)
  717                 /*
  718                 ** Calling init results in link renegotiation,
  719                 ** so we avoid doing it when possible.
  720                 */
  721                 if (avoid_reset) {
  722                         ifp->if_flags |= IFF_UP;
  723                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
  724                                 ixv_init(adapter);
  725                         if (!(ifp->if_flags & IFF_NOARP))
  726                                 arp_ifinit(ifp, ifa);
  727                 } else
  728                         error = ether_ioctl(ifp, command, data);
  729                 break;
  730 #endif
  731         case SIOCSIFMTU:
  732                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
  733                 if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
  734                         error = EINVAL;
  735                 } else {
  736                         IXV_CORE_LOCK(adapter);
  737                         ifp->if_mtu = ifr->ifr_mtu;
  738                         adapter->max_frame_size =
  739                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
  740                         ixv_init_locked(adapter);
  741                         IXV_CORE_UNLOCK(adapter);
  742                 }
  743                 break;
  744         case SIOCSIFFLAGS:
  745                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
  746                 IXV_CORE_LOCK(adapter);
  747                 if (ifp->if_flags & IFF_UP) {
  748                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  749                                 ixv_init_locked(adapter);
  750                 } else
  751                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  752                                 ixv_stop(adapter);
  753                 adapter->if_flags = ifp->if_flags;
  754                 IXV_CORE_UNLOCK(adapter);
  755                 break;
  756         case SIOCADDMULTI:
  757         case SIOCDELMULTI:
  758                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
  759                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  760                         IXV_CORE_LOCK(adapter);
  761                         ixv_disable_intr(adapter);
  762                         ixv_set_multi(adapter);
  763                         ixv_enable_intr(adapter);
  764                         IXV_CORE_UNLOCK(adapter);
  765                 }
  766                 break;
  767         case SIOCSIFMEDIA:
  768         case SIOCGIFMEDIA:
  769                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
  770                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
  771                 break;
  772         case SIOCSIFCAP:
  773         {
  774                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
  775                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
  776                 if (mask & IFCAP_HWCSUM)
  777                         ifp->if_capenable ^= IFCAP_HWCSUM;
  778                 if (mask & IFCAP_TSO4)
  779                         ifp->if_capenable ^= IFCAP_TSO4;
  780                 if (mask & IFCAP_LRO)
  781                         ifp->if_capenable ^= IFCAP_LRO;
  782                 if (mask & IFCAP_VLAN_HWTAGGING)
  783                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
  784                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  785                         IXV_CORE_LOCK(adapter);
  786                         ixv_init_locked(adapter);
  787                         IXV_CORE_UNLOCK(adapter);
  788                 }
  789                 VLAN_CAPABILITIES(ifp);
  790                 break;
  791         }
  792 
  793         default:
  794                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
  795                 error = ether_ioctl(ifp, command, data);
  796                 break;
  797         }
  798 
  799         return (error);
  800 }
  801 
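The SIOCSIFMTU case above derives the hardware frame limit from the requested MTU. A worked line of that arithmetic, as a sketch in comment form (the constants are the standard Ethernet header and CRC sizes; IXV_MAX_FRAME_SIZE is left symbolic since its value comes from ixv.h):

/* max_frame_size = if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN                  */
/* e.g. the default MTU of 1500 gives 1500 + 14 + 4 = 1518 bytes, while a   */
/* jumbo MTU of 9000 would give 9018 bytes, subject to IXV_MAX_FRAME_SIZE.  */
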
  802 /*********************************************************************
  803  *  Init entry point
  804  *
  805  *  This routine is used in two ways. It is used by the stack as the
  806  *  init entry point in the network interface structure. It is also used
  807  *  by the driver as a hw/sw initialization routine to get to a
  808  *  consistent state.
  809  *
  810  *  return 0 on success, positive on failure
  811  **********************************************************************/
  812 #define IXGBE_MHADD_MFS_SHIFT 16
  813 
  814 static void
  815 ixv_init_locked(struct adapter *adapter)
  816 {
  817         struct ifnet    *ifp = adapter->ifp;
  818         device_t        dev = adapter->dev;
  819         struct ixgbe_hw *hw = &adapter->hw;
  820         u32             mhadd, gpie;
  821 
  822         INIT_DEBUGOUT("ixv_init: begin");
  823         mtx_assert(&adapter->core_mtx, MA_OWNED);
  824         hw->adapter_stopped = FALSE;
  825         ixgbe_stop_adapter(hw);
  826         callout_stop(&adapter->timer);
  827 
  828         /* reprogram the RAR[0] in case user changed it. */
  829         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
  830 
  831         /* Get the latest mac address, User can use a LAA */
  832         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
  833              IXGBE_ETH_LENGTH_OF_ADDRESS);
  834         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
  835         hw->addr_ctrl.rar_used_count = 1;
  836 
  837         /* Prepare transmit descriptors and buffers */
  838         if (ixv_setup_transmit_structures(adapter)) {
  839                 device_printf(dev,"Could not setup transmit structures\n");
  840                 ixv_stop(adapter);
  841                 return;
  842         }
  843 
  844         ixgbe_reset_hw(hw);
  845         ixv_initialize_transmit_units(adapter);
  846 
  847         /* Setup Multicast table */
  848         ixv_set_multi(adapter);
  849 
  850         /*
  851         ** Determine the correct mbuf pool
  852         ** for doing jumbo/headersplit
  853         */
  854         if (ifp->if_mtu > ETHERMTU)
  855                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
  856         else
  857                 adapter->rx_mbuf_sz = MCLBYTES;
  858 
  859         /* Prepare receive descriptors and buffers */
  860         if (ixv_setup_receive_structures(adapter)) {
  861                 device_printf(dev,"Could not setup receive structures\n");
  862                 ixv_stop(adapter);
  863                 return;
  864         }
  865 
  866         /* Configure RX settings */
  867         ixv_initialize_receive_units(adapter);
  868 
  869         /* Enable Enhanced MSIX mode */
  870         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
  871         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
  872         gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
  873         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  874 
  875         /* Set the various hardware offload abilities */
  876         ifp->if_hwassist = 0;
  877         if (ifp->if_capenable & IFCAP_TSO4)
  878                 ifp->if_hwassist |= CSUM_TSO;
  879         if (ifp->if_capenable & IFCAP_TXCSUM) {
  880                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
  881 #if __FreeBSD_version >= 800000
  882                 ifp->if_hwassist |= CSUM_SCTP;
  883 #endif
  884         }
  885         
  886         /* Set MTU size */
  887         if (ifp->if_mtu > ETHERMTU) {
  888                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
  889                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
  890                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
  891                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
  892         }
  893 
  894         /* Set up VLAN offload and filter */
  895         ixv_setup_vlan_support(adapter);
  896 
  897         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
  898 
  899         /* Set up MSI/X routing */
  900         ixv_configure_ivars(adapter);
  901 
  902         /* Set up auto-mask */
  903         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
  904 
  905         /* Set moderation on the Link interrupt */
  906         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
  907 
  908         /* Stats init */
  909         ixv_init_stats(adapter);
  910 
  911         /* Config/Enable Link */
  912         ixv_config_link(adapter);
  913 
  914         /* And now turn on interrupts */
  915         ixv_enable_intr(adapter);
  916 
  917         /* Now inform the stack we're ready */
  918         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  919         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  920 
  921         return;
  922 }
  923 
  924 static void
  925 ixv_init(void *arg)
  926 {
  927         struct adapter *adapter = arg;
  928 
  929         IXV_CORE_LOCK(adapter);
  930         ixv_init_locked(adapter);
  931         IXV_CORE_UNLOCK(adapter);
  932         return;
  933 }
  934 
  935 
  936 /*
  937 **
  938 ** MSIX Interrupt Handlers and Tasklets
  939 **
  940 */
  941 
  942 static inline void
  943 ixv_enable_queue(struct adapter *adapter, u32 vector)
  944 {
  945         struct ixgbe_hw *hw = &adapter->hw;
  946         u32     queue = 1 << vector;
  947         u32     mask;
  948 
  949         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
  950         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
  951 }
  952 
  953 static inline void
  954 ixv_disable_queue(struct adapter *adapter, u32 vector)
  955 {
  956         struct ixgbe_hw *hw = &adapter->hw;
  957         u64     queue = (u64)(1 << vector);
  958         u32     mask;
  959 
  960         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
  961         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
  962 }
  963 
  964 static inline void
  965 ixv_rearm_queues(struct adapter *adapter, u64 queues)
  966 {
  967         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
  968         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
  969 }
  970 
  971 
  972 static void
  973 ixv_handle_que(void *context, int pending)
  974 {
  975         struct ix_queue *que = context;
  976         struct adapter  *adapter = que->adapter;
  977         struct tx_ring  *txr = que->txr;
  978         struct ifnet    *ifp = adapter->ifp;
  979         bool            more;
  980 
  981         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  982                 more = ixv_rxeof(que, adapter->rx_process_limit);
  983                 IXV_TX_LOCK(txr);
  984                 ixv_txeof(txr);
  985 #if __FreeBSD_version >= 800000
  986                 if (!drbr_empty(ifp, txr->br))
  987                         ixv_mq_start_locked(ifp, txr, NULL);
  988 #else
  989                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
  990                         ixv_start_locked(txr, ifp);
  991 #endif
  992                 IXV_TX_UNLOCK(txr);
  993                 if (more) {
  994                         taskqueue_enqueue(que->tq, &que->que_task);
  995                         return;
  996                 }
  997         }
  998 
  999         /* Reenable this interrupt */
 1000         ixv_enable_queue(adapter, que->msix);
 1001         return;
 1002 }
 1003 
 1004 /*********************************************************************
 1005  *
 1006  *  MSI-X Queue Interrupt Service routine
 1007  *
 1008  **********************************************************************/
 1009 void
 1010 ixv_msix_que(void *arg)
 1011 {
 1012         struct ix_queue *que = arg;
 1013         struct adapter  *adapter = que->adapter;
 1014         struct tx_ring  *txr = que->txr;
 1015         struct rx_ring  *rxr = que->rxr;
 1016         bool            more_tx, more_rx;
 1017         u32             newitr = 0;
 1018 
 1019         ixv_disable_queue(adapter, que->msix);
 1020         ++que->irqs;
 1021 
 1022         more_rx = ixv_rxeof(que, adapter->rx_process_limit);
 1023 
 1024         IXV_TX_LOCK(txr);
 1025         more_tx = ixv_txeof(txr);
 1026         IXV_TX_UNLOCK(txr);
 1027 
 1028         more_rx = ixv_rxeof(que, adapter->rx_process_limit);
 1029 
 1030         /* Do AIM now? */
 1031 
 1032         if (ixv_enable_aim == FALSE)
 1033                 goto no_calc;
 1034         /*
 1035         ** Do Adaptive Interrupt Moderation:
 1036         **  - Write out last calculated setting
 1037         **  - Calculate based on average size over
 1038         **    the last interval.
 1039         */
 1040         if (que->eitr_setting)
 1041                 IXGBE_WRITE_REG(&adapter->hw,
 1042                     IXGBE_VTEITR(que->msix),
 1043                     que->eitr_setting);
 1044  
 1045         que->eitr_setting = 0;
 1046 
 1047         /* Idle, do nothing */
 1048         if ((txr->bytes == 0) && (rxr->bytes == 0))
 1049                 goto no_calc;
 1050                                 
 1051         if ((txr->bytes) && (txr->packets))
 1052                 newitr = txr->bytes/txr->packets;
 1053         if ((rxr->bytes) && (rxr->packets))
 1054                 newitr = max(newitr,
 1055                     (rxr->bytes / rxr->packets));
 1056         newitr += 24; /* account for hardware frame, crc */
 1057 
 1058         /* set an upper boundary */
 1059         newitr = min(newitr, 3000);
 1060 
 1061         /* Be nice to the mid range */
 1062         if ((newitr > 300) && (newitr < 1200))
 1063                 newitr = (newitr / 3);
 1064         else
 1065                 newitr = (newitr / 2);
 1066 
 1067         newitr |= newitr << 16;
 1068                  
 1069         /* save for next interrupt */
 1070         que->eitr_setting = newitr;
 1071 
 1072         /* Reset state */
 1073         txr->bytes = 0;
 1074         txr->packets = 0;
 1075         rxr->bytes = 0;
 1076         rxr->packets = 0;
 1077 
 1078 no_calc:
 1079         if (more_tx || more_rx)
 1080                 taskqueue_enqueue(que->tq, &que->que_task);
 1081         else /* Reenable this interrupt */
 1082                 ixv_enable_queue(adapter, que->msix);
 1083         return;
 1084 }
 1085 
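To make the moderation arithmetic above concrete, the same calculation is pulled out below as a standalone sketch, with a worked pass for a queue that averaged full-sized 1500-byte frames over the last interval. The numbers are hypothetical, and the real handler feeds in the larger of the TX and RX averages:

#include <stdint.h>

/* Standalone sketch of the AIM formula used in ixv_msix_que(); not driver code. */
static inline uint32_t
aim_newitr(uint32_t bytes, uint32_t packets)
{
        uint32_t newitr;

        if (packets == 0)
                return (0);
        newitr = bytes / packets + 24;          /* average frame size + frame/crc allowance */
        if (newitr > 3000)                      /* upper boundary                            */
                newitr = 3000;
        if (newitr > 300 && newitr < 1200)
                newitr = newitr / 3;            /* be nice to the mid range                  */
        else
                newitr = newitr / 2;
        /*
         * Worked example: 96000 bytes / 64 packets = 1500; 1500 + 24 = 1524;
         * 1524 is outside (300, 1200), so 1524 / 2 = 762, which is mirrored
         * into the high 16 bits before being saved for the next VTEITR write.
         */
        return (newitr | (newitr << 16));
}
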
 1086 static void
 1087 ixv_msix_mbx(void *arg)
 1088 {
 1089         struct adapter  *adapter = arg;
 1090         struct ixgbe_hw *hw = &adapter->hw;
 1091         u32             reg;
 1092 
 1093         ++adapter->mbx_irq;
 1094 
 1095         /* First get the cause */
 1096         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
 1097         /* Clear interrupt with write */
 1098         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
 1099 
 1100         /* Link status change */
 1101         if (reg & IXGBE_EICR_LSC)
 1102                 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
 1103 
 1104         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
 1105         return;
 1106 }
 1107 
 1108 /*********************************************************************
 1109  *
 1110  *  Media Ioctl callback
 1111  *
 1112  *  This routine is called whenever the user queries the status of
 1113  *  the interface using ifconfig.
 1114  *
 1115  **********************************************************************/
 1116 static void
 1117 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
 1118 {
 1119         struct adapter *adapter = ifp->if_softc;
 1120 
 1121         INIT_DEBUGOUT("ixv_media_status: begin");
 1122         IXV_CORE_LOCK(adapter);
 1123         ixv_update_link_status(adapter);
 1124 
 1125         ifmr->ifm_status = IFM_AVALID;
 1126         ifmr->ifm_active = IFM_ETHER;
 1127 
 1128         if (!adapter->link_active) {
 1129                 IXV_CORE_UNLOCK(adapter);
 1130                 return;
 1131         }
 1132 
 1133         ifmr->ifm_status |= IFM_ACTIVE;
 1134 
 1135         switch (adapter->link_speed) {
 1136                 case IXGBE_LINK_SPEED_1GB_FULL:
 1137                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
 1138                         break;
 1139                 case IXGBE_LINK_SPEED_10GB_FULL:
 1140                         ifmr->ifm_active |= IFM_FDX;
 1141                         break;
 1142         }
 1143 
 1144         IXV_CORE_UNLOCK(adapter);
 1145 
 1146         return;
 1147 }
 1148 
 1149 /*********************************************************************
 1150  *
 1151  *  Media Ioctl callback
 1152  *
 1153  *  This routine is called when the user changes speed/duplex using
 1154  *  media/mediaopt option with ifconfig.
 1155  *
 1156  **********************************************************************/
 1157 static int
 1158 ixv_media_change(struct ifnet * ifp)
 1159 {
 1160         struct adapter *adapter = ifp->if_softc;
 1161         struct ifmedia *ifm = &adapter->media;
 1162 
 1163         INIT_DEBUGOUT("ixv_media_change: begin");
 1164 
 1165         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1166                 return (EINVAL);
 1167 
 1168         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 1169         case IFM_AUTO:
 1170                 break;
 1171         default:
 1172                 device_printf(adapter->dev, "Only auto media type\n");
 1173                 return (EINVAL);
 1174         }
 1175 
 1176         return (0);
 1177 }
 1178 
 1179 /*********************************************************************
 1180  *
 1181  *  This routine maps the mbufs to tx descriptors, allowing the
 1182  *  TX engine to transmit the packets. 
 1183  *      - return 0 on success, positive on failure
 1184  *
 1185  **********************************************************************/
 1186 
 1187 static int
 1188 ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
 1189 {
 1190         struct adapter  *adapter = txr->adapter;
 1191         u32             olinfo_status = 0, cmd_type_len;
 1192         u32             paylen = 0;
 1193         int             i, j, error, nsegs;
 1194         int             first, last = 0;
 1195         struct mbuf     *m_head;
 1196         bus_dma_segment_t segs[32];
 1197         bus_dmamap_t    map;
 1198         struct ixv_tx_buf *txbuf;
 1199         union ixgbe_adv_tx_desc *txd = NULL;
 1200 
 1201         m_head = *m_headp;
 1202 
 1203         /* Basic descriptor defines */
 1204         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
 1205             IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
 1206 
 1207         if (m_head->m_flags & M_VLANTAG)
 1208                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
 1209 
 1210         /*
 1211          * Important to capture the first descriptor
 1212          * used because it will contain the index of
 1213          * the one we tell the hardware to report back
 1214          */
 1215         first = txr->next_avail_desc;
 1216         txbuf = &txr->tx_buffers[first];
 1217         map = txbuf->map;
 1218 
 1219         /*
 1220          * Map the packet for DMA.
 1221          */
 1222         error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 1223             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1224 
 1225         if (error == EFBIG) {
 1226                 struct mbuf *m;
 1227 
 1228                 m = m_defrag(*m_headp, M_DONTWAIT);
 1229                 if (m == NULL) {
 1230                         adapter->mbuf_defrag_failed++;
 1231                         m_freem(*m_headp);
 1232                         *m_headp = NULL;
 1233                         return (ENOBUFS);
 1234                 }
 1235                 *m_headp = m;
 1236 
 1237                 /* Try it again */
 1238                 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 1239                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 1240 
 1241                 if (error == ENOMEM) {
 1242                         adapter->no_tx_dma_setup++;
 1243                         return (error);
 1244                 } else if (error != 0) {
 1245                         adapter->no_tx_dma_setup++;
 1246                         m_freem(*m_headp);
 1247                         *m_headp = NULL;
 1248                         return (error);
 1249                 }
 1250         } else if (error == ENOMEM) {
 1251                 adapter->no_tx_dma_setup++;
 1252                 return (error);
 1253         } else if (error != 0) {
 1254                 adapter->no_tx_dma_setup++;
 1255                 m_freem(*m_headp);
 1256                 *m_headp = NULL;
 1257                 return (error);
 1258         }
 1259 
 1260         /* Make certain there are enough descriptors */
 1261         if (nsegs > txr->tx_avail - 2) {
 1262                 txr->no_desc_avail++;
 1263                 error = ENOBUFS;
 1264                 goto xmit_fail;
 1265         }
 1266         m_head = *m_headp;
 1267 
 1268         /*
 1269         ** Set up the appropriate offload context;
 1270         ** this becomes the first descriptor of 
 1271         ** a packet.
 1272         */
 1273         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
 1274                 if (ixv_tso_setup(txr, m_head, &paylen)) {
 1275                         cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 1276                         olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
 1277                         olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
 1278                         olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
 1279                         ++adapter->tso_tx;
 1280                 } else
 1281                         return (ENXIO);
 1282         } else if (ixv_tx_ctx_setup(txr, m_head))
 1283                 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
 1284 
 1285         /* Record payload length */
 1286         if (paylen == 0)
 1287                 olinfo_status |= m_head->m_pkthdr.len <<
 1288                     IXGBE_ADVTXD_PAYLEN_SHIFT;
 1289 
 1290         i = txr->next_avail_desc;
 1291         for (j = 0; j < nsegs; j++) {
 1292                 bus_size_t seglen;
 1293                 bus_addr_t segaddr;
 1294 
 1295                 txbuf = &txr->tx_buffers[i];
 1296                 txd = &txr->tx_base[i];
 1297                 seglen = segs[j].ds_len;
 1298                 segaddr = htole64(segs[j].ds_addr);
 1299 
 1300                 txd->read.buffer_addr = segaddr;
 1301                 txd->read.cmd_type_len = htole32(txr->txd_cmd |
 1302                     cmd_type_len |seglen);
 1303                 txd->read.olinfo_status = htole32(olinfo_status);
 1304                 last = i; /* descriptor that will get completion IRQ */
 1305 
 1306                 if (++i == adapter->num_tx_desc)
 1307                         i = 0;
 1308 
 1309                 txbuf->m_head = NULL;
 1310                 txbuf->eop_index = -1;
 1311         }
 1312 
 1313         txd->read.cmd_type_len |=
 1314             htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
 1315         txr->tx_avail -= nsegs;
 1316         txr->next_avail_desc = i;
 1317 
 1318         txbuf->m_head = m_head;
 1319         txr->tx_buffers[first].map = txbuf->map;
 1320         txbuf->map = map;
 1321         bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
 1322 
 1323         /* Set the index of the descriptor that will be marked done */
 1324         txbuf = &txr->tx_buffers[first];
 1325         txbuf->eop_index = last;
 1326 
 1327         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 1328             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1329         /*
 1330          * Advance the Transmit Descriptor Tail (TDT); this tells the
 1331          * hardware that this frame is available to transmit.
 1332          */
 1333         ++txr->total_packets;
 1334         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
 1335 
 1336         return (0);
 1337 
 1338 xmit_fail:
 1339         bus_dmamap_unload(txr->txtag, txbuf->map);
 1340         return (error);
 1341 
 1342 }
 1343 
 1344 
 1345 /*********************************************************************
 1346  *  Multicast Update
 1347  *
 1348  *  This routine is called whenever multicast address list is updated.
 1349  *
 1350  **********************************************************************/
 1351 #define IXGBE_RAR_ENTRIES 16
 1352 
 1353 static void
 1354 ixv_set_multi(struct adapter *adapter)
 1355 {
 1356         u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
 1357         u8      *update_ptr;
 1358         struct  ifmultiaddr *ifma;
 1359         int     mcnt = 0;
 1360         struct ifnet   *ifp = adapter->ifp;
 1361 
 1362         IOCTL_DEBUGOUT("ixv_set_multi: begin");
 1363 
 1364 #if __FreeBSD_version < 800000
 1365         IF_ADDR_LOCK(ifp);
 1366 #else
 1367         if_maddr_rlock(ifp);
 1368 #endif
 1369         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1370                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1371                         continue;
 1372                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
 1373                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
 1374                     IXGBE_ETH_LENGTH_OF_ADDRESS);
 1375                 mcnt++;
 1376         }
 1377 #if __FreeBSD_version < 800000
 1378         IF_ADDR_UNLOCK(ifp);
 1379 #else
 1380         if_maddr_runlock(ifp);
 1381 #endif
 1382 
 1383         update_ptr = mta;
 1384 
 1385         ixgbe_update_mc_addr_list(&adapter->hw,
 1386             update_ptr, mcnt, ixv_mc_array_itr);
 1387 
 1388         return;
 1389 }
 1390 
 1391 /*
 1392  * This is an iterator function now needed by the multicast
 1393  * shared code. It simply feeds the shared code routine the
 1394  * addresses in the array of ixv_set_multi() one by one.
 1395  */
 1396 static u8 *
 1397 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
 1398 {
 1399         u8 *addr = *update_ptr;
 1400         u8 *newptr;
 1401         *vmdq = 0;
 1402 
 1403         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
 1404         *update_ptr = newptr;
 1405         return addr;
 1406 }
 1407 
 1408 /*********************************************************************
 1409  *  Timer routine
 1410  *
 1411  *  This routine checks for link status, updates statistics,
 1412  *  and runs the watchdog check.
 1413  *
 1414  **********************************************************************/
 1415 
 1416 static void
 1417 ixv_local_timer(void *arg)
 1418 {
 1419         struct adapter  *adapter = arg;
 1420         device_t        dev = adapter->dev;
 1421         struct tx_ring  *txr = adapter->tx_rings;
 1422         int             i;
 1423 
 1424         mtx_assert(&adapter->core_mtx, MA_OWNED);
 1425 
 1426         ixv_update_link_status(adapter);
 1427 
 1428         /* Stats Update */
 1429         ixv_update_stats(adapter);
 1430 
 1431         /*
 1432          * If the interface has been paused
 1433          * then don't do the watchdog check
 1434          */
 1435         if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
 1436                 goto out;
 1437         /*
 1438         ** Check the time since any descriptor was last cleaned
 1439         */
 1440         for (i = 0; i < adapter->num_queues; i++, txr++) {
 1441                 IXV_TX_LOCK(txr);
 1442                 if (txr->watchdog_check == FALSE) {
 1443                         IXV_TX_UNLOCK(txr);
 1444                         continue;
 1445                 }
 1446                 if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
 1447                         goto hung;
 1448                 IXV_TX_UNLOCK(txr);
 1449         }
 1450 out:
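              /* Re-arm the queue interrupts and reschedule this timer for one second from now */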
 1451         ixv_rearm_queues(adapter, adapter->que_mask);
 1452         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
 1453         return;
 1454 
 1455 hung:
 1456         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
 1457         device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
 1458             IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
 1459             IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
 1460         device_printf(dev,"TX(%d) desc avail = %d,"
 1461             "Next TX to Clean = %d\n",
 1462             txr->me, txr->tx_avail, txr->next_to_clean);
 1463         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1464         adapter->watchdog_events++;
 1465         IXV_TX_UNLOCK(txr);
 1466         ixv_init_locked(adapter);
 1467 }
 1468 
 1469 /*
 1470 ** Note: this routine updates the OS on the link state;
 1471 **      the real check of the hardware only happens with
 1472 **      a link interrupt.
 1473 */
 1474 static void
 1475 ixv_update_link_status(struct adapter *adapter)
 1476 {
 1477         struct ifnet    *ifp = adapter->ifp;
 1478         struct tx_ring *txr = adapter->tx_rings;
 1479         device_t dev = adapter->dev;
 1480 
 1481 
 1482         if (adapter->link_up){ 
 1483                 if (adapter->link_active == FALSE) {
 1484                         if (bootverbose)
 1485                                 device_printf(dev,"Link is up %d Gbps %s \n",
 1486                                     ((adapter->link_speed == 128)? 10:1),
 1487                                     "Full Duplex");
 1488                         adapter->link_active = TRUE;
 1489                         if_link_state_change(ifp, LINK_STATE_UP);
 1490                 }
 1491         } else { /* Link down */
 1492                 if (adapter->link_active == TRUE) {
 1493                         if (bootverbose)
 1494                                 device_printf(dev,"Link is Down\n");
 1495                         if_link_state_change(ifp, LINK_STATE_DOWN);
 1496                         adapter->link_active = FALSE;
 1497                         for (int i = 0; i < adapter->num_queues;
 1498                             i++, txr++)
 1499                                 txr->watchdog_check = FALSE;
 1500                 }
 1501         }
 1502 
 1503         return;
 1504 }
 1505 
 1506 
 1507 /*********************************************************************
 1508  *
 1509  *  This routine disables all traffic on the adapter by issuing a
 1510  *  global reset on the MAC and deallocates TX/RX buffers.
 1511  *
 1512  **********************************************************************/
 1513 
 1514 static void
 1515 ixv_stop(void *arg)
 1516 {
 1517         struct ifnet   *ifp;
 1518         struct adapter *adapter = arg;
 1519         struct ixgbe_hw *hw = &adapter->hw;
 1520         ifp = adapter->ifp;
 1521 
 1522         mtx_assert(&adapter->core_mtx, MA_OWNED);
 1523 
 1524         INIT_DEBUGOUT("ixv_stop: begin\n");
 1525         ixv_disable_intr(adapter);
 1526 
 1527         /* Tell the stack that the interface is no longer active */
 1528         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1529 
 1530         ixgbe_reset_hw(hw);
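              /* Clear the stopped flag so that ixgbe_stop_adapter() below runs its full shutdown sequence */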
 1531         adapter->hw.adapter_stopped = FALSE;
 1532         ixgbe_stop_adapter(hw);
 1533         callout_stop(&adapter->timer);
 1534 
 1535         /* reprogram the RAR[0] in case user changed it. */
 1536         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 1537 
 1538         return;
 1539 }
 1540 
 1541 
 1542 /*********************************************************************
 1543  *
 1544  *  Determine hardware revision.
 1545  *
 1546  **********************************************************************/
 1547 static void
 1548 ixv_identify_hardware(struct adapter *adapter)
 1549 {
 1550         device_t        dev = adapter->dev;
 1551         u16             pci_cmd_word;
 1552 
 1553         /*
 1554         ** Make sure BUSMASTER is set; on a VM under
 1555         ** KVM it may not be, and that will break things.
 1556         */
 1557         pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
 1558         if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
 1559             (pci_cmd_word & PCIM_CMD_MEMEN))) {
 1560                 INIT_DEBUGOUT("Memory Access and/or Bus Master "
 1561                     "bits were not set!\n");
 1562                 pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
 1563                 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
 1564         }
 1565 
 1566         /* Save off the information about this board */
 1567         adapter->hw.vendor_id = pci_get_vendor(dev);
 1568         adapter->hw.device_id = pci_get_device(dev);
 1569         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
 1570         adapter->hw.subsystem_vendor_id =
 1571             pci_read_config(dev, PCIR_SUBVEND_0, 2);
 1572         adapter->hw.subsystem_device_id =
 1573             pci_read_config(dev, PCIR_SUBDEV_0, 2);
 1574 
 1575         return;
 1576 }
 1577 
 1578 /*********************************************************************
 1579  *
 1580  *  Setup MSIX Interrupt resources and handlers 
 1581  *
 1582  **********************************************************************/
 1583 static int
 1584 ixv_allocate_msix(struct adapter *adapter)
 1585 {
 1586         device_t        dev = adapter->dev;
 1587         struct          ix_queue *que = adapter->queues;
 1588         int             error, rid, vector = 0;
 1589 
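              /* Allocate and set up one MSIX vector and handler per queue */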
 1590         for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
 1591                 rid = vector + 1;
 1592                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1593                     RF_SHAREABLE | RF_ACTIVE);
 1594                 if (que->res == NULL) {
 1595                         device_printf(dev,"Unable to allocate"
 1596                             " bus resource: que interrupt [%d]\n", vector);
 1597                         return (ENXIO);
 1598                 }
 1599                 /* Set the handler function */
 1600                 error = bus_setup_intr(dev, que->res,
 1601                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
 1602                     ixv_msix_que, que, &que->tag);
 1603                 if (error) {
 1604                         que->res = NULL;
 1605                         device_printf(dev, "Failed to register QUE handler");
 1606                         return (error);
 1607                 }
 1608 #if __FreeBSD_version >= 800504
 1609                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
 1610 #endif
 1611                 que->msix = vector;
 1612                 adapter->que_mask |= (u64)(1 << que->msix);
 1613                 /*
 1614                 ** Bind the MSIX vector, and thus the
 1615                 ** ring, to the corresponding CPU.
 1616                 */
 1617                 if (adapter->num_queues > 1)
 1618                         bus_bind_intr(dev, que->res, i);
 1619 
 1620                 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
 1621                 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
 1622                     taskqueue_thread_enqueue, &que->tq);
 1623                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
 1624                     device_get_nameunit(adapter->dev));
 1625         }
 1626 
 1627         /* and Mailbox */
 1628         rid = vector + 1;
 1629         adapter->res = bus_alloc_resource_any(dev,
 1630             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
 1631         if (!adapter->res) {
 1632                 device_printf(dev,"Unable to allocate"
 1633             " bus resource: MBX interrupt [%d]\n", rid);
 1634                 return (ENXIO);
 1635         }
 1636         /* Set the mbx handler function */
 1637         error = bus_setup_intr(dev, adapter->res,
 1638             INTR_TYPE_NET | INTR_MPSAFE, NULL,
 1639             ixv_msix_mbx, adapter, &adapter->tag);
 1640         if (error) {
 1641                 adapter->res = NULL;
 1642                 device_printf(dev, "Failed to register LINK handler");
 1643                 return (error);
 1644         }
 1645 #if __FreeBSD_version >= 800504
 1646         bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
 1647 #endif
 1648         adapter->mbxvec = vector;
 1649         /* Tasklets for Mailbox */
 1650         TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
 1651         adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
 1652             taskqueue_thread_enqueue, &adapter->tq);
 1653         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
 1654             device_get_nameunit(adapter->dev));
 1655         /*
 1656         ** Due to a broken design, QEMU will fail to properly
 1657         ** enable the guest for MSIX unless all the vectors in
 1658         ** the table are set up, so we must rewrite the
 1659         ** ENABLE bit in the MSIX control register again at this
 1660         ** point to get it to initialize us successfully.
 1661         */
 1662         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
 1663                 int msix_ctrl;
 1664                 pci_find_cap(dev, PCIY_MSIX, &rid);
 1665                 rid += PCIR_MSIX_CTRL;
 1666                 msix_ctrl = pci_read_config(dev, rid, 2);
 1667                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
 1668                 pci_write_config(dev, rid, msix_ctrl, 2);
 1669         }
 1670 
 1671         return (0);
 1672 }
 1673 
 1674 /*
 1675  * Set up MSIX resources; note that the VF
 1676  * device MUST use MSIX, there is no fallback.
 1677  */
 1678 static int
 1679 ixv_setup_msix(struct adapter *adapter)
 1680 {
 1681         device_t dev = adapter->dev;
 1682         int rid, vectors, want = 2;
 1683 
 1684 
 1685         /* First try MSI/X */
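              /* The MSIX table is mapped through BAR 3 on the VF */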
 1686         rid = PCIR_BAR(3);
 1687         adapter->msix_mem = bus_alloc_resource_any(dev,
 1688             SYS_RES_MEMORY, &rid, RF_ACTIVE);
 1689         if (!adapter->msix_mem) {
 1690                 device_printf(adapter->dev,
 1691                     "Unable to map MSIX table \n");
 1692                 goto out;
 1693         }
 1694 
 1695         vectors = pci_msix_count(dev); 
 1696         if (vectors < 2) {
 1697                 bus_release_resource(dev, SYS_RES_MEMORY,
 1698                     rid, adapter->msix_mem);
 1699                 adapter->msix_mem = NULL;
 1700                 goto out;
 1701         }
 1702 
 1703         /*
 1704         ** We want two vectors: one for a queue,
 1705         ** plus an additional one for the mailbox.
 1706         */
 1707         if (pci_alloc_msix(dev, &want) == 0) {
 1708                 device_printf(adapter->dev,
 1709                     "Using MSIX interrupts with %d vectors\n", want);
 1710                 return (want);
 1711         }
 1712 out:
 1713         device_printf(adapter->dev,"MSIX config error\n");
 1714         return (ENXIO);
 1715 }
 1716 
 1717 
 1718 static int
 1719 ixv_allocate_pci_resources(struct adapter *adapter)
 1720 {
 1721         int             rid;
 1722         device_t        dev = adapter->dev;
 1723 
 1724         rid = PCIR_BAR(0);
 1725         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 1726             &rid, RF_ACTIVE);
 1727 
 1728         if (!(adapter->pci_mem)) {
 1729                 device_printf(dev,"Unable to allocate bus resource: memory\n");
 1730                 return (ENXIO);
 1731         }
 1732 
 1733         adapter->osdep.mem_bus_space_tag =
 1734                 rman_get_bustag(adapter->pci_mem);
 1735         adapter->osdep.mem_bus_space_handle =
 1736                 rman_get_bushandle(adapter->pci_mem);
 1737         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
 1738 
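              /* The VF driver currently uses a single queue pair */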
 1739         adapter->num_queues = 1;
 1740         adapter->hw.back = &adapter->osdep;
 1741 
 1742         /*
 1743         ** Now set up MSI/X; this should
 1744         ** return the number of
 1745         ** configured vectors.
 1746         */
 1747         adapter->msix = ixv_setup_msix(adapter);
 1748         if (adapter->msix == ENXIO)
 1749                 return (ENXIO);
 1750         else
 1751                 return (0);
 1752 }
 1753 
 1754 static void
 1755 ixv_free_pci_resources(struct adapter * adapter)
 1756 {
 1757         struct          ix_queue *que = adapter->queues;
 1758         device_t        dev = adapter->dev;
 1759         int             rid, memrid;
 1760 
 1761         memrid = PCIR_BAR(MSIX_BAR);
 1762 
 1763         /*
 1764         ** There is a slight possibility of a failure mode
 1765         ** in attach that will result in entering this function
 1766         ** before interrupt resources have been initialized, and
 1767         ** in that case we do not want to execute the loops below.
 1768         ** We can detect this reliably by the state of the adapter's
 1769         ** res pointer.
 1770         */
 1771         if (adapter->res == NULL)
 1772                 goto mem;
 1773 
 1774         /*
 1775         **  Release all msix queue resources:
 1776         */
 1777         for (int i = 0; i < adapter->num_queues; i++, que++) {
 1778                 rid = que->msix + 1;
 1779                 if (que->tag != NULL) {
 1780                         bus_teardown_intr(dev, que->res, que->tag);
 1781                         que->tag = NULL;
 1782                 }
 1783                 if (que->res != NULL)
 1784                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
 1785         }
 1786 
 1787 
 1788         /* Clean the Legacy or Link interrupt last */
 1789         if (adapter->mbxvec) /* we are doing MSIX */
 1790                 rid = adapter->mbxvec + 1;
 1791         else
 1792                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
 1793 
 1794         if (adapter->tag != NULL) {
 1795                 bus_teardown_intr(dev, adapter->res, adapter->tag);
 1796                 adapter->tag = NULL;
 1797         }
 1798         if (adapter->res != NULL)
 1799                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
 1800 
 1801 mem:
 1802         if (adapter->msix)
 1803                 pci_release_msi(dev);
 1804 
 1805         if (adapter->msix_mem != NULL)
 1806                 bus_release_resource(dev, SYS_RES_MEMORY,
 1807                     memrid, adapter->msix_mem);
 1808 
 1809         if (adapter->pci_mem != NULL)
 1810                 bus_release_resource(dev, SYS_RES_MEMORY,
 1811                     PCIR_BAR(0), adapter->pci_mem);
 1812 
 1813         return;
 1814 }
 1815 
 1816 /*********************************************************************
 1817  *
 1818  *  Setup networking device structure and register an interface.
 1819  *
 1820  **********************************************************************/
 1821 static void
 1822 ixv_setup_interface(device_t dev, struct adapter *adapter)
 1823 {
 1824         struct ifnet   *ifp;
 1825 
 1826         INIT_DEBUGOUT("ixv_setup_interface: begin");
 1827 
 1828         ifp = adapter->ifp = if_alloc(IFT_ETHER);
 1829         if (ifp == NULL)
 1830                 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
 1831         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1832         ifp->if_mtu = ETHERMTU;
 1833         ifp->if_baudrate = 1000000000;
 1834         ifp->if_init = ixv_init;
 1835         ifp->if_softc = adapter;
 1836         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1837         ifp->if_ioctl = ixv_ioctl;
 1838 #if __FreeBSD_version >= 800000
 1839         ifp->if_transmit = ixv_mq_start;
 1840         ifp->if_qflush = ixv_qflush;
 1841 #else
 1842         ifp->if_start = ixv_start;
 1843 #endif
 1844         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
 1845 
 1846         ether_ifattach(ifp, adapter->hw.mac.addr);
 1847 
 1848         adapter->max_frame_size =
 1849             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 1850 
 1851         /*
 1852          * Tell the upper layer(s) we support long frames.
 1853          */
 1854         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1855 
 1856         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
 1857         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
 1858         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
 1859                              |  IFCAP_VLAN_HWTSO
 1860                              |  IFCAP_VLAN_MTU;
 1861         ifp->if_capenable = ifp->if_capabilities;
 1862 
 1863         /* Advertise LRO capability, but leave it disabled by default (capenable was set above) */
 1864         ifp->if_capabilities |= IFCAP_LRO;
 1865 
 1866         /*
 1867          * Specify the media types supported by this adapter and register
 1868          * callbacks to update media and link information
 1869          */
 1870         ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
 1871                      ixv_media_status);
 1872         ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
 1873         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 1874         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 1875 
 1876         return;
 1877 }
 1878         
 1879 static void
 1880 ixv_config_link(struct adapter *adapter)
 1881 {
 1882         struct ixgbe_hw *hw = &adapter->hw;
 1883         u32     autoneg, err = 0;
 1884         bool    negotiate = TRUE;
 1885 
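              /* Query the current link state via the shared code, then let it set up the link if supported */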
 1886         if (hw->mac.ops.check_link)
 1887                 err = hw->mac.ops.check_link(hw, &autoneg,
 1888                     &adapter->link_up, FALSE);
 1889         if (err)
 1890                 goto out;
 1891 
 1892         if (hw->mac.ops.setup_link)
 1893                 err = hw->mac.ops.setup_link(hw, autoneg,
 1894                     negotiate, adapter->link_up);
 1895 out:
 1896         return;
 1897 }
 1898 
 1899 /********************************************************************
 1900  * Manage DMA'able memory.
 1901  *******************************************************************/
 1902 static void
 1903 ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
 1904 {
 1905         if (error)
 1906                 return;
 1907         *(bus_addr_t *) arg = segs->ds_addr;
 1908         return;
 1909 }
 1910 
 1911 static int
 1912 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
 1913                 struct ixv_dma_alloc *dma, int mapflags)
 1914 {
 1915         device_t dev = adapter->dev;
 1916         int             r;
 1917 
 1918         r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
 1919                                DBA_ALIGN, 0,    /* alignment, bounds */
 1920                                BUS_SPACE_MAXADDR,       /* lowaddr */
 1921                                BUS_SPACE_MAXADDR,       /* highaddr */
 1922                                NULL, NULL,      /* filter, filterarg */
 1923                                size,    /* maxsize */
 1924                                1,       /* nsegments */
 1925                                size,    /* maxsegsize */
 1926                                BUS_DMA_ALLOCNOW,        /* flags */
 1927                                NULL,    /* lockfunc */
 1928                                NULL,    /* lockfuncarg */
 1929                                &dma->dma_tag);
 1930         if (r != 0) {
 1931                 device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
 1932                        "error %u\n", r);
 1933                 goto fail_0;
 1934         }
 1935         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
 1936                              BUS_DMA_NOWAIT, &dma->dma_map);
 1937         if (r != 0) {
 1938                 device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
 1939                        "error %u\n", r);
 1940                 goto fail_1;
 1941         }
 1942         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
 1943                             size,
 1944                             ixv_dmamap_cb,
 1945                             &dma->dma_paddr,
 1946                             mapflags | BUS_DMA_NOWAIT);
 1947         if (r != 0) {
 1948                 device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
 1949                        "error %u\n", r);
 1950                 goto fail_2;
 1951         }
 1952         dma->dma_size = size;
 1953         return (0);
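              /* The failure labels below unwind in the reverse order of allocation */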
 1954 fail_2:
 1955         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 1956 fail_1:
 1957         bus_dma_tag_destroy(dma->dma_tag);
 1958 fail_0:
 1959         dma->dma_map = NULL;
 1960         dma->dma_tag = NULL;
 1961         return (r);
 1962 }
 1963 
 1964 static void
 1965 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
 1966 {
 1967         bus_dmamap_sync(dma->dma_tag, dma->dma_map,
 1968             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1969         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 1970         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
 1971         bus_dma_tag_destroy(dma->dma_tag);
 1972 }
 1973 
 1974 
 1975 /*********************************************************************
 1976  *
 1977  *  Allocate memory for the transmit and receive rings, and then
 1978  *  the descriptors associated with each, called only once at attach.
 1979  *
 1980  **********************************************************************/
 1981 static int
 1982 ixv_allocate_queues(struct adapter *adapter)
 1983 {
 1984         device_t        dev = adapter->dev;
 1985         struct ix_queue *que;
 1986         struct tx_ring  *txr;
 1987         struct rx_ring  *rxr;
 1988         int rsize, tsize, error = 0;
 1989         int txconf = 0, rxconf = 0;
 1990 
 1991         /* First allocate the top level queue structs */
 1992         if (!(adapter->queues =
 1993             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
 1994             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 1995                 device_printf(dev, "Unable to allocate queue memory\n");
 1996                 error = ENOMEM;
 1997                 goto fail;
 1998         }
 1999 
 2000         /* First allocate the TX ring struct memory */
 2001         if (!(adapter->tx_rings =
 2002             (struct tx_ring *) malloc(sizeof(struct tx_ring) *
 2003             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2004                 device_printf(dev, "Unable to allocate TX ring memory\n");
 2005                 error = ENOMEM;
 2006                 goto tx_fail;
 2007         }
 2008 
 2009         /* Next allocate the RX */
 2010         if (!(adapter->rx_rings =
 2011             (struct rx_ring *) malloc(sizeof(struct rx_ring) *
 2012             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2013                 device_printf(dev, "Unable to allocate RX ring memory\n");
 2014                 error = ENOMEM;
 2015                 goto rx_fail;
 2016         }
 2017 
 2018         /* For the ring itself */
 2019         tsize = roundup2(adapter->num_tx_desc *
 2020             sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
 2021 
 2022         /*
 2023          * Now set up the TX queues; txconf is needed to handle the
 2024          * possibility that things fail midcourse, so that we can
 2025          * undo the memory allocations gracefully.
 2026          */ 
 2027         for (int i = 0; i < adapter->num_queues; i++, txconf++) {
 2028                 /* Set up some basics */
 2029                 txr = &adapter->tx_rings[i];
 2030                 txr->adapter = adapter;
 2031                 txr->me = i;
 2032 
 2033                 /* Initialize the TX side lock */
 2034                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
 2035                     device_get_nameunit(dev), txr->me);
 2036                 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
 2037 
 2038                 if (ixv_dma_malloc(adapter, tsize,
 2039                         &txr->txdma, BUS_DMA_NOWAIT)) {
 2040                         device_printf(dev,
 2041                             "Unable to allocate TX Descriptor memory\n");
 2042                         error = ENOMEM;
 2043                         goto err_tx_desc;
 2044                 }
 2045                 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
 2046                 bzero((void *)txr->tx_base, tsize);
 2047 
 2048                 /* Now allocate transmit buffers for the ring */
 2049                 if (ixv_allocate_transmit_buffers(txr)) {
 2050                         device_printf(dev,
 2051                             "Critical Failure setting up transmit buffers\n");
 2052                         error = ENOMEM;
 2053                         goto err_tx_desc;
 2054                 }
 2055 #if __FreeBSD_version >= 800000
 2056                 /* Allocate a buf ring */
 2057                 txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
 2058                     M_WAITOK, &txr->tx_mtx);
 2059                 if (txr->br == NULL) {
 2060                         device_printf(dev,
 2061                             "Critical Failure setting up buf ring\n");
 2062                         error = ENOMEM;
 2063                         goto err_tx_desc;
 2064                 }
 2065 #endif
 2066         }
 2067 
 2068         /*
 2069          * Next the RX queues...
 2070          */ 
 2071         rsize = roundup2(adapter->num_rx_desc *
 2072             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
 2073         for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
 2074                 rxr = &adapter->rx_rings[i];
 2075                 /* Set up some basics */
 2076                 rxr->adapter = adapter;
 2077                 rxr->me = i;
 2078 
 2079                 /* Initialize the RX side lock */
 2080                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
 2081                     device_get_nameunit(dev), rxr->me);
 2082                 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
 2083 
 2084                 if (ixv_dma_malloc(adapter, rsize,
 2085                         &rxr->rxdma, BUS_DMA_NOWAIT)) {
 2086                         device_printf(dev,
 2087                             "Unable to allocate RX Descriptor memory\n");
 2088                         error = ENOMEM;
 2089                         goto err_rx_desc;
 2090                 }
 2091                 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
 2092                 bzero((void *)rxr->rx_base, rsize);
 2093 
 2094                 /* Allocate receive buffers for the ring*/
 2095                 if (ixv_allocate_receive_buffers(rxr)) {
 2096                         device_printf(dev,
 2097                             "Critical Failure setting up receive buffers\n");
 2098                         error = ENOMEM;
 2099                         goto err_rx_desc;
 2100                 }
 2101         }
 2102 
 2103         /*
 2104         ** Finally set up the queue holding structs
 2105         */
 2106         for (int i = 0; i < adapter->num_queues; i++) {
 2107                 que = &adapter->queues[i];
 2108                 que->adapter = adapter;
 2109                 que->txr = &adapter->tx_rings[i];
 2110                 que->rxr = &adapter->rx_rings[i];
 2111         }
 2112 
 2113         return (0);
 2114 
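              /* Error unwind: release resources in the reverse order they were allocated */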
 2115 err_rx_desc:
 2116         for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
 2117                 ixv_dma_free(adapter, &rxr->rxdma);
 2118 err_tx_desc:
 2119         for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
 2120                 ixv_dma_free(adapter, &txr->txdma);
 2121         free(adapter->rx_rings, M_DEVBUF);
 2122 rx_fail:
 2123         free(adapter->tx_rings, M_DEVBUF);
 2124 tx_fail:
 2125         free(adapter->queues, M_DEVBUF);
 2126 fail:
 2127         return (error);
 2128 }
 2129 
 2130 
 2131 /*********************************************************************
 2132  *
 2133  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 2134  *  the information needed to transmit a packet on the wire. This is
 2135  *  called only once at attach; setup is done on every reset.
 2136  *
 2137  **********************************************************************/
 2138 static int
 2139 ixv_allocate_transmit_buffers(struct tx_ring *txr)
 2140 {
 2141         struct adapter *adapter = txr->adapter;
 2142         device_t dev = adapter->dev;
 2143         struct ixv_tx_buf *txbuf;
 2144         int error, i;
 2145 
 2146         /*
 2147          * Setup DMA descriptor areas.
 2148          */
 2149         if ((error = bus_dma_tag_create(NULL,           /* parent */
 2150                                1, 0,            /* alignment, bounds */
 2151                                BUS_SPACE_MAXADDR,       /* lowaddr */
 2152                                BUS_SPACE_MAXADDR,       /* highaddr */
 2153                                NULL, NULL,              /* filter, filterarg */
 2154                                IXV_TSO_SIZE,            /* maxsize */
 2155                                32,                      /* nsegments */
 2156                                PAGE_SIZE,               /* maxsegsize */
 2157                                0,                       /* flags */
 2158                                NULL,                    /* lockfunc */
 2159                                NULL,                    /* lockfuncarg */
 2160                                &txr->txtag))) {
 2161                 device_printf(dev,"Unable to allocate TX DMA tag\n");
 2162                 goto fail;
 2163         }
 2164 
 2165         if (!(txr->tx_buffers =
 2166             (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
 2167             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2168                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
 2169                 error = ENOMEM;
 2170                 goto fail;
 2171         }
 2172 
 2173         /* Create the descriptor buffer dma maps */
 2174         txbuf = txr->tx_buffers;
 2175         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
 2176                 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
 2177                 if (error != 0) {
 2178                         device_printf(dev, "Unable to create TX DMA map\n");
 2179                         goto fail;
 2180                 }
 2181         }
 2182 
 2183         return 0;
 2184 fail:
 2185         /* We free everything; this handles the case where we fail partway through */
 2186         ixv_free_transmit_structures(adapter);
 2187         return (error);
 2188 }
 2189 
 2190 /*********************************************************************
 2191  *
 2192  *  Initialize a transmit ring.
 2193  *
 2194  **********************************************************************/
 2195 static void
 2196 ixv_setup_transmit_ring(struct tx_ring *txr)
 2197 {
 2198         struct adapter *adapter = txr->adapter;
 2199         struct ixv_tx_buf *txbuf;
 2200         int i;
 2201 
 2202         /* Clear the old ring contents */
 2203         IXV_TX_LOCK(txr);
 2204         bzero((void *)txr->tx_base,
 2205               (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
 2206         /* Reset indices */
 2207         txr->next_avail_desc = 0;
 2208         txr->next_to_clean = 0;
 2209 
 2210         /* Free any existing tx buffers. */
 2211         txbuf = txr->tx_buffers;
 2212         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
 2213                 if (txbuf->m_head != NULL) {
 2214                         bus_dmamap_sync(txr->txtag, txbuf->map,
 2215                             BUS_DMASYNC_POSTWRITE);
 2216                         bus_dmamap_unload(txr->txtag, txbuf->map);
 2217                         m_freem(txbuf->m_head);
 2218                         txbuf->m_head = NULL;
 2219                 }
 2220                 /* Clear the EOP index */
 2221                 txbuf->eop_index = -1;
 2222         }
 2223 
 2224         /* Set number of descriptors available */
 2225         txr->tx_avail = adapter->num_tx_desc;
 2226 
 2227         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 2228             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2229         IXV_TX_UNLOCK(txr);
 2230 }
 2231 
 2232 /*********************************************************************
 2233  *
 2234  *  Initialize all transmit rings.
 2235  *
 2236  **********************************************************************/
 2237 static int
 2238 ixv_setup_transmit_structures(struct adapter *adapter)
 2239 {
 2240         struct tx_ring *txr = adapter->tx_rings;
 2241 
 2242         for (int i = 0; i < adapter->num_queues; i++, txr++)
 2243                 ixv_setup_transmit_ring(txr);
 2244 
 2245         return (0);
 2246 }
 2247 
 2248 /*********************************************************************
 2249  *
 2250  *  Enable transmit unit.
 2251  *
 2252  **********************************************************************/
 2253 static void
 2254 ixv_initialize_transmit_units(struct adapter *adapter)
 2255 {
 2256         struct tx_ring  *txr = adapter->tx_rings;
 2257         struct ixgbe_hw *hw = &adapter->hw;
 2258 
 2259 
 2260         for (int i = 0; i < adapter->num_queues; i++, txr++) {
 2261                 u64     tdba = txr->txdma.dma_paddr;
 2262                 u32     txctrl, txdctl;
 2263 
 2264                 /* Set WTHRESH to 8, burst writeback */
 2265                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 2266                 txdctl |= (8 << 16);
 2267                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
 2268                 /* Now enable */
 2269                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 2270                 txdctl |= IXGBE_TXDCTL_ENABLE;
 2271                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
 2272 
 2273                 /* Set the HW Tx Head and Tail indices */
 2274                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
 2275                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
 2276 
 2277                 /* Setup Transmit Descriptor Cmd Settings */
 2278                 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
 2279                 txr->watchdog_check = FALSE;
 2280 
 2281                 /* Set Ring parameters */
 2282                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
 2283                        (tdba & 0x00000000ffffffffULL));
 2284                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
 2285                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
 2286                     adapter->num_tx_desc *
 2287                     sizeof(struct ixgbe_legacy_tx_desc));
 2288                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
 2289                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
 2290                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
 2291                 break;
 2292         }
 2293 
 2294         return;
 2295 }
 2296 
 2297 /*********************************************************************
 2298  *
 2299  *  Free all transmit rings.
 2300  *
 2301  **********************************************************************/
 2302 static void
 2303 ixv_free_transmit_structures(struct adapter *adapter)
 2304 {
 2305         struct tx_ring *txr = adapter->tx_rings;
 2306 
 2307         for (int i = 0; i < adapter->num_queues; i++, txr++) {
 2308                 IXV_TX_LOCK(txr);
 2309                 ixv_free_transmit_buffers(txr);
 2310                 ixv_dma_free(adapter, &txr->txdma);
 2311                 IXV_TX_UNLOCK(txr);
 2312                 IXV_TX_LOCK_DESTROY(txr);
 2313         }
 2314         free(adapter->tx_rings, M_DEVBUF);
 2315 }
 2316 
 2317 /*********************************************************************
 2318  *
 2319  *  Free transmit ring related data structures.
 2320  *
 2321  **********************************************************************/
 2322 static void
 2323 ixv_free_transmit_buffers(struct tx_ring *txr)
 2324 {
 2325         struct adapter *adapter = txr->adapter;
 2326         struct ixv_tx_buf *tx_buffer;
 2327         int             i;
 2328 
 2329         INIT_DEBUGOUT("free_transmit_ring: begin");
 2330 
 2331         if (txr->tx_buffers == NULL)
 2332                 return;
 2333 
 2334         tx_buffer = txr->tx_buffers;
 2335         for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
 2336                 if (tx_buffer->m_head != NULL) {
 2337                         bus_dmamap_sync(txr->txtag, tx_buffer->map,
 2338                             BUS_DMASYNC_POSTWRITE);
 2339                         bus_dmamap_unload(txr->txtag,
 2340                             tx_buffer->map);
 2341                         m_freem(tx_buffer->m_head);
 2342                         tx_buffer->m_head = NULL;
 2343                         if (tx_buffer->map != NULL) {
 2344                                 bus_dmamap_destroy(txr->txtag,
 2345                                     tx_buffer->map);
 2346                                 tx_buffer->map = NULL;
 2347                         }
 2348                 } else if (tx_buffer->map != NULL) {
 2349                         bus_dmamap_unload(txr->txtag,
 2350                             tx_buffer->map);
 2351                         bus_dmamap_destroy(txr->txtag,
 2352                             tx_buffer->map);
 2353                         tx_buffer->map = NULL;
 2354                 }
 2355         }
 2356 #if __FreeBSD_version >= 800000
 2357         if (txr->br != NULL)
 2358                 buf_ring_free(txr->br, M_DEVBUF);
 2359 #endif
 2360         if (txr->tx_buffers != NULL) {
 2361                 free(txr->tx_buffers, M_DEVBUF);
 2362                 txr->tx_buffers = NULL;
 2363         }
 2364         if (txr->txtag != NULL) {
 2365                 bus_dma_tag_destroy(txr->txtag);
 2366                 txr->txtag = NULL;
 2367         }
 2368         return;
 2369 }
 2370 
 2371 /*********************************************************************
 2372  *
 2373  *  Advanced Context Descriptor setup for VLAN or CSUM
 2374  *
 2375  **********************************************************************/
 2376 
 2377 static boolean_t
 2378 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
 2379 {
 2380         struct adapter *adapter = txr->adapter;
 2381         struct ixgbe_adv_tx_context_desc *TXD;
 2382         struct ixv_tx_buf        *tx_buffer;
 2383         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 2384         struct ether_vlan_header *eh;
 2385         struct ip *ip;
 2386         struct ip6_hdr *ip6;
 2387         int  ehdrlen, ip_hlen = 0;
 2388         u16     etype;
 2389         u8      ipproto = 0;
 2390         bool    offload = TRUE;
 2391         int ctxd = txr->next_avail_desc;
 2392         u16 vtag = 0;
 2393 
 2394 
 2395         if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
 2396                 offload = FALSE;
 2397 
 2398 
 2399         tx_buffer = &txr->tx_buffers[ctxd];
 2400         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 2401 
 2402         /*
 2403         ** In advanced descriptors the vlan tag must 
 2404         ** be placed into the descriptor itself.
 2405         */
 2406         if (mp->m_flags & M_VLANTAG) {
 2407                 vtag = htole16(mp->m_pkthdr.ether_vtag);
 2408                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
 2409         } else if (offload == FALSE)
 2410                 return FALSE;
 2411 
 2412         /*
 2413          * Determine where the frame payload starts.
 2414          * Jump over VLAN headers if already present;
 2415          * this is helpful for QinQ too.
 2416          */
 2417         eh = mtod(mp, struct ether_vlan_header *);
 2418         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 2419                 etype = ntohs(eh->evl_proto);
 2420                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2421         } else {
 2422                 etype = ntohs(eh->evl_encap_proto);
 2423                 ehdrlen = ETHER_HDR_LEN;
 2424         }
 2425 
 2426         /* Set the ether header length */
 2427         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
 2428 
 2429         switch (etype) {
 2430                 case ETHERTYPE_IP:
 2431                         ip = (struct ip *)(mp->m_data + ehdrlen);
 2432                         ip_hlen = ip->ip_hl << 2;
 2433                         if (mp->m_len < ehdrlen + ip_hlen)
 2434                                 return (FALSE);
 2435                         ipproto = ip->ip_p;
 2436                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 2437                         break;
 2438                 case ETHERTYPE_IPV6:
 2439                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 2440                         ip_hlen = sizeof(struct ip6_hdr);
 2441                         if (mp->m_len < ehdrlen + ip_hlen)
 2442                                 return (FALSE);
 2443                         ipproto = ip6->ip6_nxt;
 2444                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
 2445                         break;
 2446                 default:
 2447                         offload = FALSE;
 2448                         break;
 2449         }
 2450 
 2451         vlan_macip_lens |= ip_hlen;
 2452         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 2453 
 2454         switch (ipproto) {
 2455                 case IPPROTO_TCP:
 2456                         if (mp->m_pkthdr.csum_flags & CSUM_TCP)
 2457                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 2458                         break;
 2459 
 2460                 case IPPROTO_UDP:
 2461                         if (mp->m_pkthdr.csum_flags & CSUM_UDP)
 2462                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
 2463                         break;
 2464 
 2465 #if __FreeBSD_version >= 800000
 2466                 case IPPROTO_SCTP:
 2467                         if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
 2468                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 2469                         break;
 2470 #endif
 2471                 default:
 2472                         offload = FALSE;
 2473                         break;
 2474         }
 2475 
 2476         /* Now copy bits into descriptor */
 2477         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
 2478         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
 2479         TXD->seqnum_seed = htole32(0);
 2480         TXD->mss_l4len_idx = htole32(0);
 2481 
 2482         tx_buffer->m_head = NULL;
 2483         tx_buffer->eop_index = -1;
 2484 
 2485         /* We've consumed the first desc, adjust counters */
 2486         if (++ctxd == adapter->num_tx_desc)
 2487                 ctxd = 0;
 2488         txr->next_avail_desc = ctxd;
 2489         --txr->tx_avail;
 2490 
 2491         return (offload);
 2492 }
 2493 
 2494 /**********************************************************************
 2495  *
 2496  *  Setup work for hardware segmentation offload (TSO) on
 2497  *  adapters using advanced tx descriptors
 2498  *
 2499  **********************************************************************/
 2500 static boolean_t
 2501 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
 2502 {
 2503         struct adapter *adapter = txr->adapter;
 2504         struct ixgbe_adv_tx_context_desc *TXD;
 2505         struct ixv_tx_buf        *tx_buffer;
 2506         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 2507         u32 mss_l4len_idx = 0;
 2508         u16 vtag = 0;
 2509         int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
 2510         struct ether_vlan_header *eh;
 2511         struct ip *ip;
 2512         struct tcphdr *th;
 2513 
 2514 
 2515         /*
 2516          * Determine where the frame payload starts.
 2517          * Jump over VLAN headers if already present.
 2518          */
 2519         eh = mtod(mp, struct ether_vlan_header *);
 2520         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
 2521                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2522         else
 2523                 ehdrlen = ETHER_HDR_LEN;
 2524 
 2525         /* Ensure we have at least the IP+TCP header in the first mbuf. */
 2526         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
 2527                 return FALSE;
 2528 
 2529         ctxd = txr->next_avail_desc;
 2530         tx_buffer = &txr->tx_buffers[ctxd];
 2531         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 2532 
 2533         ip = (struct ip *)(mp->m_data + ehdrlen);
 2534         if (ip->ip_p != IPPROTO_TCP)
 2535                 return FALSE;   /* 0 */
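              /* Zero the IP checksum and seed the TCP pseudo-header checksum; the hardware fills in the rest for TSO */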
 2536         ip->ip_sum = 0;
 2537         ip_hlen = ip->ip_hl << 2;
 2538         th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
 2539         th->th_sum = in_pseudo(ip->ip_src.s_addr,
 2540             ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 2541         tcp_hlen = th->th_off << 2;
 2542         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
 2543 
 2544         /* This is used in the transmit desc in encap */
 2545         *paylen = mp->m_pkthdr.len - hdrlen;
 2546 
 2547         /* VLAN MACLEN IPLEN */
 2548         if (mp->m_flags & M_VLANTAG) {
 2549                 vtag = htole16(mp->m_pkthdr.ether_vtag);
 2550                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
 2551         }
 2552 
 2553         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
 2554         vlan_macip_lens |= ip_hlen;
 2555         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
 2556 
 2557         /* ADV DTYPE TUCMD */
 2558         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 2559         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 2560         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 2561         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
 2562 
 2563 
 2564         /* MSS L4LEN IDX */
 2565         mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
 2566         mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
 2567         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
 2568 
 2569         TXD->seqnum_seed = htole32(0);
 2570         tx_buffer->m_head = NULL;
 2571         tx_buffer->eop_index = -1;
 2572 
 2573         if (++ctxd == adapter->num_tx_desc)
 2574                 ctxd = 0;
 2575 
 2576         txr->tx_avail--;
 2577         txr->next_avail_desc = ctxd;
 2578         return TRUE;
 2579 }
 2580 
 2581 
 2582 /**********************************************************************
 2583  *
 2584  *  Examine each tx_buffer in the used queue. If the hardware is done
 2585  *  processing the packet then free associated resources. The
 2586  *  tx_buffer is put back on the free queue.
 2587  *
 2588  **********************************************************************/
 2589 static boolean_t
 2590 ixv_txeof(struct tx_ring *txr)
 2591 {
 2592         struct adapter  *adapter = txr->adapter;
 2593         struct ifnet    *ifp = adapter->ifp;
 2594         u32     first, last, done;
 2595         struct ixv_tx_buf *tx_buffer;
 2596         struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
 2597 
 2598         mtx_assert(&txr->tx_mtx, MA_OWNED);
 2599 
 2600         if (txr->tx_avail == adapter->num_tx_desc)
 2601                 return FALSE;
 2602 
 2603         first = txr->next_to_clean;
 2604         tx_buffer = &txr->tx_buffers[first];
 2605         /* For cleanup we just use legacy struct */
 2606         tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
 2607         last = tx_buffer->eop_index;
 2608         if (last == -1)
 2609                 return FALSE;
 2610         eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
 2611 
 2612         /*
 2613         ** Get the index of the first descriptor
 2614         ** BEYOND the EOP and call that 'done'.
 2615         ** This is done so the comparison in the
 2616         ** inner while loop below can be simple.
 2617         */
 2618         if (++last == adapter->num_tx_desc) last = 0;
 2619         done = last;
 2620 
 2621         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 2622             BUS_DMASYNC_POSTREAD);
 2623         /*
 2624         ** Only the EOP descriptor of a packet now has the DD
 2625         ** bit set, this is what we look for...
 2626         */
 2627         while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
 2628                 /* We clean the range of the packet */
 2629                 while (first != done) {
 2630                         tx_desc->upper.data = 0;
 2631                         tx_desc->lower.data = 0;
 2632                         tx_desc->buffer_addr = 0;
 2633                         ++txr->tx_avail;
 2634 
 2635                         if (tx_buffer->m_head) {
 2636                                 bus_dmamap_sync(txr->txtag,
 2637                                     tx_buffer->map,
 2638                                     BUS_DMASYNC_POSTWRITE);
 2639                                 bus_dmamap_unload(txr->txtag,
 2640                                     tx_buffer->map);
 2641                                 m_freem(tx_buffer->m_head);
 2642                                 tx_buffer->m_head = NULL;
 2643                                 tx_buffer->map = NULL;
 2644                         }
 2645                         tx_buffer->eop_index = -1;
 2646                         txr->watchdog_time = ticks;
 2647 
 2648                         if (++first == adapter->num_tx_desc)
 2649                                 first = 0;
 2650 
 2651                         tx_buffer = &txr->tx_buffers[first];
 2652                         tx_desc =
 2653                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
 2654                 }
 2655                 ++ifp->if_opackets;
 2656                 /* See if there is more work now */
 2657                 last = tx_buffer->eop_index;
 2658                 if (last != -1) {
 2659                         eop_desc =
 2660                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
 2661                         /* Get next done point */
 2662                         if (++last == adapter->num_tx_desc) last = 0;
 2663                         done = last;
 2664                 } else
 2665                         break;
 2666         }
 2667         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 2668             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2669 
 2670         txr->next_to_clean = first;
 2671 
 2672         /*
 2673          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
 2674          * it is OK to send packets. If there are no pending descriptors,
 2675          * clear the timeout. Otherwise, if some descriptors have been freed,
 2676          * restart the timeout.
 2677          */
 2678         if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
 2679                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2680                 if (txr->tx_avail == adapter->num_tx_desc) {
 2681                         txr->watchdog_check = FALSE;
 2682                         return FALSE;
 2683                 }
 2684         }
 2685 
 2686         return TRUE;
 2687 }
 2688 
 2689 /*********************************************************************
 2690  *
 2691  *  Refresh mbuf buffers for RX descriptor rings
 2692  *   - now keeps its own state, so discards due to resource
 2693  *     exhaustion are unnecessary; if an mbuf cannot be obtained
 2694  *     it just returns, keeping its placeholder, so it can simply
 2695  *     be called again to retry.
 2696  *
 2697  **********************************************************************/
 2698 static void
 2699 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
 2700 {
 2701         struct adapter          *adapter = rxr->adapter;
 2702         bus_dma_segment_t       hseg[1];
 2703         bus_dma_segment_t       pseg[1];
 2704         struct ixv_rx_buf       *rxbuf;
 2705         struct mbuf             *mh, *mp;
 2706         int                     i, nsegs, error, cleaned;
 2707 
 2708         i = rxr->next_to_refresh;
 2709         cleaned = -1; /* Signify no completions */
 2710         while (i != limit) {
 2711                 rxbuf = &rxr->rx_buffers[i];
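                      /* When doing header split, refresh the header mbuf first */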
 2712                 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
 2713                         mh = m_gethdr(M_DONTWAIT, MT_DATA);
 2714                         if (mh == NULL)
 2715                                 goto update;
 2716                         mh->m_pkthdr.len = mh->m_len = MHLEN;
 2717                         mh->m_len = MHLEN;
 2718                         mh->m_flags |= M_PKTHDR;
 2719                         m_adj(mh, ETHER_ALIGN);
 2720                         /* Get the memory mapping */
 2721                         error = bus_dmamap_load_mbuf_sg(rxr->htag,
 2722                             rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
 2723                         if (error != 0) {
 2724                                 printf("GET BUF: dmamap load"
 2725                                     " failure - %d\n", error);
 2726                                 m_free(mh);
 2727                                 goto update;
 2728                         }
 2729                         rxbuf->m_head = mh;
 2730                         bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 2731                             BUS_DMASYNC_PREREAD);
 2732                         rxr->rx_base[i].read.hdr_addr =
 2733                             htole64(hseg[0].ds_addr);
 2734                 }
 2735 
 2736                 if (rxbuf->m_pack == NULL) {
 2737                         mp = m_getjcl(M_DONTWAIT, MT_DATA,
 2738                             M_PKTHDR, adapter->rx_mbuf_sz);
 2739                         if (mp == NULL)
 2740                                 goto update;
 2741                         mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
 2742                         /* Get the memory mapping */
 2743                         error = bus_dmamap_load_mbuf_sg(rxr->ptag,
 2744                             rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
 2745                         if (error != 0) {
 2746                                 printf("GET BUF: dmamap load"
 2747                                     " failure - %d\n", error);
 2748                                 m_free(mp);
 2749                                 goto update;
 2750                         }
 2751                         rxbuf->m_pack = mp;
 2752                         bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 2753                             BUS_DMASYNC_PREREAD);
 2754                         rxr->rx_base[i].read.pkt_addr =
 2755                             htole64(pseg[0].ds_addr);
 2756                 }
 2757 
 2758                 cleaned = i;
 2759                 /* Calculate next index */
 2760                 if (++i == adapter->num_rx_desc)
 2761                         i = 0;
 2762                 /* This is the work marker for refresh */
 2763                 rxr->next_to_refresh = i;
 2764         }
 2765 update:
 2766         if (cleaned != -1) /* If we refreshed some, bump tail */
 2767                 IXGBE_WRITE_REG(&adapter->hw,
 2768                     IXGBE_VFRDT(rxr->me), cleaned);
 2769         return;
 2770 }
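/*
 * Note (illustrative, derived from the routine above): descriptors are
 * refilled from next_to_refresh up to, but not including, 'limit', and
 * 'cleaned' stays at -1 unless at least one slot was actually refilled.
 * Only then is the VFRDT tail register bumped, so a failed mbuf
 * allocation simply leaves the ring untouched until the next pass.
 */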
 2771 
 2772 /*********************************************************************
 2773  *
 2774  *  Allocate memory for rx_buffer structures. Since we use one
 2775  *  rx_buffer per received packet, the maximum number of rx_buffer's
 2776  *  that we'll need is equal to the number of receive descriptors
 2777  *  that we've allocated.
 2778  *
 2779  **********************************************************************/
 2780 static int
 2781 ixv_allocate_receive_buffers(struct rx_ring *rxr)
 2782 {
 2783         struct  adapter         *adapter = rxr->adapter;
 2784         device_t                dev = adapter->dev;
 2785         struct ixv_rx_buf       *rxbuf;
 2786         int                     i, bsize, error;
 2787 
 2788         bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
 2789         if (!(rxr->rx_buffers =
 2790             (struct ixv_rx_buf *) malloc(bsize,
 2791             M_DEVBUF, M_NOWAIT | M_ZERO))) {
 2792                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
 2793                 error = ENOMEM;
 2794                 goto fail;
 2795         }
 2796 
 2797         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
 2798                                    1, 0,        /* alignment, bounds */
 2799                                    BUS_SPACE_MAXADDR,   /* lowaddr */
 2800                                    BUS_SPACE_MAXADDR,   /* highaddr */
 2801                                    NULL, NULL,          /* filter, filterarg */
 2802                                    MSIZE,               /* maxsize */
 2803                                    1,                   /* nsegments */
 2804                                    MSIZE,               /* maxsegsize */
 2805                                    0,                   /* flags */
 2806                                    NULL,                /* lockfunc */
 2807                                    NULL,                /* lockfuncarg */
 2808                                    &rxr->htag))) {
 2809                 device_printf(dev, "Unable to create RX DMA tag\n");
 2810                 goto fail;
 2811         }
 2812 
 2813         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
 2814                                    1, 0,        /* alignment, bounds */
 2815                                    BUS_SPACE_MAXADDR,   /* lowaddr */
 2816                                    BUS_SPACE_MAXADDR,   /* highaddr */
 2817                                    NULL, NULL,          /* filter, filterarg */
 2818                                    MJUMPAGESIZE,        /* maxsize */
 2819                                    1,                   /* nsegments */
 2820                                    MJUMPAGESIZE,        /* maxsegsize */
 2821                                    0,                   /* flags */
 2822                                    NULL,                /* lockfunc */
 2823                                    NULL,                /* lockfuncarg */
 2824                                    &rxr->ptag))) {
 2825                 device_printf(dev, "Unable to create RX DMA tag\n");
 2826                 goto fail;
 2827         }
 2828 
 2829         for (i = 0; i < adapter->num_rx_desc; i++) {
 2830                 rxbuf = &rxr->rx_buffers[i];
 2831                 error = bus_dmamap_create(rxr->htag,
 2832                     BUS_DMA_NOWAIT, &rxbuf->hmap);
 2833                 if (error) {
 2834                         device_printf(dev, "Unable to create RX head map\n");
 2835                         goto fail;
 2836                 }
 2837                 error = bus_dmamap_create(rxr->ptag,
 2838                     BUS_DMA_NOWAIT, &rxbuf->pmap);
 2839                 if (error) {
 2840                         device_printf(dev, "Unable to create RX pkt map\n");
 2841                         goto fail;
 2842                 }
 2843         }
 2844 
 2845         return (0);
 2846 
 2847 fail:
 2848         /* Frees all, but can handle partial completion */
 2849         ixv_free_receive_structures(adapter);
 2850         return (error);
 2851 }
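/*
 * Note (illustrative, derived from the routine above): two DMA tags are
 * created because the header-split receive path keeps two mappings per
 * descriptor -- htag (maxsize MSIZE) maps the small header mbuf, while
 * ptag (maxsize MJUMPAGESIZE) maps the payload cluster obtained from
 * m_getjcl().
 */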
 2852 
 2853 static void     
 2854 ixv_free_receive_ring(struct rx_ring *rxr)
 2855 { 
 2856         struct  adapter         *adapter;
 2857         struct ixv_rx_buf       *rxbuf;
 2858         int i;
 2859 
 2860         adapter = rxr->adapter;
 2861         for (i = 0; i < adapter->num_rx_desc; i++) {
 2862                 rxbuf = &rxr->rx_buffers[i];
 2863                 if (rxbuf->m_head != NULL) {
 2864                         bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 2865                             BUS_DMASYNC_POSTREAD);
 2866                         bus_dmamap_unload(rxr->htag, rxbuf->hmap);
 2867                         rxbuf->m_head->m_flags |= M_PKTHDR;
 2868                         m_freem(rxbuf->m_head);
 2869                 }
 2870                 if (rxbuf->m_pack != NULL) {
 2871                         bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 2872                             BUS_DMASYNC_POSTREAD);
 2873                         bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
 2874                         rxbuf->m_pack->m_flags |= M_PKTHDR;
 2875                         m_freem(rxbuf->m_pack);
 2876                 }
 2877                 rxbuf->m_head = NULL;
 2878                 rxbuf->m_pack = NULL;
 2879         }
 2880 }
 2881 
 2882 
 2883 /*********************************************************************
 2884  *
 2885  *  Initialize a receive ring and its buffers.
 2886  *
 2887  **********************************************************************/
 2888 static int
 2889 ixv_setup_receive_ring(struct rx_ring *rxr)
 2890 {
 2891         struct  adapter         *adapter;
 2892         struct ifnet            *ifp;
 2893         device_t                dev;
 2894         struct ixv_rx_buf       *rxbuf;
 2895         bus_dma_segment_t       pseg[1], hseg[1];
 2896         struct lro_ctrl         *lro = &rxr->lro;
 2897         int                     rsize, nsegs, error = 0;
 2898 
 2899         adapter = rxr->adapter;
 2900         ifp = adapter->ifp;
 2901         dev = adapter->dev;
 2902 
 2903         /* Clear the ring contents */
 2904         IXV_RX_LOCK(rxr);
 2905         rsize = roundup2(adapter->num_rx_desc *
 2906             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
 2907         bzero((void *)rxr->rx_base, rsize);
 2908 
 2909         /* Free current RX buffer structs and their mbufs */
 2910         ixv_free_receive_ring(rxr);
 2911 
 2912         /* Configure header split? */
 2913         if (ixv_header_split)
 2914                 rxr->hdr_split = TRUE;
 2915 
 2916         /* Now replenish the mbufs */
 2917         for (int j = 0; j != adapter->num_rx_desc; ++j) {
 2918                 struct mbuf     *mh, *mp;
 2919 
 2920                 rxbuf = &rxr->rx_buffers[j];
 2921                 /*
 2922                 ** Don't allocate a header mbuf if we're not
 2923                 ** doing header split; it's wasteful.
 2924                 */
 2925                 if (rxr->hdr_split == FALSE)
 2926                         goto skip_head;
 2927 
 2928                 /* First the header */
 2929                 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
 2930                 if (rxbuf->m_head == NULL) {
 2931                         error = ENOBUFS;
 2932                         goto fail;
 2933                 }
 2934                 m_adj(rxbuf->m_head, ETHER_ALIGN);
 2935                 mh = rxbuf->m_head;
 2936                 mh->m_len = mh->m_pkthdr.len = MHLEN;
 2937                 mh->m_flags |= M_PKTHDR;
 2938                 /* Get the memory mapping */
 2939                 error = bus_dmamap_load_mbuf_sg(rxr->htag,
 2940                     rxbuf->hmap, rxbuf->m_head, hseg,
 2941                     &nsegs, BUS_DMA_NOWAIT);
 2942                 if (error != 0) /* Nothing elegant to do here */
 2943                         goto fail;
 2944                 bus_dmamap_sync(rxr->htag,
 2945                     rxbuf->hmap, BUS_DMASYNC_PREREAD);
 2946                 /* Update descriptor */
 2947                 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
 2948 
 2949 skip_head:
 2950                 /* Now the payload cluster */
 2951                 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
 2952                     M_PKTHDR, adapter->rx_mbuf_sz);
 2953                 if (rxbuf->m_pack == NULL) {
 2954                         error = ENOBUFS;
 2955                         goto fail;
 2956                 }
 2957                 mp = rxbuf->m_pack;
 2958                 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
 2959                 /* Get the memory mapping */
 2960                 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
 2961                     rxbuf->pmap, mp, pseg,
 2962                     &nsegs, BUS_DMA_NOWAIT);
 2963                 if (error != 0)
 2964                         goto fail;
 2965                 bus_dmamap_sync(rxr->ptag,
 2966                     rxbuf->pmap, BUS_DMASYNC_PREREAD);
 2967                 /* Update descriptor */
 2968                 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
 2969         }
 2970 
 2971 
 2972         /* Setup our descriptor indices */
 2973         rxr->next_to_check = 0;
 2974         rxr->next_to_refresh = 0;
 2975         rxr->lro_enabled = FALSE;
 2976         rxr->rx_split_packets = 0;
 2977         rxr->rx_bytes = 0;
 2978 
 2979         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 2980             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2981 
 2982         /*
 2983         ** Now set up the LRO interface:
 2984         */
 2985         if (ifp->if_capenable & IFCAP_LRO) {
 2986                 int err = tcp_lro_init(lro);
 2987                 if (err) {
 2988                         device_printf(dev, "LRO Initialization failed!\n");
 2989                         goto fail;
 2990                 }
 2991                 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
 2992                 rxr->lro_enabled = TRUE;
 2993                 lro->ifp = adapter->ifp;
 2994         }
 2995 
 2996         IXV_RX_UNLOCK(rxr);
 2997         return (0);
 2998 
 2999 fail:
 3000         ixv_free_receive_ring(rxr);
 3001         IXV_RX_UNLOCK(rxr);
 3002         return (error);
 3003 }
 3004 
 3005 /*********************************************************************
 3006  *
 3007  *  Initialize all receive rings.
 3008  *
 3009  **********************************************************************/
 3010 static int
 3011 ixv_setup_receive_structures(struct adapter *adapter)
 3012 {
 3013         struct rx_ring *rxr = adapter->rx_rings;
 3014         int j;
 3015 
 3016         for (j = 0; j < adapter->num_queues; j++, rxr++)
 3017                 if (ixv_setup_receive_ring(rxr))
 3018                         goto fail;
 3019 
 3020         return (0);
 3021 fail:
 3022         /*
 3023          * Free the RX buffers allocated so far; we only handle the
 3024          * rings that completed, since the failing ring has already
 3025          * cleaned up after itself.  Ring 'j' failed, so it is the terminus.
 3026          */
 3027         for (int i = 0; i < j; ++i) {
 3028                 rxr = &adapter->rx_rings[i];
 3029                 ixv_free_receive_ring(rxr);
 3030         }
 3031 
 3032         return (ENOBUFS);
 3033 }
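/*
 * Note (assumed call ordering, not shown in this listing): the buffer
 * allocation routine above is typically run once per ring at attach
 * time, whereas ixv_setup_receive_ring()/ixv_setup_receive_structures()
 * and the register programming in ixv_initialize_receive_units() below
 * are re-run on every (re)initialization of the interface.
 */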
 3034 
 3035 /*********************************************************************
 3036  *
 3037  *  Set up receive registers and features.
 3038  *
 3039  **********************************************************************/
 3040 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 3041 
 3042 static void
 3043 ixv_initialize_receive_units(struct adapter *adapter)
 3044 {
 3045         struct  rx_ring *rxr = adapter->rx_rings;
 3046         struct ixgbe_hw *hw = &adapter->hw;
 3047         struct ifnet   *ifp = adapter->ifp;
 3048         u32             bufsz, fctrl, rxcsum, hlreg;
 3049 
 3050 
 3051         /* Enable broadcasts */
 3052         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 3053         fctrl |= IXGBE_FCTRL_BAM;
 3054         fctrl |= IXGBE_FCTRL_DPF;
 3055         fctrl |= IXGBE_FCTRL_PMCF;
 3056         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 3057 
 3058         /* Set for Jumbo Frames? */
 3059         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 3060         if (ifp->if_mtu > ETHERMTU) {
 3061                 hlreg |= IXGBE_HLREG0_JUMBOEN;
 3062                 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 3063         } else {
 3064                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
 3065                 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 3066         }
 3067         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
 3068 
 3069         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 3070                 u64 rdba = rxr->rxdma.dma_paddr;
 3071                 u32 reg, rxdctl;
 3072 
 3073                 /* Do the queue enabling first */
 3074                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 3075                 rxdctl |= IXGBE_RXDCTL_ENABLE;
 3076                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
 3077                 for (int k = 0; k < 10; k++) {
 3078                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
 3079                             IXGBE_RXDCTL_ENABLE)
 3080                                 break;
 3081                         else
 3082                                 msec_delay(1);
 3083                 }
 3084                 wmb();
 3085 
 3086                 /* Setup the Base and Length of the Rx Descriptor Ring */
 3087                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
 3088                     (rdba & 0x00000000ffffffffULL));
 3089                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
 3090                     (rdba >> 32));
 3091                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
 3092                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 3093 
 3094                 /* Set up the SRRCTL register */
 3095                 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
 3096                 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 3097                 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 3098                 reg |= bufsz;
 3099                 if (rxr->hdr_split) {
 3100                         /* Use a standard mbuf for the header */
 3101                         reg |= ((IXV_RX_HDR <<
 3102                             IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
 3103                             & IXGBE_SRRCTL_BSIZEHDR_MASK);
 3104                         reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 3105                 } else
 3106                         reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 3107                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
 3108 
 3109                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
 3110                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
 3111                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
 3112                     adapter->num_rx_desc - 1);
 3113         }
 3114 
 3115         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 3116 
 3117         if (ifp->if_capenable & IFCAP_RXCSUM)
 3118                 rxcsum |= IXGBE_RXCSUM_PCSD;
 3119 
 3120         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
 3121                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
 3122 
 3123         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 3124 
 3125         return;
 3126 }
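/*
 * Worked example (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, as in the
 * shared ixgbe code): the SRRCTL packet buffer size field is expressed
 * in 1 KB units, so the non-jumbo case above programs 2048 >> 10 = 2
 * (a 2 KB buffer) and the jumbo case programs 4096 >> 10 = 4 (4 KB).
 */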
 3127 
 3128 /*********************************************************************
 3129  *
 3130  *  Free all receive rings.
 3131  *
 3132  **********************************************************************/
 3133 static void
 3134 ixv_free_receive_structures(struct adapter *adapter)
 3135 {
 3136         struct rx_ring *rxr = adapter->rx_rings;
 3137 
 3138         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 3139                 struct lro_ctrl         *lro = &rxr->lro;
 3140                 ixv_free_receive_buffers(rxr);
 3141                 /* Free LRO memory */
 3142                 tcp_lro_free(lro);
 3143                 /* Free the ring memory as well */
 3144                 ixv_dma_free(adapter, &rxr->rxdma);
 3145         }
 3146 
 3147         free(adapter->rx_rings, M_DEVBUF);
 3148 }
 3149 
 3150 
 3151 /*********************************************************************
 3152  *
 3153  *  Free receive ring data structures
 3154  *
 3155  **********************************************************************/
 3156 static void
 3157 ixv_free_receive_buffers(struct rx_ring *rxr)
 3158 {
 3159         struct adapter          *adapter = rxr->adapter;
 3160         struct ixv_rx_buf       *rxbuf;
 3161 
 3162         INIT_DEBUGOUT("ixv_free_receive_buffers: begin");
 3163 
 3164         /* Cleanup any existing buffers */
 3165         if (rxr->rx_buffers != NULL) {
 3166                 for (int i = 0; i < adapter->num_rx_desc; i++) {
 3167                         rxbuf = &rxr->rx_buffers[i];
 3168                         if (rxbuf->m_head != NULL) {
 3169                                 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
 3170                                     BUS_DMASYNC_POSTREAD);
 3171                                 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
 3172                                 rxbuf->m_head->m_flags |= M_PKTHDR;
 3173                                 m_freem(rxbuf->m_head);
 3174                         }
 3175                         if (rxbuf->m_pack != NULL) {
 3176                                 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 3177                                     BUS_DMASYNC_POSTREAD);
 3178                                 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
 3179                                 rxbuf->m_pack->m_flags |= M_PKTHDR;
 3180                                 m_freem(rxbuf->m_pack);
 3181                         }
 3182                         rxbuf->m_head = NULL;
 3183                         rxbuf->m_pack = NULL;
 3184                         if (rxbuf->hmap != NULL) {
 3185                                 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
 3186                                 rxbuf->hmap = NULL;
 3187                         }
 3188                         if (rxbuf->pmap != NULL) {
 3189                                 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
 3190                                 rxbuf->pmap = NULL;
 3191                         }
 3192                 }
 3193                 if (rxr->rx_buffers != NULL) {
 3194                         free(rxr->rx_buffers, M_DEVBUF);
 3195                         rxr->rx_buffers = NULL;
 3196                 }
 3197         }
 3198 
 3199         if (rxr->htag != NULL) {
 3200                 bus_dma_tag_destroy(rxr->htag);
 3201                 rxr->htag = NULL;
 3202         }
 3203         if (rxr->ptag != NULL) {
 3204                 bus_dma_tag_destroy(rxr->ptag);
 3205                 rxr->ptag = NULL;
 3206         }
 3207 
 3208         return;
 3209 }
 3210 
 3211 static __inline void
 3212 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
 3213 {
 3214                  
 3215         /*
 3216          * At the moment LRO is only used for IPv4/TCP packets whose TCP
 3217          * checksum has been computed by the hardware, and the packet must
 3218          * not carry a VLAN tag in its Ethernet header.
 3219          */
 3220         if (rxr->lro_enabled &&
 3221             (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 3222             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
 3223             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
 3224             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
 3225             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 3226             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 3227                 /*
 3228                  * Send to the stack if:
 3229                  *  - LRO is not enabled, or
 3230                  *  - there are no LRO resources, or
 3231                  *  - the LRO enqueue fails.
 3232                  */
 3233                 if (rxr->lro.lro_cnt != 0)
 3234                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
 3235                                 return;
 3236         }
 3237         (*ifp->if_input)(ifp, m);
 3238 }
 3239 
 3240 static __inline void
 3241 ixv_rx_discard(struct rx_ring *rxr, int i)
 3242 {
 3243         struct adapter          *adapter = rxr->adapter;
 3244         struct ixv_rx_buf       *rbuf;
 3245         struct mbuf             *mh, *mp;
 3246 
 3247         rbuf = &rxr->rx_buffers[i];
 3248         if (rbuf->fmp != NULL) /* Partial chain ? */
 3249                 m_freem(rbuf->fmp);
 3250 
 3251         mh = rbuf->m_head;
 3252         mp = rbuf->m_pack;
 3253 
 3254         /* Reuse loaded DMA map and just update mbuf chain */
 3255         mh->m_len = MHLEN;
 3256         mh->m_flags |= M_PKTHDR;
 3257         mh->m_next = NULL;
 3258 
 3259         mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
 3260         mp->m_data = mp->m_ext.ext_buf;
 3261         mp->m_next = NULL;
 3262         return;
 3263 }
 3264 
 3265 
 3266 /*********************************************************************
 3267  *
 3268  *  This routine executes in interrupt context. It replenishes
 3269  *  the mbufs in the descriptor and sends data which has been
 3270  *  dma'ed into host memory to upper layer.
 3271  *
 3272  *  We loop at most count times if count is > 0, or until done if
 3273  *  count < 0.
 3274  *
 3275  *  Return TRUE for more work, FALSE for all clean.
 3276  *********************************************************************/
 3277 static bool
 3278 ixv_rxeof(struct ix_queue *que, int count)
 3279 {
 3280         struct adapter          *adapter = que->adapter;
 3281         struct rx_ring          *rxr = que->rxr;
 3282         struct ifnet            *ifp = adapter->ifp;
 3283         struct lro_ctrl         *lro = &rxr->lro;
 3284         struct lro_entry        *queued;
 3285         int                     i, nextp, processed = 0;
 3286         u32                     staterr = 0;
 3287         union ixgbe_adv_rx_desc *cur;
 3288         struct ixv_rx_buf       *rbuf, *nbuf;
 3289 
 3290         IXV_RX_LOCK(rxr);
 3291 
 3292         for (i = rxr->next_to_check; count != 0;) {
 3293                 struct mbuf     *sendmp, *mh, *mp;
 3294                 u32             rsc, ptype;
 3295                 u16             hlen, plen, hdr, vtag;
 3296                 bool            eop;
 3297  
 3298                 /* Sync the ring. */
 3299                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 3300                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3301 
 3302                 cur = &rxr->rx_base[i];
 3303                 staterr = le32toh(cur->wb.upper.status_error);
 3304 
 3305                 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
 3306                         break;
 3307                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 3308                         break;
 3309 
 3310                 count--;
 3311                 sendmp = NULL;
 3312                 nbuf = NULL;
 3313                 rsc = 0;
 3314                 cur->wb.upper.status_error = 0;
 3315                 rbuf = &rxr->rx_buffers[i];
 3316                 mh = rbuf->m_head;
 3317                 mp = rbuf->m_pack;
 3318 
 3319                 plen = le16toh(cur->wb.upper.length);
 3320                 ptype = le32toh(cur->wb.lower.lo_dword.data) &
 3321                     IXGBE_RXDADV_PKTTYPE_MASK;
 3322                 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
 3323                 vtag = le16toh(cur->wb.upper.vlan);
 3324                 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
 3325 
 3326                 /* Make sure all parts of a bad packet are discarded */
 3327                 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
 3328                     (rxr->discard)) {
 3329                         ifp->if_ierrors++;
 3330                         rxr->rx_discarded++;
 3331                         if (!eop)
 3332                                 rxr->discard = TRUE;
 3333                         else
 3334                                 rxr->discard = FALSE;
 3335                         ixv_rx_discard(rxr, i);
 3336                         goto next_desc;
 3337                 }
 3338 
 3339                 if (!eop) {
 3340                         nextp = i + 1;
 3341                         if (nextp == adapter->num_rx_desc)
 3342                                 nextp = 0;
 3343                         nbuf = &rxr->rx_buffers[nextp];
 3344                         prefetch(nbuf);
 3345                 }
 3346                 /*
 3347                 ** The header mbuf is ONLY used when header
 3348                 ** split is enabled; otherwise we get normal
 3349                 ** behavior, i.e., both header and payload
 3350                 ** are DMA'd into the payload buffer.
 3351                 **
 3352                 ** Rather than using the fmp/lmp global pointers,
 3353                 ** we now keep the head of the packet chain in the
 3354                 ** buffer struct and pass it along from one
 3355                 ** descriptor to the next until we reach EOP.
 3356                 */
 3357                 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
 3358                         /* This must be an initial descriptor */
 3359                         hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 3360                             IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 3361                         if (hlen > IXV_RX_HDR)
 3362                                 hlen = IXV_RX_HDR;
 3363                         mh->m_len = hlen;
 3364                         mh->m_flags |= M_PKTHDR;
 3365                         mh->m_next = NULL;
 3366                         mh->m_pkthdr.len = mh->m_len;
 3367                         /* Null buf pointer so it is refreshed */
 3368                         rbuf->m_head = NULL;
 3369                         /*
 3370                         ** Check the payload length; it
 3371                         ** could be zero if this is a small
 3372                         ** packet.
 3373                         */
 3374                         if (plen > 0) {
 3375                                 mp->m_len = plen;
 3376                                 mp->m_next = NULL;
 3377                                 mp->m_flags &= ~M_PKTHDR;
 3378                                 mh->m_next = mp;
 3379                                 mh->m_pkthdr.len += mp->m_len;
 3380                                 /* Null buf pointer so it is refreshed */
 3381                                 rbuf->m_pack = NULL;
 3382                                 rxr->rx_split_packets++;
 3383                         }
 3384                         /*
 3385                         ** Now create the forward
 3386                         ** chain, so that when the packet
 3387                         ** completes we won't have to.
 3388                         */
 3389                         if (eop == 0) {
 3390                                 /* stash the chain head */
 3391                                 nbuf->fmp = mh;
 3392                                 /* Make forward chain */
 3393                                 if (plen)
 3394                                         mp->m_next = nbuf->m_pack;
 3395                                 else
 3396                                         mh->m_next = nbuf->m_pack;
 3397                         } else {
 3398                                 /* Singlet, prepare to send */
 3399                                 sendmp = mh;
 3400                                 if (staterr & IXGBE_RXD_STAT_VP) {
 3401                                         sendmp->m_pkthdr.ether_vtag = vtag;
 3402                                         sendmp->m_flags |= M_VLANTAG;
 3403                                 }
 3404                         }
 3405                 } else {
 3406                         /*
 3407                         ** Either no header split, or a
 3408                         ** secondary piece of a fragmented
 3409                         ** split packet.
 3410                         */
 3411                         mp->m_len = plen;
 3412                         /*
 3413                         ** See if there is a stored head
 3414                         ** that tells us what this descriptor is part of.
 3415                         */
 3416                         sendmp = rbuf->fmp;
 3417                         rbuf->m_pack = rbuf->fmp = NULL;
 3418 
 3419                         if (sendmp != NULL) /* secondary frag */
 3420                                 sendmp->m_pkthdr.len += mp->m_len;
 3421                         else {
 3422                                 /* first desc of a non-ps chain */
 3423                                 sendmp = mp;
 3424                                 sendmp->m_flags |= M_PKTHDR;
 3425                                 sendmp->m_pkthdr.len = mp->m_len;
 3426                                 if (staterr & IXGBE_RXD_STAT_VP) {
 3427                                         sendmp->m_pkthdr.ether_vtag = vtag;
 3428                                         sendmp->m_flags |= M_VLANTAG;
 3429                                 }
 3430                         }
 3431                         /* Pass the head pointer on */
 3432                         if (eop == 0) {
 3433                                 nbuf->fmp = sendmp;
 3434                                 sendmp = NULL;
 3435                                 mp->m_next = nbuf->m_pack;
 3436                         }
 3437                 }
 3438                 ++processed;
 3439                 /* Sending this frame? */
 3440                 if (eop) {
 3441                         sendmp->m_pkthdr.rcvif = ifp;
 3442                         ifp->if_ipackets++;
 3443                         rxr->rx_packets++;
 3444                         /* capture data for AIM */
 3445                         rxr->bytes += sendmp->m_pkthdr.len;
 3446                         rxr->rx_bytes += sendmp->m_pkthdr.len;
 3447                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3448                                 ixv_rx_checksum(staterr, sendmp, ptype);
 3449 #if __FreeBSD_version >= 800000
 3450                         sendmp->m_pkthdr.flowid = que->msix;
 3451                         sendmp->m_flags |= M_FLOWID;
 3452 #endif
 3453                 }
 3454 next_desc:
 3455                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 3456                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3457 
 3458                 /* Advance our pointers to the next descriptor. */
 3459                 if (++i == adapter->num_rx_desc)
 3460                         i = 0;
 3461 
 3462                 /* Now send to the stack or do LRO */
 3463                 if (sendmp != NULL)
 3464                         ixv_rx_input(rxr, ifp, sendmp, ptype);
 3465 
 3466                 /* Every 8 descriptors we go to refresh mbufs */
 3467                 if (processed == 8) {
 3468                         ixv_refresh_mbufs(rxr, i);
 3469                         processed = 0;
 3470                 }
 3471         }
 3472 
 3473         /* Refresh any remaining buf structs */
 3474         if (processed != 0) {
 3475                 ixv_refresh_mbufs(rxr, i);
 3476                 processed = 0;
 3477         }
 3478 
 3479         rxr->next_to_check = i;
 3480 
 3481         /*
 3482          * Flush any outstanding LRO work
 3483          */
 3484         while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
 3485                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
 3486                 tcp_lro_flush(lro, queued);
 3487         }
 3488 
 3489         IXV_RX_UNLOCK(rxr);
 3490 
 3491         /*
 3492         ** Do we still have cleaning to do?
 3493         ** If so, schedule another interrupt.
 3494         */
 3495         if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
 3496                 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
 3497                 return (TRUE);
 3498         }
 3499 
 3500         return (FALSE);
 3501 }
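/*
 * Note (illustrative, derived from the loop above): refreshing after
 * every 8 processed descriptors, plus once more for any remainder,
 * amortizes the VFRDT tail writes done in ixv_refresh_mbufs() while
 * still returning buffers to the hardware promptly during long cleanup
 * passes.
 */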
 3502 
 3503 
 3504 /*********************************************************************
 3505  *
 3506  *  Verify that the hardware indicated that the checksum is valid.
 3507  *  Inform the stack about the status of checksum so that stack
 3508  *  doesn't spend time verifying the checksum.
 3509  *
 3510  *********************************************************************/
 3511 static void
 3512 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
 3513 {
 3514         u16     status = (u16) staterr;
 3515         u8      errors = (u8) (staterr >> 24);
 3516         bool    sctp = FALSE;
 3517 
 3518         if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
 3519             (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
 3520                 sctp = TRUE;
 3521 
 3522         if (status & IXGBE_RXD_STAT_IPCS) {
 3523                 if (!(errors & IXGBE_RXD_ERR_IPE)) {
 3524                         /* IP Checksum Good */
 3525                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
 3526                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3527 
 3528                 } else
 3529                         mp->m_pkthdr.csum_flags = 0;
 3530         }
 3531         if (status & IXGBE_RXD_STAT_L4CS) {
 3532                 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 3533 #if __FreeBSD_version >= 800000
 3534                 if (sctp)
 3535                         type = CSUM_SCTP_VALID;
 3536 #endif
 3537                 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
 3538                         mp->m_pkthdr.csum_flags |= type;
 3539                         if (!sctp)
 3540                                 mp->m_pkthdr.csum_data = htons(0xffff);
 3541                 } 
 3542         }
 3543         return;
 3544 }
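/*
 * Note (background, not from this listing): setting CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR together with csum_data = 0xffff is the conventional
 * FreeBSD way of telling the stack that the L4 checksum has already
 * been verified, so it can skip software verification.
 */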
 3545 
 3546 static void
 3547 ixv_setup_vlan_support(struct adapter *adapter)
 3548 {
 3549         struct ixgbe_hw *hw = &adapter->hw;
 3550         u32             ctrl, vid, vfta, retry;
 3551 
 3552 
 3553         /*
 3554         ** We get here through init_locked, meaning
 3555         ** a soft reset; this has already cleared
 3556         ** the VFTA and other state, so if no
 3557         ** VLANs have been registered, do nothing.
 3558         */
 3559         if (adapter->num_vlans == 0)
 3560                 return;
 3561 
 3562         /* Enable the queues */
 3563         for (int i = 0; i < adapter->num_queues; i++) {
 3564                 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 3565                 ctrl |= IXGBE_RXDCTL_VME;
 3566                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
 3567         }
 3568 
 3569         /*
 3570         ** A soft reset zeroes out the VFTA, so
 3571         ** we need to repopulate it now.
 3572         */
 3573         for (int i = 0; i < VFTA_SIZE; i++) {
 3574                 if (ixv_shadow_vfta[i] == 0)
 3575                         continue;
 3576                 vfta = ixv_shadow_vfta[i];
 3577                 /*
 3578                 ** Reconstruct the VLAN IDs
 3579                 ** from the bits set in each
 3580                 ** of the array words.
 3581                 */
 3582                 for (int j = 0; j < 32; j++) {
 3583                         retry = 0;
 3584                         if ((vfta & (1 << j)) == 0)
 3585                                 continue;
 3586                         vid = (i * 32) + j;
 3587                         /* Call the shared code mailbox routine */
 3588                         while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
 3589                                 if (++retry > 5)
 3590                                         break;
 3591                         }
 3592                 }
 3593         }
 3594 }
 3595 
 3596 /*
 3597 ** This routine is run via a VLAN config EVENT;
 3598 ** it enables us to use the HW filter table since
 3599 ** we can get the VLAN ID.  It just creates the
 3600 ** entry in the soft version of the VFTA; init will
 3601 ** repopulate the real table.
 3602 */
 3603 static void
 3604 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 3605 {
 3606         struct adapter  *adapter = ifp->if_softc;
 3607         u16             index, bit;
 3608 
 3609         if (ifp->if_softc !=  arg)   /* Not our event */
 3610                 return;
 3611 
 3612         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
 3613                 return;
 3614 
 3615         index = (vtag >> 5) & 0x7F;
 3616         bit = vtag & 0x1F;
 3617         ixv_shadow_vfta[index] |= (1 << bit);
 3618         ++adapter->num_vlans;
 3619         /* Re-init to load the changes */
 3620         ixv_init(adapter);
 3621 }
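/*
 * Worked example (derived from the code above): for vtag 100,
 * index = 100 >> 5 = 3 and bit = 100 & 0x1F = 4, so bit 4 of
 * ixv_shadow_vfta[3] is set; ixv_unregister_vlan() below clears the
 * same bit.
 */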
 3622 
 3623 /*
 3624 ** This routine is run via a VLAN
 3625 ** unconfig EVENT; remove our entry
 3626 ** from the soft VFTA.
 3627 */
 3628 static void
 3629 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
 3630 {
 3631         struct adapter  *adapter = ifp->if_softc;
 3632         u16             index, bit;
 3633 
 3634         if (ifp->if_softc !=  arg)
 3635                 return;
 3636 
 3637         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
 3638                 return;
 3639 
 3640         index = (vtag >> 5) & 0x7F;
 3641         bit = vtag & 0x1F;
 3642         ixv_shadow_vfta[index] &= ~(1 << bit);
 3643         --adapter->num_vlans;
 3644         /* Re-init to load the changes */
 3645         ixv_init(adapter);
 3646 }
 3647 
 3648 static void
 3649 ixv_enable_intr(struct adapter *adapter)
 3650 {
 3651         struct ixgbe_hw *hw = &adapter->hw;
 3652         struct ix_queue *que = adapter->queues;
 3653         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 3654 
 3655 
 3656         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 3657 
 3658         mask = IXGBE_EIMS_ENABLE_MASK;
 3659         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 3660         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
 3661 
 3662         for (int i = 0; i < adapter->num_queues; i++, que++)
 3663                 ixv_enable_queue(adapter, que->msix);
 3664 
 3665         IXGBE_WRITE_FLUSH(hw);
 3666 
 3667         return;
 3668 }
 3669 
 3670 static void
 3671 ixv_disable_intr(struct adapter *adapter)
 3672 {
 3673         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
 3674         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
 3675         IXGBE_WRITE_FLUSH(&adapter->hw);
 3676         return;
 3677 }
 3678 
 3679 /*
 3680 ** Set up the correct IVAR register for a particular MSIX interrupt
 3681 **  - entry is the register array entry
 3682 **  - vector is the MSIX vector for this queue
 3683 **  - type is RX/TX/MISC
 3684 */
 3685 static void
 3686 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
 3687 {
 3688         struct ixgbe_hw *hw = &adapter->hw;
 3689         u32 ivar, index;
 3690 
 3691         vector |= IXGBE_IVAR_ALLOC_VAL;
 3692 
 3693         if (type == -1) { /* MISC IVAR */
 3694                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
 3695                 ivar &= ~0xFF;
 3696                 ivar |= vector;
 3697                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
 3698         } else {        /* RX/TX IVARS */
 3699                 index = (16 * (entry & 1)) + (8 * type);
 3700                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
 3701                 ivar &= ~(0xFF << index);
 3702                 ivar |= (vector << index);
 3703                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
 3704         }
 3705 }
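/*
 * Worked example (derived from the code above): each VTIVAR register
 * packs four 8-bit vector fields.  For entry = 3 and type = 1 (TX),
 * index = (16 * (3 & 1)) + (8 * 1) = 24, so the vector is written to
 * bits 31:24 of VTIVAR(3 >> 1) = VTIVAR(1).
 */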
 3706 
 3707 static void
 3708 ixv_configure_ivars(struct adapter *adapter)
 3709 {
 3710         struct  ix_queue *que = adapter->queues;
 3711 
 3712         for (int i = 0; i < adapter->num_queues; i++, que++) {
 3713                 /* First the RX queue entry */
 3714                 ixv_set_ivar(adapter, i, que->msix, 0);
 3715                 /* ... and the TX */
 3716                 ixv_set_ivar(adapter, i, que->msix, 1);
 3717                 /* Set an initial value in EITR */
 3718                 IXGBE_WRITE_REG(&adapter->hw,
 3719                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
 3720         }
 3721 
 3722         /* For the Link interrupt */
 3723         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
 3724 }
 3725 
 3726 
 3727 /*
 3728 ** Task handler for MSIX MBX interrupts
 3729 **  - run outside interrupt context since it might sleep
 3730 */
 3731 static void
 3732 ixv_handle_mbx(void *context, int pending)
 3733 {
 3734         struct adapter  *adapter = context;
 3735 
 3736         ixgbe_check_link(&adapter->hw,
 3737             &adapter->link_speed, &adapter->link_up, 0);
 3738         ixv_update_link_status(adapter);
 3739 }
 3740 
 3741 /*
 3742 ** The VF stats registers never have a truly virgin
 3743 ** starting point, so this routine tries to make an
 3744 ** artificial one, marking ground zero on attach, as
 3745 ** it were.
 3746 */
 3747 static void
 3748 ixv_save_stats(struct adapter *adapter)
 3749 {
 3750         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
 3751                 adapter->stats.saved_reset_vfgprc +=
 3752                     adapter->stats.vfgprc - adapter->stats.base_vfgprc;
 3753                 adapter->stats.saved_reset_vfgptc +=
 3754                     adapter->stats.vfgptc - adapter->stats.base_vfgptc;
 3755                 adapter->stats.saved_reset_vfgorc +=
 3756                     adapter->stats.vfgorc - adapter->stats.base_vfgorc;
 3757                 adapter->stats.saved_reset_vfgotc +=
 3758                     adapter->stats.vfgotc - adapter->stats.base_vfgotc;
 3759                 adapter->stats.saved_reset_vfmprc +=
 3760                     adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 3761         }
 3762 }
 3763  
 3764 static void
 3765 ixv_init_stats(struct adapter *adapter)
 3766 {
 3767         struct ixgbe_hw *hw = &adapter->hw;
 3768  
 3769         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
 3770         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
 3771         adapter->stats.last_vfgorc |=
 3772             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
 3773 
 3774         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
 3775         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
 3776         adapter->stats.last_vfgotc |=
 3777             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
 3778 
 3779         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
 3780 
 3781         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
 3782         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
 3783         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
 3784         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
 3785         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 3786 }
 3787 
 3788 #define UPDATE_STAT_32(reg, last, count)                \
 3789 {                                                       \
 3790         u32 current = IXGBE_READ_REG(hw, reg);          \
 3791         if (current < last)                             \
 3792                 count += 0x100000000LL;                 \
 3793         last = current;                                 \
 3794         count &= 0xFFFFFFFF00000000LL;                  \
 3795         count |= current;                               \
 3796 }
 3797 
 3798 #define UPDATE_STAT_36(lsb, msb, last, count)           \
 3799 {                                                       \
 3800         u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
 3801         u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
 3802         u64 current = ((cur_msb << 32) | cur_lsb);      \
 3803         if (current < last)                             \
 3804                 count += 0x1000000000LL;                \
 3805         last = current;                                 \
 3806         count &= 0xFFFFFFF000000000LL;                  \
 3807         count |= current;                               \
 3808 }
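/*
 * Illustrative sketch (not part of the driver): a standalone equivalent
 * of UPDATE_STAT_32 above, written as a function over plain stdint
 * types, showing how a wrapping 32-bit hardware counter is folded into
 * a monotonically growing 64-bit software count.
 */
#include <stdint.h>

static uint64_t
extend_stat32(uint32_t current, uint32_t *last, uint64_t count)
{
        if (current < *last)            /* hardware counter wrapped */
                count += 0x100000000ULL;
        *last = current;
        count &= 0xFFFFFFFF00000000ULL; /* keep the accumulated upper bits */
        count |= current;               /* splice in the latest low 32 bits */
        return (count);
}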
 3809 
 3810 /*
 3811 ** ixv_update_stats - Update the board statistics counters.
 3812 */
 3813 void
 3814 ixv_update_stats(struct adapter *adapter)
 3815 {
 3816         struct ixgbe_hw *hw = &adapter->hw;
 3817 
 3818         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
 3819             adapter->stats.vfgprc);
 3820         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
 3821             adapter->stats.vfgptc);
 3822         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
 3823             adapter->stats.last_vfgorc, adapter->stats.vfgorc);
 3824         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
 3825             adapter->stats.last_vfgotc, adapter->stats.vfgotc);
 3826         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
 3827             adapter->stats.vfmprc);
 3828 }
 3829 
 3830 /**********************************************************************
 3831  *
 3832  *  This routine is called when the stats sysctl handler requests it.
 3833  *  It provides a way to take a look at important statistics
 3834  *  maintained by the driver and hardware.
 3835  *
 3836  **********************************************************************/
 3837 static void
 3838 ixv_print_hw_stats(struct adapter * adapter)
 3839 {
 3840         device_t dev = adapter->dev;
 3841 
 3842         device_printf(dev,"Std Mbuf Failed = %lu\n",
 3843                adapter->mbuf_defrag_failed);
 3844         device_printf(dev,"Driver dropped packets = %lu\n",
 3845                adapter->dropped_pkts);
 3846         device_printf(dev, "watchdog timeouts = %ld\n",
 3847                adapter->watchdog_events);
 3848 
 3849         device_printf(dev,"Good Packets Rcvd = %llu\n",
 3850                (long long)adapter->stats.vfgprc);
 3851         device_printf(dev,"Good Packets Xmtd = %llu\n",
 3852                (long long)adapter->stats.vfgptc);
 3853         device_printf(dev,"TSO Transmissions = %lu\n",
 3854                adapter->tso_tx);
 3855 
 3856 }
 3857 
 3858 /**********************************************************************
 3859  *
 3860  *  This routine is called when the debug sysctl handler requests it.
 3861  *  It provides a way to take a look at important statistics
 3862  *  maintained by the driver and hardware.
 3863  *
 3864  **********************************************************************/
 3865 static void
 3866 ixv_print_debug_info(struct adapter *adapter)
 3867 {
 3868         device_t dev = adapter->dev;
 3869         struct ixgbe_hw         *hw = &adapter->hw;
 3870         struct ix_queue         *que = adapter->queues;
 3871         struct rx_ring          *rxr;
 3872         struct tx_ring          *txr;
 3873         struct lro_ctrl         *lro;
 3874 
 3875         device_printf(dev,"Error Byte Count = %u \n",
 3876             IXGBE_READ_REG(hw, IXGBE_ERRBC));
 3877 
 3878         for (int i = 0; i < adapter->num_queues; i++, que++) {
 3879                 txr = que->txr;
 3880                 rxr = que->rxr;
 3881                 lro = &rxr->lro;
 3882                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
 3883                     que->msix, (long)que->irqs);
 3884                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
 3885                     rxr->me, (long long)rxr->rx_packets);
 3886                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
 3887                     rxr->me, (long long)rxr->rx_split_packets);
 3888                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
 3889                     rxr->me, (long)rxr->rx_bytes);
 3890                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
 3891                     rxr->me, lro->lro_queued);
 3892                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
 3893                     rxr->me, lro->lro_flushed);
 3894                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
 3895                     txr->me, (long)txr->total_packets);
 3896                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
 3897                     txr->me, (long)txr->no_desc_avail);
 3898         }
 3899 
 3900         device_printf(dev,"MBX IRQ Handled: %lu\n",
 3901             (long)adapter->mbx_irq);
 3902         return;
 3903 }
 3904 
 3905 static int
 3906 ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
 3907 {
 3908         int             error;
 3909         int             result;
 3910         struct adapter *adapter;
 3911 
 3912         result = -1;
 3913         error = sysctl_handle_int(oidp, &result, 0, req);
 3914 
 3915         if (error || !req->newptr)
 3916                 return (error);
 3917 
 3918         if (result == 1) {
 3919                 adapter = (struct adapter *) arg1;
 3920                 ixv_print_hw_stats(adapter);
 3921         }
 3922         return error;
 3923 }
 3924 
 3925 static int
 3926 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
 3927 {
 3928         int error, result;
 3929         struct adapter *adapter;
 3930 
 3931         result = -1;
 3932         error = sysctl_handle_int(oidp, &result, 0, req);
 3933 
 3934         if (error || !req->newptr)
 3935                 return (error);
 3936 
 3937         if (result == 1) {
 3938                 adapter = (struct adapter *) arg1;
 3939                 ixv_print_debug_info(adapter);
 3940         }
 3941         return error;
 3942 }
 3943 
 3944 /*
 3945 ** Set flow control using sysctl:
 3946 ** Flow control values:
 3947 **      0 - off
 3948 **      1 - rx pause
 3949 **      2 - tx pause
 3950 **      3 - full
 3951 */
 3952 static int
 3953 ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
 3954 {
 3955         int error;
 3956         struct adapter *adapter;
 3957 
 3958         error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
 3959 
 3960         if (error)
 3961                 return (error);
 3962 
 3963         adapter = (struct adapter *) arg1;
 3964         switch (ixv_flow_control) {
 3965                 case ixgbe_fc_rx_pause:
 3966                 case ixgbe_fc_tx_pause:
 3967                 case ixgbe_fc_full:
 3968                         adapter->hw.fc.requested_mode = ixv_flow_control;
 3969                         break;
 3970                 case ixgbe_fc_none:
 3971                 default:
 3972                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
 3973         }
 3974 
 3975         ixgbe_fc_enable(&adapter->hw, 0);
 3976         return error;
 3977 }
 3978 
 3979 static void
 3980 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
 3981         const char *description, int *limit, int value)
 3982 {
 3983         *limit = value;
 3984         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
 3985             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
 3986             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
 3987 }
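/*
 * Illustrative call site (hypothetical; the sysctl name and default
 * below are assumptions, not taken from this listing): attach code
 * would typically register the limit as a read/write sysctl, e.g.
 *
 *      ixv_add_rx_process_limit(adapter, "rx_processing_limit",
 *          "max number of rx packets to process",
 *          &adapter->rx_process_limit, 128);
 */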
 3988 
