
FreeBSD/Linux Kernel Cross Reference
sys/dev/ena/ena.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  *
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include "opt_rss.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/bus.h>
   38 #include <sys/endian.h>
   39 #include <sys/kernel.h>
   40 #include <sys/kthread.h>
   41 #include <sys/malloc.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/module.h>
   44 #include <sys/rman.h>
   45 #include <sys/smp.h>
   46 #include <sys/socket.h>
   47 #include <sys/sockio.h>
   48 #include <sys/sysctl.h>
   49 #include <sys/taskqueue.h>
   50 #include <sys/time.h>
   51 #include <sys/eventhandler.h>
   52 
   53 #include <machine/bus.h>
   54 #include <machine/resource.h>
   55 #include <machine/in_cksum.h>
   56 
   57 #include <net/bpf.h>
   58 #include <net/ethernet.h>
   59 #include <net/if.h>
   60 #include <net/if_var.h>
   61 #include <net/if_arp.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <netinet/in_systm.h>
   68 #include <netinet/in.h>
   69 #include <netinet/if_ether.h>
   70 #include <netinet/ip.h>
   71 #include <netinet/ip6.h>
   72 #include <netinet/tcp.h>
   73 #include <netinet/udp.h>
   74 
   75 #include <dev/pci/pcivar.h>
   76 #include <dev/pci/pcireg.h>
   77 
   78 #include <vm/vm.h>
   79 #include <vm/pmap.h>
   80 
   81 #include "ena_datapath.h"
   82 #include "ena.h"
   83 #include "ena_sysctl.h"
   84 #include "ena_rss.h"
   85 
   86 #ifdef DEV_NETMAP
   87 #include "ena_netmap.h"
   88 #endif /* DEV_NETMAP */
   89 
   90 /*********************************************************
   91  *  Function prototypes
   92  *********************************************************/
   93 static int      ena_probe(device_t);
   94 static void     ena_intr_msix_mgmnt(void *);
   95 static void     ena_free_pci_resources(struct ena_adapter *);
   96 static int      ena_change_mtu(if_t, int);
   97 static inline void ena_alloc_counters(counter_u64_t *, int);
   98 static inline void ena_free_counters(counter_u64_t *, int);
   99 static inline void ena_reset_counters(counter_u64_t *, int);
  100 static void     ena_init_io_rings_common(struct ena_adapter *,
  101     struct ena_ring *, uint16_t);
  102 static void     ena_init_io_rings_basic(struct ena_adapter *);
  103 static void     ena_init_io_rings_advanced(struct ena_adapter *);
  104 static void     ena_init_io_rings(struct ena_adapter *);
  105 static void     ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
  106 static void     ena_free_all_io_rings_resources(struct ena_adapter *);
  107 static int      ena_setup_tx_dma_tag(struct ena_adapter *);
  108 static int      ena_free_tx_dma_tag(struct ena_adapter *);
  109 static int      ena_setup_rx_dma_tag(struct ena_adapter *);
  110 static int      ena_free_rx_dma_tag(struct ena_adapter *);
  111 static void     ena_release_all_tx_dmamap(struct ena_ring *);
  112 static int      ena_setup_tx_resources(struct ena_adapter *, int);
  113 static void     ena_free_tx_resources(struct ena_adapter *, int);
  114 static int      ena_setup_all_tx_resources(struct ena_adapter *);
  115 static void     ena_free_all_tx_resources(struct ena_adapter *);
  116 static int      ena_setup_rx_resources(struct ena_adapter *, unsigned int);
  117 static void     ena_free_rx_resources(struct ena_adapter *, unsigned int);
  118 static int      ena_setup_all_rx_resources(struct ena_adapter *);
  119 static void     ena_free_all_rx_resources(struct ena_adapter *);
  120 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
  121     struct ena_rx_buffer *);
  122 static void     ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
  123     struct ena_rx_buffer *);
  124 static void     ena_free_rx_bufs(struct ena_adapter *, unsigned int);
  125 static void     ena_refill_all_rx_bufs(struct ena_adapter *);
  126 static void     ena_free_all_rx_bufs(struct ena_adapter *);
  127 static void     ena_free_tx_bufs(struct ena_adapter *, unsigned int);
  128 static void     ena_free_all_tx_bufs(struct ena_adapter *);
  129 static void     ena_destroy_all_tx_queues(struct ena_adapter *);
  130 static void     ena_destroy_all_rx_queues(struct ena_adapter *);
  131 static void     ena_destroy_all_io_queues(struct ena_adapter *);
  132 static int      ena_create_io_queues(struct ena_adapter *);
  133 static int      ena_handle_msix(void *);
  134 static int      ena_enable_msix(struct ena_adapter *);
  135 static void     ena_setup_mgmnt_intr(struct ena_adapter *);
  136 static int      ena_setup_io_intr(struct ena_adapter *);
  137 static int      ena_request_mgmnt_irq(struct ena_adapter *);
  138 static int      ena_request_io_irq(struct ena_adapter *);
  139 static void     ena_free_mgmnt_irq(struct ena_adapter *);
  140 static void     ena_free_io_irq(struct ena_adapter *);
   141 static void     ena_free_irqs(struct ena_adapter *);
  142 static void     ena_disable_msix(struct ena_adapter *);
  143 static void     ena_unmask_all_io_irqs(struct ena_adapter *);
  144 static int      ena_up_complete(struct ena_adapter *);
  145 static uint64_t ena_get_counter(if_t, ift_counter);
  146 static int      ena_media_change(if_t);
  147 static void     ena_media_status(if_t, struct ifmediareq *);
  148 static void     ena_init(void *);
  149 static int      ena_ioctl(if_t, u_long, caddr_t);
  150 static int      ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
  151 static void     ena_update_host_info(struct ena_admin_host_info *, if_t);
  152 static void     ena_update_hwassist(struct ena_adapter *);
  153 static int      ena_setup_ifnet(device_t, struct ena_adapter *,
  154     struct ena_com_dev_get_features_ctx *);
  155 static int      ena_enable_wc(device_t, struct resource *);
  156 static int      ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
  157     struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
  158 static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
  159     struct ena_com_dev_get_features_ctx *);
  160 static int      ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
  161 static void     ena_config_host_info(struct ena_com_dev *, device_t);
  162 static int      ena_attach(device_t);
  163 static int      ena_detach(device_t);
  164 static int      ena_device_init(struct ena_adapter *, device_t,
  165     struct ena_com_dev_get_features_ctx *, int *);
  166 static int      ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
  167 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
  168 static void     unimplemented_aenq_handler(void *,
  169     struct ena_admin_aenq_entry *);
  170 static int      ena_copy_eni_metrics(struct ena_adapter *);
  171 static void     ena_timer_service(void *);
  172 
  173 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
  174 
  175 static ena_vendor_info_t ena_vendor_info_array[] = {
  176     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
  177     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0},
  178     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
  179     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0},
  180     /* Last entry */
  181     { 0, 0, 0 }
  182 };
  183 
  184 struct sx ena_global_lock;
  185 
  186 /*
   187  * Contains pointers to event handlers, e.g. link state change.
  188  */
  189 static struct ena_aenq_handlers aenq_handlers;
  190 
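/*
 * busdma load callback: bus_dmamap_load() reports the resulting segments
 * through a callback rather than a return value.  This helper captures the
 * bus address of the first (and, for the single-segment tag created by
 * ena_dma_alloc() below, only) segment into the bus_addr_t pointed to by
 * 'arg'.  On error the destination is left untouched, so a caller that
 * pre-zeroes it can detect the failure (as ena_dma_alloc() does).
 */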
  191 void
  192 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  193 {
  194         if (error != 0)
  195                 return;
  196         *(bus_addr_t *) arg = segs[0].ds_addr;
  197 }
  198 
  199 int
  200 ena_dma_alloc(device_t dmadev, bus_size_t size,
  201     ena_mem_handle_t *dma, int mapflags, bus_size_t alignment)
  202 {
  203         struct ena_adapter* adapter = device_get_softc(dmadev);
  204         device_t pdev = adapter->pdev;
  205         uint32_t maxsize;
  206         uint64_t dma_space_addr;
  207         int error;
  208 
  209         maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
  210 
  211         dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
  212         if (unlikely(dma_space_addr == 0))
  213                 dma_space_addr = BUS_SPACE_MAXADDR;
  214 
  215         error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
  216             alignment, 0,     /* alignment, bounds              */
  217             dma_space_addr,   /* lowaddr of exclusion window    */
  218             BUS_SPACE_MAXADDR,/* highaddr of exclusion window   */
  219             NULL, NULL,       /* filter, filterarg              */
  220             maxsize,          /* maxsize                        */
  221             1,                /* nsegments                      */
  222             maxsize,          /* maxsegsize                     */
  223             BUS_DMA_ALLOCNOW, /* flags                          */
  224             NULL,             /* lockfunc                       */
  225             NULL,             /* lockarg                        */
  226             &dma->tag);
  227         if (unlikely(error != 0)) {
  228                 ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
  229                 goto fail_tag;
  230         }
  231 
  232         error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
  233             BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
  234         if (unlikely(error != 0)) {
  235                 ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
  236                     (uintmax_t)size, error);
  237                 goto fail_map_create;
  238         }
  239 
  240         dma->paddr = 0;
  241         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
  242             size, ena_dmamap_callback, &dma->paddr, mapflags);
  243         if (unlikely((error != 0) || (dma->paddr == 0))) {
  244                 ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
  245                 goto fail_map_load;
  246         }
  247 
  248         bus_dmamap_sync(dma->tag, dma->map,
  249             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  250 
  251         return (0);
  252 
  253 fail_map_load:
  254         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
  255 fail_map_create:
  256         bus_dma_tag_destroy(dma->tag);
  257 fail_tag:
  258         dma->tag = NULL;
  259         dma->vaddr = NULL;
  260         dma->paddr = 0;
  261 
  262         return (error);
  263 }
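/*
 * A minimal usage sketch for the allocator above; 'struct foo' and the
 * 8-byte alignment are illustrative placeholders, not values taken from
 * the driver:
 *
 *	ena_mem_handle_t mem;
 *
 *	if (ena_dma_alloc(adapter->pdev, sizeof(struct foo), &mem,
 *	    BUS_DMA_NOWAIT, 8) != 0)
 *		return (ENOMEM);
 *	use mem.vaddr (CPU mapping) and mem.paddr (device-visible address)
 *
 *	bus_dmamap_unload(mem.tag, mem.map);
 *	bus_dmamem_free(mem.tag, mem.vaddr, mem.map);
 *	bus_dma_tag_destroy(mem.tag);
 */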
  264 
  265 static void
  266 ena_free_pci_resources(struct ena_adapter *adapter)
  267 {
  268         device_t pdev = adapter->pdev;
  269 
  270         if (adapter->memory != NULL) {
  271                 bus_release_resource(pdev, SYS_RES_MEMORY,
  272                     PCIR_BAR(ENA_MEM_BAR), adapter->memory);
  273         }
  274 
  275         if (adapter->registers != NULL) {
  276                 bus_release_resource(pdev, SYS_RES_MEMORY,
  277                     PCIR_BAR(ENA_REG_BAR), adapter->registers);
  278         }
  279 
  280         if (adapter->msix != NULL) {
  281                 bus_release_resource(pdev, SYS_RES_MEMORY,
  282                     adapter->msix_rid, adapter->msix);
  283         }
  284 }
  285 
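/*
 * Newbus probe method: walk the vendor/device table and claim the device on
 * a match.  Returning BUS_PROBE_DEFAULT still allows a more specific driver
 * to outbid this one; returning ENXIO declines the device.
 */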
  286 static int
  287 ena_probe(device_t dev)
  288 {
  289         ena_vendor_info_t *ent;
  290         char            adapter_name[60];
  291         uint16_t        pci_vendor_id = 0;
  292         uint16_t        pci_device_id = 0;
  293 
  294         pci_vendor_id = pci_get_vendor(dev);
  295         pci_device_id = pci_get_device(dev);
  296 
  297         ent = ena_vendor_info_array;
  298         while (ent->vendor_id != 0) {
  299                 if ((pci_vendor_id == ent->vendor_id) &&
  300                     (pci_device_id == ent->device_id)) {
  301                         ena_log_raw(DBG, "vendor=%x device=%x\n",
  302                             pci_vendor_id, pci_device_id);
  303 
  304                         sprintf(adapter_name, DEVICE_DESC);
  305                         device_set_desc_copy(dev, adapter_name);
  306                         return (BUS_PROBE_DEFAULT);
  307                 }
  308 
  309                 ent++;
  310 
  311         }
  312 
  313         return (ENXIO);
  314 }
  315 
  316 static int
  317 ena_change_mtu(if_t ifp, int new_mtu)
  318 {
  319         struct ena_adapter *adapter = if_getsoftc(ifp);
  320         device_t pdev = adapter->pdev;
  321         int rc;
  322 
  323         if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
  324                 ena_log(pdev, ERR, "Invalid MTU setting. "
  325                     "new_mtu: %d max mtu: %d min mtu: %d\n",
  326                     new_mtu, adapter->max_mtu, ENA_MIN_MTU);
  327                 return (EINVAL);
  328         }
  329 
  330         rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
  331         if (likely(rc == 0)) {
  332                 ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
  333                 if_setmtu(ifp, new_mtu);
  334         } else {
  335                 ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
  336         }
  337 
  338         return (rc);
  339 }
  340 
  341 static inline void
  342 ena_alloc_counters(counter_u64_t *begin, int size)
  343 {
  344         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
  345 
  346         for (; begin < end; ++begin)
  347                 *begin = counter_u64_alloc(M_WAITOK);
  348 }
  349 
  350 static inline void
  351 ena_free_counters(counter_u64_t *begin, int size)
  352 {
  353         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
  354 
  355         for (; begin < end; ++begin)
  356                 counter_u64_free(*begin);
  357 }
  358 
  359 static inline void
  360 ena_reset_counters(counter_u64_t *begin, int size)
  361 {
  362         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
  363 
  364         for (; begin < end; ++begin)
  365                 counter_u64_zero(*begin);
  366 }
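/*
 * The three helpers above treat a statistics structure as a flat array of
 * counter_u64_t fields, so they are only correct for structures composed
 * exclusively of such counters.  Sketch of the intended use (the field
 * names are illustrative, not copied from ena.h):
 *
 *	struct ena_stats_tx {
 *		counter_u64_t cnt;
 *		counter_u64_t bytes;
 *		(further counter_u64_t fields only)
 *	};
 *
 *	ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 */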
  367 
  368 static void
  369 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
  370     uint16_t qid)
  371 {
  372 
  373         ring->qid = qid;
  374         ring->adapter = adapter;
  375         ring->ena_dev = adapter->ena_dev;
  376         ring->first_interrupt = false;
  377         ring->no_interrupt_event_cnt = 0;
  378 }
  379 
  380 static void
  381 ena_init_io_rings_basic(struct ena_adapter *adapter)
  382 {
  383         struct ena_com_dev *ena_dev;
  384         struct ena_ring *txr, *rxr;
  385         struct ena_que *que;
  386         int i;
  387 
  388         ena_dev = adapter->ena_dev;
  389 
  390         for (i = 0; i < adapter->num_io_queues; i++) {
  391                 txr = &adapter->tx_ring[i];
  392                 rxr = &adapter->rx_ring[i];
  393 
  394                 /* TX/RX common ring state */
  395                 ena_init_io_rings_common(adapter, txr, i);
  396                 ena_init_io_rings_common(adapter, rxr, i);
  397 
  398                 /* TX specific ring state */
  399                 txr->tx_max_header_size = ena_dev->tx_max_header_size;
  400                 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
  401 
  402                 que = &adapter->que[i];
  403                 que->adapter = adapter;
  404                 que->id = i;
  405                 que->tx_ring = txr;
  406                 que->rx_ring = rxr;
  407 
  408                 txr->que = que;
  409                 rxr->que = que;
  410 
  411                 rxr->empty_rx_queue = 0;
  412                 rxr->rx_mbuf_sz = ena_mbuf_sz;
  413         }
  414 }
  415 
  416 static void
  417 ena_init_io_rings_advanced(struct ena_adapter *adapter)
  418 {
  419         struct ena_ring *txr, *rxr;
  420         int i;
  421 
  422         for (i = 0; i < adapter->num_io_queues; i++) {
  423                 txr = &adapter->tx_ring[i];
  424                 rxr = &adapter->rx_ring[i];
  425 
  426                 /* Allocate a buf ring */
  427                 txr->buf_ring_size = adapter->buf_ring_size;
  428                 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
  429                     M_WAITOK, &txr->ring_mtx);
  430 
  431                 /* Allocate Tx statistics. */
  432                 ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
  433                     sizeof(txr->tx_stats));
  434 
  435                 /* Allocate Rx statistics. */
  436                 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
  437                     sizeof(rxr->rx_stats));
  438 
  439                 /* Initialize locks */
  440                 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
  441                     device_get_nameunit(adapter->pdev), i);
  442                 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
  443                     device_get_nameunit(adapter->pdev), i);
  444 
  445                 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
  446         }
  447 }
  448 
  449 static void
  450 ena_init_io_rings(struct ena_adapter *adapter)
  451 {
  452         /*
   453          * IO rings initialization can be divided into two steps:
  454          *   1. Initialize variables and fields with initial values and copy
  455          *      them from adapter/ena_dev (basic)
  456          *   2. Allocate mutex, counters and buf_ring (advanced)
  457          */
  458         ena_init_io_rings_basic(adapter);
  459         ena_init_io_rings_advanced(adapter);
  460 }
  461 
  462 static void
  463 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
  464 {
  465         struct ena_ring *txr = &adapter->tx_ring[qid];
  466         struct ena_ring *rxr = &adapter->rx_ring[qid];
  467 
  468         ena_free_counters((counter_u64_t *)&txr->tx_stats,
  469             sizeof(txr->tx_stats));
  470         ena_free_counters((counter_u64_t *)&rxr->rx_stats,
  471             sizeof(rxr->rx_stats));
  472 
  473         ENA_RING_MTX_LOCK(txr);
  474         drbr_free(txr->br, M_DEVBUF);
  475         ENA_RING_MTX_UNLOCK(txr);
  476 
  477         mtx_destroy(&txr->ring_mtx);
  478 }
  479 
  480 static void
  481 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
  482 {
  483         int i;
  484 
  485         for (i = 0; i < adapter->num_io_queues; i++)
  486                 ena_free_io_ring_resources(adapter, i);
  487 
  488 }
  489 
  490 static int
  491 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
  492 {
  493         int ret;
  494 
  495         /* Create DMA tag for Tx buffers */
  496         ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
  497             1, 0,                                 /* alignment, bounds       */
  498             ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
  499             BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
  500             NULL, NULL,                           /* filter, filterarg       */
  501             ENA_TSO_MAXSIZE,                      /* maxsize                 */
  502             adapter->max_tx_sgl_size - 1,         /* nsegments               */
  503             ENA_TSO_MAXSIZE,                      /* maxsegsize              */
  504             0,                                    /* flags                   */
  505             NULL,                                 /* lockfunc                */
  506             NULL,                                 /* lockfuncarg             */
  507             &adapter->tx_buf_tag);
  508 
  509         return (ret);
  510 }
  511 
  512 static int
  513 ena_free_tx_dma_tag(struct ena_adapter *adapter)
  514 {
  515         int ret;
  516 
  517         ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
  518 
  519         if (likely(ret == 0))
  520                 adapter->tx_buf_tag = NULL;
  521 
  522         return (ret);
  523 }
  524 
  525 static int
  526 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
  527 {
  528         int ret;
  529 
   530         /* Create DMA tag for Rx buffers */
  531         ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
  532             1, 0,                                 /* alignment, bounds       */
  533             ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
  534             BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
  535             NULL, NULL,                           /* filter, filterarg       */
  536             ena_mbuf_sz,                          /* maxsize                 */
  537             adapter->max_rx_sgl_size,             /* nsegments               */
  538             ena_mbuf_sz,                          /* maxsegsize              */
  539             0,                                    /* flags                   */
  540             NULL,                                 /* lockfunc                */
  541             NULL,                                 /* lockarg                 */
  542             &adapter->rx_buf_tag);
  543 
  544         return (ret);
  545 }
  546 
  547 static int
  548 ena_free_rx_dma_tag(struct ena_adapter *adapter)
  549 {
  550         int ret;
  551 
  552         ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
  553 
  554         if (likely(ret == 0))
  555                 adapter->rx_buf_tag = NULL;
  556 
  557         return (ret);
  558 }
  559 
  560 static void
  561 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
  562 {
  563         struct ena_adapter *adapter = tx_ring->adapter;
  564         struct ena_tx_buffer *tx_info;
   565         bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
  566         int i;
  567 #ifdef DEV_NETMAP
  568         struct ena_netmap_tx_info *nm_info;
  569         int j;
  570 #endif /* DEV_NETMAP */
  571 
  572         for (i = 0; i < tx_ring->ring_size; ++i) {
  573                 tx_info = &tx_ring->tx_buffer_info[i];
  574 #ifdef DEV_NETMAP
  575                 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
  576                         nm_info = &tx_info->nm_info;
  577                         for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
  578                                 if (nm_info->map_seg[j] != NULL) {
  579                                         bus_dmamap_destroy(tx_tag,
  580                                             nm_info->map_seg[j]);
  581                                         nm_info->map_seg[j] = NULL;
  582                                 }
  583                         }
  584                 }
  585 #endif /* DEV_NETMAP */
  586                 if (tx_info->dmamap != NULL) {
  587                         bus_dmamap_destroy(tx_tag, tx_info->dmamap);
  588                         tx_info->dmamap = NULL;
  589                 }
  590         }
  591 }
  592 
  593 /**
  594  * ena_setup_tx_resources - allocate Tx resources (Descriptors)
  595  * @adapter: network interface device structure
  596  * @qid: queue index
  597  *
   598  * Returns 0 on success, or an error code on failure.
  599  **/
  600 static int
  601 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
  602 {
  603         device_t pdev = adapter->pdev;
  604         char thread_name[MAXCOMLEN + 1];
  605         struct ena_que *que = &adapter->que[qid];
  606         struct ena_ring *tx_ring = que->tx_ring;
  607         cpuset_t *cpu_mask = NULL;
  608         int size, i, err;
  609 #ifdef DEV_NETMAP
  610         bus_dmamap_t *map;
  611         int j;
  612 
  613         ena_netmap_reset_tx_ring(adapter, qid);
  614 #endif /* DEV_NETMAP */
  615 
  616         size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
  617 
  618         tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
  619         if (unlikely(tx_ring->tx_buffer_info == NULL))
  620                 return (ENOMEM);
  621 
  622         size = sizeof(uint16_t) * tx_ring->ring_size;
  623         tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
  624         if (unlikely(tx_ring->free_tx_ids == NULL))
  625                 goto err_buf_info_free;
  626 
  627         size = tx_ring->tx_max_header_size;
  628         tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
  629             M_NOWAIT | M_ZERO);
  630         if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
  631                 goto err_tx_ids_free;
  632 
  633         /* Req id stack for TX OOO completions */
  634         for (i = 0; i < tx_ring->ring_size; i++)
  635                 tx_ring->free_tx_ids[i] = i;
  636 
  637         /* Reset TX statistics. */
  638         ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
  639             sizeof(tx_ring->tx_stats));
  640 
  641         tx_ring->next_to_use = 0;
  642         tx_ring->next_to_clean = 0;
  643         tx_ring->acum_pkts = 0;
  644 
  645         /* Make sure that drbr is empty */
  646         ENA_RING_MTX_LOCK(tx_ring);
  647         drbr_flush(adapter->ifp, tx_ring->br);
  648         ENA_RING_MTX_UNLOCK(tx_ring);
  649 
  650         /* ... and create the buffer DMA maps */
  651         for (i = 0; i < tx_ring->ring_size; i++) {
  652                 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
  653                     &tx_ring->tx_buffer_info[i].dmamap);
  654                 if (unlikely(err != 0)) {
  655                         ena_log(pdev, ERR,
  656                             "Unable to create Tx DMA map for buffer %d\n",
  657                             i);
  658                         goto err_map_release;
  659                 }
  660 
  661 #ifdef DEV_NETMAP
  662                 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
  663                         map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
  664                         for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
  665                                 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
  666                                     &map[j]);
  667                                 if (unlikely(err != 0)) {
  668                                         ena_log(pdev, ERR,
  669                                             "Unable to create "
   670                                             "Tx DMA map for buffer %d %d\n", i, j);
  671                                         goto err_map_release;
  672                                 }
  673                         }
  674                 }
  675 #endif /* DEV_NETMAP */
  676         }
  677 
  678         /* Allocate taskqueues */
  679         TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
  680         tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
  681             taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
  682         if (unlikely(tx_ring->enqueue_tq == NULL)) {
  683                 ena_log(pdev, ERR,
  684                     "Unable to create taskqueue for enqueue task\n");
  685                 i = tx_ring->ring_size;
  686                 goto err_map_release;
  687         }
  688 
  689         tx_ring->running = true;
  690 
  691 #ifdef RSS
  692         cpu_mask = &que->cpu_mask;
  693         snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
  694             device_get_nameunit(adapter->pdev), que->cpu);
  695 #else
  696         snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
  697             device_get_nameunit(adapter->pdev), que->id);
  698 #endif
  699         taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
  700             cpu_mask, "%s", thread_name);
  701 
  702         return (0);
  703 
  704 err_map_release:
  705         ena_release_all_tx_dmamap(tx_ring);
  706 err_tx_ids_free:
  707         free(tx_ring->free_tx_ids, M_DEVBUF);
  708         tx_ring->free_tx_ids = NULL;
  709 err_buf_info_free:
  710         free(tx_ring->tx_buffer_info, M_DEVBUF);
  711         tx_ring->tx_buffer_info = NULL;
  712 
  713         return (ENOMEM);
  714 }
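/*
 * Note the unwind convention used above and throughout this file: the error
 * labels are laid out in reverse order of acquisition, so a failure at any
 * step jumps to the label releasing everything acquired so far and then
 * falls through the remaining labels.
 */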
  715 
  716 /**
  717  * ena_free_tx_resources - Free Tx Resources per Queue
  718  * @adapter: network interface device structure
  719  * @qid: queue index
  720  *
  721  * Free all transmit software resources
  722  **/
  723 static void
  724 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
  725 {
  726         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
  727 #ifdef DEV_NETMAP
  728         struct ena_netmap_tx_info *nm_info;
  729         int j;
  730 #endif /* DEV_NETMAP */
  731 
  732         while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
  733             NULL))
  734                 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
  735 
  736         taskqueue_free(tx_ring->enqueue_tq);
  737 
  738         ENA_RING_MTX_LOCK(tx_ring);
  739         /* Flush buffer ring, */
  740         drbr_flush(adapter->ifp, tx_ring->br);
  741 
  742         /* Free buffer DMA maps, */
  743         for (int i = 0; i < tx_ring->ring_size; i++) {
  744                 bus_dmamap_sync(adapter->tx_buf_tag,
  745                     tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
  746                 bus_dmamap_unload(adapter->tx_buf_tag,
  747                     tx_ring->tx_buffer_info[i].dmamap);
  748                 bus_dmamap_destroy(adapter->tx_buf_tag,
  749                     tx_ring->tx_buffer_info[i].dmamap);
  750 
  751 #ifdef DEV_NETMAP
  752                 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
  753                         nm_info = &tx_ring->tx_buffer_info[i].nm_info;
  754                         for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
  755                                 if (nm_info->socket_buf_idx[j] != 0) {
  756                                         bus_dmamap_sync(adapter->tx_buf_tag,
  757                                             nm_info->map_seg[j],
  758                                             BUS_DMASYNC_POSTWRITE);
  759                                         ena_netmap_unload(adapter,
  760                                             nm_info->map_seg[j]);
  761                                 }
  762                                 bus_dmamap_destroy(adapter->tx_buf_tag,
  763                                     nm_info->map_seg[j]);
  764                                 nm_info->socket_buf_idx[j] = 0;
  765                         }
  766                 }
  767 #endif /* DEV_NETMAP */
  768 
  769                 m_freem(tx_ring->tx_buffer_info[i].mbuf);
  770                 tx_ring->tx_buffer_info[i].mbuf = NULL;
  771         }
  772         ENA_RING_MTX_UNLOCK(tx_ring);
  773 
  774         /* And free allocated memory. */
  775         free(tx_ring->tx_buffer_info, M_DEVBUF);
  776         tx_ring->tx_buffer_info = NULL;
  777 
  778         free(tx_ring->free_tx_ids, M_DEVBUF);
  779         tx_ring->free_tx_ids = NULL;
  780 
  781         free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
  782         tx_ring->push_buf_intermediate_buf = NULL;
  783 }
  784 
  785 /**
  786  * ena_setup_all_tx_resources - allocate all queues Tx resources
  787  * @adapter: network interface device structure
  788  *
   789  * Returns 0 on success, or an error code on failure.
  790  **/
  791 static int
  792 ena_setup_all_tx_resources(struct ena_adapter *adapter)
  793 {
  794         int i, rc;
  795 
  796         for (i = 0; i < adapter->num_io_queues; i++) {
  797                 rc = ena_setup_tx_resources(adapter, i);
  798                 if (rc != 0) {
  799                         ena_log(adapter->pdev, ERR,
  800                             "Allocation for Tx Queue %u failed\n", i);
  801                         goto err_setup_tx;
  802                 }
  803         }
  804 
  805         return (0);
  806 
  807 err_setup_tx:
  808         /* Rewind the index freeing the rings as we go */
  809         while (i--)
  810                 ena_free_tx_resources(adapter, i);
  811         return (rc);
  812 }
  813 
  814 /**
  815  * ena_free_all_tx_resources - Free Tx Resources for All Queues
  816  * @adapter: network interface device structure
  817  *
  818  * Free all transmit software resources
  819  **/
  820 static void
  821 ena_free_all_tx_resources(struct ena_adapter *adapter)
  822 {
  823         int i;
  824 
  825         for (i = 0; i < adapter->num_io_queues; i++)
  826                 ena_free_tx_resources(adapter, i);
  827 }
  828 
  829 /**
  830  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
  831  * @adapter: network interface device structure
  832  * @qid: queue index
  833  *
   834  * Returns 0 on success, or an error code on failure.
  835  **/
  836 static int
  837 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
  838 {
  839         device_t pdev = adapter->pdev;
  840         struct ena_que *que = &adapter->que[qid];
  841         struct ena_ring *rx_ring = que->rx_ring;
  842         int size, err, i;
  843 
  844         size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
  845 
  846 #ifdef DEV_NETMAP
  847         ena_netmap_reset_rx_ring(adapter, qid);
  848         rx_ring->initialized = false;
  849 #endif /* DEV_NETMAP */
  850 
   851         /*
   852          * Allocate an extra element so that in the Rx path we can
   853          * always prefetch rx_info + 1.
   854          */
  855         size += sizeof(struct ena_rx_buffer);
  856 
  857         rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
  858 
  859         size = sizeof(uint16_t) * rx_ring->ring_size;
  860         rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
  861 
  862         for (i = 0; i < rx_ring->ring_size; i++)
  863                 rx_ring->free_rx_ids[i] = i;
  864 
  865         /* Reset RX statistics. */
  866         ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
  867             sizeof(rx_ring->rx_stats));
  868 
  869         rx_ring->next_to_clean = 0;
  870         rx_ring->next_to_use = 0;
  871 
  872         /* ... and create the buffer DMA maps */
  873         for (i = 0; i < rx_ring->ring_size; i++) {
  874                 err = bus_dmamap_create(adapter->rx_buf_tag, 0,
  875                     &(rx_ring->rx_buffer_info[i].map));
  876                 if (err != 0) {
  877                         ena_log(pdev, ERR,
  878                             "Unable to create Rx DMA map for buffer %d\n", i);
  879                         goto err_buf_info_unmap;
  880                 }
  881         }
  882 
  883         /* Create LRO for the ring */
  884         if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
  885                 int err = tcp_lro_init(&rx_ring->lro);
  886                 if (err != 0) {
  887                         ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
  888                             qid);
  889                 } else {
  890                         ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
  891                             qid);
  892                         rx_ring->lro.ifp = adapter->ifp;
  893                 }
  894         }
  895 
  896         return (0);
  897 
  898 err_buf_info_unmap:
  899         while (i--) {
  900                 bus_dmamap_destroy(adapter->rx_buf_tag,
  901                     rx_ring->rx_buffer_info[i].map);
  902         }
  903 
  904         free(rx_ring->free_rx_ids, M_DEVBUF);
  905         rx_ring->free_rx_ids = NULL;
  906         free(rx_ring->rx_buffer_info, M_DEVBUF);
  907         rx_ring->rx_buffer_info = NULL;
  908         return (ENOMEM);
  909 }
  910 
  911 /**
  912  * ena_free_rx_resources - Free Rx Resources
  913  * @adapter: network interface device structure
  914  * @qid: queue index
  915  *
  916  * Free all receive software resources
  917  **/
  918 static void
  919 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
  920 {
  921         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
  922 
  923         /* Free buffer DMA maps, */
  924         for (int i = 0; i < rx_ring->ring_size; i++) {
  925                 bus_dmamap_sync(adapter->rx_buf_tag,
  926                     rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
  927                 m_freem(rx_ring->rx_buffer_info[i].mbuf);
  928                 rx_ring->rx_buffer_info[i].mbuf = NULL;
  929                 bus_dmamap_unload(adapter->rx_buf_tag,
  930                     rx_ring->rx_buffer_info[i].map);
  931                 bus_dmamap_destroy(adapter->rx_buf_tag,
  932                     rx_ring->rx_buffer_info[i].map);
  933         }
  934 
  935         /* free LRO resources, */
  936         tcp_lro_free(&rx_ring->lro);
  937 
  938         /* free allocated memory */
  939         free(rx_ring->rx_buffer_info, M_DEVBUF);
  940         rx_ring->rx_buffer_info = NULL;
  941 
  942         free(rx_ring->free_rx_ids, M_DEVBUF);
  943         rx_ring->free_rx_ids = NULL;
  944 }
  945 
  946 /**
  947  * ena_setup_all_rx_resources - allocate all queues Rx resources
  948  * @adapter: network interface device structure
  949  *
   950  * Returns 0 on success, or an error code on failure.
  951  **/
  952 static int
  953 ena_setup_all_rx_resources(struct ena_adapter *adapter)
  954 {
  955         int i, rc = 0;
  956 
  957         for (i = 0; i < adapter->num_io_queues; i++) {
  958                 rc = ena_setup_rx_resources(adapter, i);
  959                 if (rc != 0) {
  960                         ena_log(adapter->pdev, ERR,
  961                             "Allocation for Rx Queue %u failed\n", i);
  962                         goto err_setup_rx;
  963                 }
  964         }
  965         return (0);
  966 
  967 err_setup_rx:
  968         /* rewind the index freeing the rings as we go */
  969         while (i--)
  970                 ena_free_rx_resources(adapter, i);
  971         return (rc);
  972 }
  973 
  974 /**
  975  * ena_free_all_rx_resources - Free Rx resources for all queues
  976  * @adapter: network interface device structure
  977  *
  978  * Free all receive software resources
  979  **/
  980 static void
  981 ena_free_all_rx_resources(struct ena_adapter *adapter)
  982 {
  983         int i;
  984 
  985         for (i = 0; i < adapter->num_io_queues; i++)
  986                 ena_free_rx_resources(adapter, i);
  987 }
  988 
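/*
 * Allocate and DMA-map a single receive mbuf.  A jumbo cluster of
 * rx_ring->rx_mbuf_sz bytes is tried first; if that zone is exhausted, the
 * code falls back to a standard cluster (MCLBYTES), recording each failed
 * attempt in the per-ring statistics.
 */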
  989 static inline int
  990 ena_alloc_rx_mbuf(struct ena_adapter *adapter,
  991     struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
  992 {
  993         device_t pdev = adapter->pdev;
  994         struct ena_com_buf *ena_buf;
  995         bus_dma_segment_t segs[1];
  996         int nsegs, error;
  997         int mlen;
  998 
   999         /* If a previously allocated mbuf is already in place, reuse it. */
 1000         if (unlikely(rx_info->mbuf != NULL))
 1001                 return (0);
 1002 
 1003         /* Get mbuf using UMA allocator */
 1004         rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
 1005             rx_ring->rx_mbuf_sz);
 1006 
 1007         if (unlikely(rx_info->mbuf == NULL)) {
 1008                 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
 1009                 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1010                 if (unlikely(rx_info->mbuf == NULL)) {
 1011                         counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
 1012                         return (ENOMEM);
 1013                 }
 1014                 mlen = MCLBYTES;
 1015         } else {
 1016                 mlen = rx_ring->rx_mbuf_sz;
 1017         }
  1018         /* Set mbuf length */
 1019         rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
 1020 
 1021         /* Map packets for DMA */
 1022         ena_log(pdev, DBG, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
  1023             adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
 1024         error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
 1025             rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
 1026         if (unlikely((error != 0) || (nsegs != 1))) {
 1027                 ena_log(pdev, WARN,
 1028                     "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
 1029                 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
 1030                 goto exit;
 1031 
 1032         }
 1033 
 1034         bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
 1035 
 1036         ena_buf = &rx_info->ena_buf;
 1037         ena_buf->paddr = segs[0].ds_addr;
 1038         ena_buf->len = mlen;
 1039 
 1040         ena_log(pdev, DBG, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
  1041             rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
 1042 
 1043         return (0);
 1044 
 1045 exit:
 1046         m_freem(rx_info->mbuf);
 1047         rx_info->mbuf = NULL;
 1048         return (EFAULT);
 1049 }
 1050 
 1051 static void
 1052 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
 1053     struct ena_rx_buffer *rx_info)
 1054 {
 1055 
 1056         if (rx_info->mbuf == NULL) {
 1057                 ena_log(adapter->pdev, WARN,
 1058                     "Trying to free unallocated buffer\n");
 1059                 return;
 1060         }
 1061 
 1062         bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
 1063             BUS_DMASYNC_POSTREAD);
 1064         bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
 1065         m_freem(rx_info->mbuf);
 1066         rx_info->mbuf = NULL;
 1067 }
 1068 
 1069 /**
 1070  * ena_refill_rx_bufs - Refills ring with descriptors
 1071  * @rx_ring: the ring which we want to feed with free descriptors
 1072  * @num: number of descriptors to refill
 1073  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 1074  **/
 1075 int
 1076 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
 1077 {
 1078         struct ena_adapter *adapter = rx_ring->adapter;
 1079         device_t pdev = adapter->pdev;
 1080         uint16_t next_to_use, req_id;
 1081         uint32_t i;
 1082         int rc;
 1083 
 1084         ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);
 1085 
 1086         next_to_use = rx_ring->next_to_use;
 1087 
 1088         for (i = 0; i < num; i++) {
 1089                 struct ena_rx_buffer *rx_info;
 1090 
 1091                 ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
 1092                     next_to_use);
 1093 
 1094                 req_id = rx_ring->free_rx_ids[next_to_use];
 1095                 rx_info = &rx_ring->rx_buffer_info[req_id];
 1096 #ifdef DEV_NETMAP
 1097                 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
 1098                         rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
 1099                 else
 1100 #endif /* DEV_NETMAP */
 1101                         rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
 1102                 if (unlikely(rc != 0)) {
 1103                         ena_log_io(pdev, WARN,
 1104                             "failed to alloc buffer for rx queue %d\n",
 1105                             rx_ring->qid);
 1106                         break;
 1107                 }
 1108                 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
 1109                     &rx_info->ena_buf, req_id);
 1110                 if (unlikely(rc != 0)) {
 1111                         ena_log_io(pdev, WARN,
 1112                             "failed to add buffer for rx queue %d\n",
 1113                             rx_ring->qid);
 1114                         break;
 1115                 }
 1116                 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
 1117                     rx_ring->ring_size);
 1118         }
 1119 
 1120         if (unlikely(i < num)) {
 1121                 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
 1122                 ena_log_io(pdev, WARN,
  1123                      "refilled rx qid %d with only %d mbufs (out of %d)\n",
 1124                      rx_ring->qid, i, num);
 1125         }
 1126 
 1127         if (likely(i != 0))
 1128                 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 1129 
 1130         rx_ring->next_to_use = next_to_use;
 1131         return (i);
 1132 }
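/*
 * ENA_RX_RING_IDX_NEXT() advances a ring index with wrap-around.  Its real
 * definition lives in the driver headers; a sketch of the usual
 * power-of-two form (an assumption here, not copied from ena.h):
 *
 *	#define ENA_RX_RING_IDX_NEXT(idx, ring_size) \
 *	    (((idx) + 1) & ((ring_size) - 1))
 */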
 1133 
 1134 int
 1135 ena_update_buf_ring_size(struct ena_adapter *adapter,
 1136     uint32_t new_buf_ring_size)
 1137 {
 1138         uint32_t old_buf_ring_size;
 1139         int rc = 0;
 1140         bool dev_was_up;
 1141 
 1142         old_buf_ring_size = adapter->buf_ring_size;
 1143         adapter->buf_ring_size = new_buf_ring_size;
 1144 
 1145         dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
 1146         ena_down(adapter);
 1147 
 1148         /* Reconfigure buf ring for all Tx rings. */
 1149         ena_free_all_io_rings_resources(adapter);
 1150         ena_init_io_rings_advanced(adapter);
 1151         if (dev_was_up) {
  1152                 /*
  1153                  * If ena_up() fails, it is not because of the recent
  1154                  * buf_ring size change. Revert to the old drbr value
  1155                  * and trigger the reset, as something else must have
  1156                  * gone wrong.
  1157                  */
 1158                 rc = ena_up(adapter);
 1159                 if (unlikely(rc != 0)) {
 1160                         ena_log(adapter->pdev, ERR,
 1161                             "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
 1162                             new_buf_ring_size, old_buf_ring_size);
 1163 
 1164                         /* Revert old size and trigger the reset */
 1165                         adapter->buf_ring_size = old_buf_ring_size;
 1166                         ena_free_all_io_rings_resources(adapter);
 1167                         ena_init_io_rings_advanced(adapter);
 1168 
 1169                         ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
 1170                             adapter);
 1171                         ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
 1172 
 1173                 }
 1174         }
 1175 
 1176         return (rc);
 1177 }
 1178 
 1179 int
 1180 ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
 1181     uint32_t new_rx_size)
 1182 {
 1183         uint32_t old_tx_size, old_rx_size;
 1184         int rc = 0;
 1185         bool dev_was_up;
 1186 
 1187         old_tx_size = adapter->requested_tx_ring_size;
 1188         old_rx_size = adapter->requested_rx_ring_size;
 1189         adapter->requested_tx_ring_size = new_tx_size;
 1190         adapter->requested_rx_ring_size = new_rx_size;
 1191 
 1192         dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
 1193         ena_down(adapter);
 1194 
 1195         /* Configure queues with new size. */
 1196         ena_init_io_rings_basic(adapter);
 1197         if (dev_was_up) {
 1198                 rc = ena_up(adapter);
 1199                 if (unlikely(rc != 0)) {
 1200                         ena_log(adapter->pdev, ERR,
 1201                             "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
 1202                             new_tx_size, new_rx_size, old_tx_size, old_rx_size);
 1203 
 1204                         /* Revert old size. */
 1205                         adapter->requested_tx_ring_size = old_tx_size;
 1206                         adapter->requested_rx_ring_size = old_rx_size;
 1207                         ena_init_io_rings_basic(adapter);
 1208 
 1209                         /* And try again. */
 1210                         rc = ena_up(adapter);
 1211                         if (unlikely(rc != 0)) {
 1212                                 ena_log(adapter->pdev, ERR,
 1213                                     "Failed to revert old queue sizes. Triggering device reset.\n");
  1214                                 /*
  1215                                  * If we've failed again, something must
  1216                                  * have gone wrong. After the reset, the
  1217                                  * device should try to go up.
  1218                                  */
 1219                                 ENA_FLAG_SET_ATOMIC(
 1220                                     ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
 1221                                 ena_trigger_reset(adapter,
 1222                                     ENA_REGS_RESET_OS_TRIGGER);
 1223                         }
 1224                 }
 1225         }
 1226 
 1227         return (rc);
 1228 }
 1229 
 1230 static void
 1231 ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
 1232 {
 1233         ena_free_all_io_rings_resources(adapter);
 1234         /* Force indirection table to be reinitialized */
 1235         ena_com_rss_destroy(adapter->ena_dev);
 1236 
 1237         adapter->num_io_queues = num;
 1238         ena_init_io_rings(adapter);
 1239 }
 1240 
 1241 /* Caller should sanitize new_num */
 1242 int
 1243 ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
 1244 {
 1245         uint32_t old_num;
 1246         int rc = 0;
 1247         bool dev_was_up;
 1248 
 1249         dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
 1250         old_num = adapter->num_io_queues;
 1251         ena_down(adapter);
 1252 
 1253         ena_update_io_rings(adapter, new_num);
 1254 
 1255         if (dev_was_up) {
 1256                 rc = ena_up(adapter);
 1257                 if (unlikely(rc != 0)) {
 1258                         ena_log(adapter->pdev, ERR,
 1259                             "Failed to configure device with %u IO queues. "
 1260                             "Reverting to previous value: %u\n",
 1261                             new_num, old_num);
 1262 
 1263                         ena_update_io_rings(adapter, old_num);
 1264 
 1265                         rc = ena_up(adapter);
 1266                         if (unlikely(rc != 0)) {
 1267                                 ena_log(adapter->pdev, ERR,
 1268                                     "Failed to revert to previous setup IO "
 1269                                     "queues. Triggering device reset.\n");
 1270                                 ENA_FLAG_SET_ATOMIC(
 1271                                     ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
 1272                                 ena_trigger_reset(adapter,
 1273                                     ENA_REGS_RESET_OS_TRIGGER);
 1274                         }
 1275                 }
 1276         }
 1277 
 1278         return (rc);
 1279 }
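/*
 * ena_update_buf_ring_size(), ena_update_queue_size() and
 * ena_update_io_queue_nb() above all follow the same reconfiguration shape,
 * roughly:
 *
 *	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
 *	ena_down(adapter);
 *	apply the new setting and rebuild the affected rings;
 *	if (dev_was_up && ena_up() fails) {
 *		revert the setting, retry ena_up() where applicable, and on
 *		failure schedule a device reset via
 *		ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
 *	}
 */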
 1280 
 1281 static void
 1282 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
 1283 {
 1284         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
 1285         unsigned int i;
 1286 
 1287         for (i = 0; i < rx_ring->ring_size; i++) {
 1288                 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
 1289 
 1290                 if (rx_info->mbuf != NULL)
 1291                         ena_free_rx_mbuf(adapter, rx_ring, rx_info);
 1292 #ifdef DEV_NETMAP
 1293                 if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
 1294                     (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
 1295                         if (rx_info->netmap_buf_idx != 0)
 1296                                 ena_netmap_free_rx_slot(adapter, rx_ring,
 1297                                     rx_info);
 1298                 }
 1299 #endif /* DEV_NETMAP */
 1300         }
 1301 }
 1302 
 1303 /**
 1304  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
 1305  * @adapter: network interface device structure
 1306  *
 1307  */
 1308 static void
 1309 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
 1310 {
 1311         struct ena_ring *rx_ring;
 1312         int i, rc, bufs_num;
 1313 
 1314         for (i = 0; i < adapter->num_io_queues; i++) {
 1315                 rx_ring = &adapter->rx_ring[i];
 1316                 bufs_num = rx_ring->ring_size - 1;
 1317                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
 1318                 if (unlikely(rc != bufs_num))
 1319                         ena_log_io(adapter->pdev, WARN,
 1320                             "refilling Queue %d failed. "
  1321                             "Allocated %d buffers out of %d\n", i, rc, bufs_num);
 1322 #ifdef DEV_NETMAP
 1323                 rx_ring->initialized = true;
 1324 #endif /* DEV_NETMAP */
 1325         }
 1326 }
 1327 
 1328 static void
 1329 ena_free_all_rx_bufs(struct ena_adapter *adapter)
 1330 {
 1331         int i;
 1332 
 1333         for (i = 0; i < adapter->num_io_queues; i++)
 1334                 ena_free_rx_bufs(adapter, i);
 1335 }
 1336 
 1337 /**
 1338  * ena_free_tx_bufs - Free Tx Buffers per Queue
 1339  * @adapter: network interface device structure
 1340  * @qid: queue index
 1341  **/
 1342 static void
 1343 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
 1344 {
 1345         bool print_once = true;
 1346         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
 1347 
 1348         ENA_RING_MTX_LOCK(tx_ring);
 1349         for (int i = 0; i < tx_ring->ring_size; i++) {
 1350                 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
 1351 
 1352                 if (tx_info->mbuf == NULL)
 1353                         continue;
 1354 
 1355                 if (print_once) {
 1356                         ena_log(adapter->pdev, WARN,
 1357                             "free uncompleted tx mbuf qid %d idx 0x%x\n",
 1358                             qid, i);
 1359                         print_once = false;
 1360                 } else {
 1361                         ena_log(adapter->pdev, DBG,
 1362                             "free uncompleted tx mbuf qid %d idx 0x%x\n",
 1363                              qid, i);
 1364                 }
 1365 
 1366                 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
 1367                     BUS_DMASYNC_POSTWRITE);
 1368                 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
 1369 
 1370                 m_free(tx_info->mbuf);
 1371                 tx_info->mbuf = NULL;
 1372         }
 1373         ENA_RING_MTX_UNLOCK(tx_ring);
 1374 }
 1375 
 1376 static void
 1377 ena_free_all_tx_bufs(struct ena_adapter *adapter)
 1378 {
 1379 
 1380         for (int i = 0; i < adapter->num_io_queues; i++)
 1381                 ena_free_tx_bufs(adapter, i);
 1382 }
 1383 
 1384 static void
 1385 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
 1386 {
 1387         uint16_t ena_qid;
 1388         int i;
 1389 
 1390         for (i = 0; i < adapter->num_io_queues; i++) {
 1391                 ena_qid = ENA_IO_TXQ_IDX(i);
 1392                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
 1393         }
 1394 }
 1395 
 1396 static void
 1397 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
 1398 {
 1399         uint16_t ena_qid;
 1400         int i;
 1401 
 1402         for (i = 0; i < adapter->num_io_queues; i++) {
 1403                 ena_qid = ENA_IO_RXQ_IDX(i);
 1404                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
 1405         }
 1406 }
 1407 
 1408 static void
 1409 ena_destroy_all_io_queues(struct ena_adapter *adapter)
 1410 {
 1411         struct ena_que *queue;
 1412         int i;
 1413 
 1414         for (i = 0; i < adapter->num_io_queues; i++) {
 1415                 queue = &adapter->que[i];
 1416                 while (taskqueue_cancel(queue->cleanup_tq,
 1417                     &queue->cleanup_task, NULL))
 1418                         taskqueue_drain(queue->cleanup_tq,
 1419                             &queue->cleanup_task);
 1420                 taskqueue_free(queue->cleanup_tq);
 1421         }
 1422 
 1423         ena_destroy_all_tx_queues(adapter);
 1424         ena_destroy_all_rx_queues(adapter);
 1425 }
 1426 
 1427 static int
 1428 ena_create_io_queues(struct ena_adapter *adapter)
 1429 {
 1430         struct ena_com_dev *ena_dev = adapter->ena_dev;
 1431         struct ena_com_create_io_ctx ctx;
 1432         struct ena_ring *ring;
 1433         struct ena_que *queue;
 1434         uint16_t ena_qid;
 1435         uint32_t msix_vector;
 1436         cpuset_t *cpu_mask = NULL;
 1437         int rc, i;
 1438 
 1439         /* Create TX queues */
 1440         for (i = 0; i < adapter->num_io_queues; i++) {
 1441                 msix_vector = ENA_IO_IRQ_IDX(i);
 1442                 ena_qid = ENA_IO_TXQ_IDX(i);
 1443                 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 1444                 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
 1445                 ctx.queue_size = adapter->requested_tx_ring_size;
 1446                 ctx.msix_vector = msix_vector;
 1447                 ctx.qid = ena_qid;
 1448                 rc = ena_com_create_io_queue(ena_dev, &ctx);
 1449                 if (rc != 0) {
 1450                         ena_log(adapter->pdev, ERR,
 1451                             "Failed to create io TX queue #%d rc: %d\n", i, rc);
 1452                         goto err_tx;
 1453                 }
 1454                 ring = &adapter->tx_ring[i];
 1455                 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
 1456                     &ring->ena_com_io_sq,
 1457                     &ring->ena_com_io_cq);
 1458                 if (rc != 0) {
 1459                         ena_log(adapter->pdev, ERR,
 1460                             "Failed to get TX queue handlers. TX queue num"
 1461                             " %d rc: %d\n", i, rc);
 1462                         ena_com_destroy_io_queue(ena_dev, ena_qid);
 1463                         goto err_tx;
 1464                 }
 1465         }
 1466 
 1467         /* Create RX queues */
 1468         for (i = 0; i < adapter->num_io_queues; i++) {
 1469                 msix_vector = ENA_IO_IRQ_IDX(i);
 1470                 ena_qid = ENA_IO_RXQ_IDX(i);
 1471                 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 1472                 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
 1473                 ctx.queue_size = adapter->requested_rx_ring_size;
 1474                 ctx.msix_vector = msix_vector;
 1475                 ctx.qid = ena_qid;
 1476                 rc = ena_com_create_io_queue(ena_dev, &ctx);
 1477                 if (unlikely(rc != 0)) {
 1478                         ena_log(adapter->pdev, ERR,
 1479                             "Failed to create io RX queue[%d] rc: %d\n", i, rc);
 1480                         goto err_rx;
 1481                 }
 1482 
 1483                 ring = &adapter->rx_ring[i];
 1484                 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
 1485                     &ring->ena_com_io_sq,
 1486                     &ring->ena_com_io_cq);
 1487                 if (unlikely(rc != 0)) {
 1488                         ena_log(adapter->pdev, ERR,
 1489                             "Failed to get RX queue handlers. RX queue num"
 1490                             " %d rc: %d\n", i, rc);
 1491                         ena_com_destroy_io_queue(ena_dev, ena_qid);
 1492                         goto err_rx;
 1493                 }
 1494         }
 1495 
 1496         for (i = 0; i < adapter->num_io_queues; i++) {
 1497                 queue = &adapter->que[i];
 1498 
 1499                 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
 1500                 queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
 1501                     M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
 1502 
 1503 #ifdef RSS
 1504                 cpu_mask = &queue->cpu_mask;
 1505 #endif
 1506                 taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
 1507                     cpu_mask,
 1508                     "%s queue %d cleanup",
 1509                     device_get_nameunit(adapter->pdev), i);
 1510         }
 1511 
 1512         return (0);
 1513 
 1514 err_rx:
 1515         while (i--)
 1516                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
 1517         i = adapter->num_io_queues;
 1518 err_tx:
 1519         while (i--)
 1520                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
 1521 
 1522         return (ENXIO);
 1523 }
 1524 
 1525 /*********************************************************************
 1526  *
 1527  *  MSIX & Interrupt Service routine
 1528  *
 1529  **********************************************************************/
 1530 
 1531 /**
 1532  * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue
 1533  * @arg: network adapter
 1534  **/
 1535 static void
 1536 ena_intr_msix_mgmnt(void *arg)
 1537 {
 1538         struct ena_adapter *adapter = (struct ena_adapter *)arg;
 1539 
 1540         ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
 1541         if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
 1542                 ena_com_aenq_intr_handler(adapter->ena_dev, arg);
 1543 }
 1544 
 1545 /**
 1546  * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx
 1547  * @arg: queue
 1548  **/
 1549 static int
 1550 ena_handle_msix(void *arg)
 1551 {
 1552         struct ena_que *queue = arg;
 1553         struct ena_adapter *adapter = queue->adapter;
 1554         if_t ifp = adapter->ifp;
 1555 
 1556         if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
 1557                 return (FILTER_STRAY);
 1558 
 1559         taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
 1560 
 1561         return (FILTER_HANDLED);
 1562 }
 1563 
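      /**
       * ena_enable_msix - reserve and enable MSI-X vectors
       * @adapter: network interface device structure
       *
       * Reserves ENA_MAX_MSIX_VEC(adapter->max_num_io_queues) vectors and
       * asks the PCI layer to allocate them. A partial allocation is
       * accepted as long as more than ENA_ADMIN_MSIX_VEC vectors were
       * granted; otherwise the vectors are released and ENOSPC is returned.
       **/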
 1564 static int
 1565 ena_enable_msix(struct ena_adapter *adapter)
 1566 {
 1567         device_t dev = adapter->pdev;
 1568         int msix_vecs, msix_req;
 1569         int i, rc = 0;
 1570 
 1571         if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
 1572                 ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
 1573                 return (EINVAL);
 1574         }
 1575 
 1576         /* Reserve the max MSI-X vectors we might need */
 1577         msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
 1578 
 1579         adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
 1580             M_DEVBUF, M_WAITOK | M_ZERO);
 1581 
 1582         ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n",
 1583             msix_vecs);
 1584 
 1585         for (i = 0; i < msix_vecs; i++) {
 1586                 adapter->msix_entries[i].entry = i;
 1587                 /* Vectors must start from 1 */
 1588                 adapter->msix_entries[i].vector = i + 1;
 1589         }
 1590 
 1591         msix_req = msix_vecs;
 1592         rc = pci_alloc_msix(dev, &msix_vecs);
 1593         if (unlikely(rc != 0)) {
 1594                 ena_log(dev, ERR,
 1595                     "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
 1596 
 1597                 rc = ENOSPC;
 1598                 goto err_msix_free;
 1599         }
 1600 
 1601         if (msix_vecs != msix_req) {
 1602                 if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
 1603                         ena_log(dev, ERR,
 1604                             "Not enough MSI-X vectors allocated: %d\n",
 1605                             msix_vecs);
 1606                         pci_release_msi(dev);
 1607                         rc = ENOSPC;
 1608                         goto err_msix_free;
 1609                 }
 1610                 ena_log(dev, ERR, "Enabled only %d MSI-X (out of %d requested); "
 1611                     "reducing the number of queues\n", msix_vecs, msix_req);
 1612         }
 1613 
 1614         adapter->msix_vecs = msix_vecs;
 1615         ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
 1616 
 1617         return (0);
 1618 
 1619 err_msix_free:
 1620         free(adapter->msix_entries, M_DEVBUF);
 1621         adapter->msix_entries = NULL;
 1622 
 1623         return (rc);
 1624 }
 1625 
 1626 static void
 1627 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
 1628 {
 1629 
 1630         snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
 1631             ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
 1632             device_get_nameunit(adapter->pdev));
 1633         /*
 1634          * Handler is NULL on purpose; it will be set
 1635          * when the mgmnt interrupt is acquired.
 1636          */
 1637         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
 1638         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
 1639         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
 1640             adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
 1641 }
 1642 
 1643 static int
 1644 ena_setup_io_intr(struct ena_adapter *adapter)
 1645 {
 1646 #ifdef RSS
 1647         int num_buckets = rss_getnumbuckets();
 1648         static int last_bind = 0;
 1649 #endif
 1650         int irq_idx;
 1651 
 1652         if (adapter->msix_entries == NULL)
 1653                 return (EINVAL);
 1654 
 1655         for (int i = 0; i < adapter->num_io_queues; i++) {
 1656                 irq_idx = ENA_IO_IRQ_IDX(i);
 1657 
 1658                 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
 1659                     "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
 1660                 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
 1661                 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
 1662                 adapter->irq_tbl[irq_idx].vector =
 1663                     adapter->msix_entries[irq_idx].vector;
 1664                 ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
 1665                     adapter->msix_entries[irq_idx].vector);
 1666 
 1667 #ifdef RSS
 1668                 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
 1669                     rss_getcpu(last_bind);
 1670                 last_bind = (last_bind + 1) % num_buckets;
 1671                 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
 1672 #endif
 1673         }
 1674 
 1675         return (0);
 1676 }
 1677 
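      /**
       * ena_request_mgmnt_irq - allocate and set up the admin interrupt
       * @adapter: network interface device structure
       *
       * Allocates the bus resource for the management vector and installs
       * ena_intr_msix_mgmnt() as its handler, releasing the resource again
       * on failure.
       **/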
 1678 static int
 1679 ena_request_mgmnt_irq(struct ena_adapter *adapter)
 1680 {
 1681         device_t pdev = adapter->pdev;
 1682         struct ena_irq *irq;
 1683         unsigned long flags;
 1684         int rc, rcc;
 1685 
 1686         flags = RF_ACTIVE | RF_SHAREABLE;
 1687 
 1688         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
 1689         irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
 1690             &irq->vector, flags);
 1691 
 1692         if (unlikely(irq->res == NULL)) {
 1693                 ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
 1694                     irq->vector);
 1695                 return (ENXIO);
 1696         }
 1697 
 1698         rc = bus_setup_intr(adapter->pdev, irq->res,
 1699             INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
 1700             irq->data, &irq->cookie);
 1701         if (unlikely(rc != 0)) {
 1702                 ena_log(pdev, ERR, "failed to register "
 1703                     "interrupt handler for irq %ju: %d\n",
 1704                     rman_get_start(irq->res), rc);
 1705                 goto err_res_free;
 1706         }
 1707         irq->requested = true;
 1708 
 1709         return (rc);
 1710 
 1711 err_res_free:
 1712         ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
 1713         rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
 1714             irq->vector, irq->res);
 1715         if (unlikely(rcc != 0))
 1716                 ena_log(pdev, ERR, "dev has no parent while "
 1717                     "releasing res for irq: %d\n", irq->vector);
 1718         irq->res = NULL;
 1719 
 1720         return (rc);
 1721 }
 1722 
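      /**
       * ena_request_io_irq - allocate and set up the IO queue interrupts
       * @adapter: network interface device structure
       *
       * Walks all IO vectors, allocating each bus resource and installing
       * the handler prepared by ena_setup_io_intr(). On failure, every
       * interrupt and resource acquired so far is torn down again.
       **/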
 1723 static int
 1724 ena_request_io_irq(struct ena_adapter *adapter)
 1725 {
 1726         device_t pdev = adapter->pdev;
 1727         struct ena_irq *irq;
 1728         unsigned long flags = 0;
 1729         int rc = 0, i, rcc;
 1730 
 1731         if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
 1732                 ena_log(pdev, ERR,
 1733                     "failed to request I/O IRQ: MSI-X is not enabled\n");
 1734                 return (EINVAL);
 1735         }
 1736 
 1737         flags = RF_ACTIVE | RF_SHAREABLE;
 1738 
 1739         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
 1740                 irq = &adapter->irq_tbl[i];
 1741 
 1742                 if (unlikely(irq->requested))
 1743                         continue;
 1744 
 1745                 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
 1746                     &irq->vector, flags);
 1747                 if (unlikely(irq->res == NULL)) {
 1748                         rc = ENOMEM;
 1749                         ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
 1750                             irq->vector);
 1751                         goto err;
 1752                 }
 1753 
 1754                 rc = bus_setup_intr(adapter->pdev, irq->res,
 1755                     INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
 1756                     irq->data, &irq->cookie);
 1757                 if (unlikely(rc != 0)) {
 1758                         ena_log(pdev, ERR, "failed to register "
 1759                             "interrupt handler for irq %ju: %d\n",
 1760                             rman_get_start(irq->res), rc);
 1761                         goto err;
 1762                 }
 1763                 irq->requested = true;
 1764 
 1765 #ifdef RSS
 1766                 rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
 1767                 if (unlikely(rc != 0)) {
 1768                         ena_log(pdev, ERR, "failed to bind "
 1769                             "interrupt handler for irq %ju to cpu %d: %d\n",
 1770                             rman_get_start(irq->res), irq->cpu, rc);
 1771                         goto err;
 1772                 }
 1773 
 1774                 ena_log(pdev, INFO, "queue %d - cpu %d\n",
 1775                     i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
 1776 #endif
 1777         }
 1778 
 1779         return (rc);
 1780 
 1781 err:
 1782 
 1783         for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
 1784                 irq = &adapter->irq_tbl[i];
 1785                 rcc = 0;
 1786 
 1787                 /* Once we have entered the err: section and irq->requested
 1788                  * is true, free both the interrupt and its resources. */
 1789                 if (irq->requested)
 1790                         rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
 1791                 if (unlikely(rcc != 0))
 1792                         ena_log(pdev, ERR, "could not release irq: %d, error: %d\n",
 1793                             irq->vector, rcc);
 1794 
 1795                 /* If we entered the err: section without irq->requested set,
 1796                  * we know it was bus_alloc_resource_any() that needs cleanup,
 1797                  * provided res is not NULL. If res is NULL, no work is needed
 1798                  * in this iteration. */
 1799                 rcc = 0;
 1800                 if (irq->res != NULL) {
 1801                         rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
 1802                             irq->vector, irq->res);
 1803                 }
 1804                 if (unlikely(rcc != 0))
 1805                         ena_log(pdev, ERR, "dev has no parent while "
 1806                             "releasing res for irq: %d\n", irq->vector);
 1807                 irq->requested = false;
 1808                 irq->res = NULL;
 1809         }
 1810 
 1811         return (rc);
 1812 }
 1813 
 1814 static void
 1815 ena_free_mgmnt_irq(struct ena_adapter *adapter)
 1816 {
 1817         device_t pdev = adapter->pdev;
 1818         struct ena_irq *irq;
 1819         int rc;
 1820 
 1821         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
 1822         if (irq->requested) {
 1823                 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
 1824                 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
 1825                 if (unlikely(rc != 0))
 1826                         ena_log(pdev, ERR, "failed to tear down irq: %d\n",
 1827                             irq->vector);
 1828                 irq->requested = 0;
 1829         }
 1830 
 1831         if (irq->res != NULL) {
 1832                 ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
 1833                 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
 1834                     irq->vector, irq->res);
 1835                 irq->res = NULL;
 1836                 if (unlikely(rc != 0))
 1837                         ena_log(pdev, ERR, "dev has no parent while "
 1838                             "releasing res for irq: %d\n", irq->vector);
 1839         }
 1840 }
 1841 
 1842 static void
 1843 ena_free_io_irq(struct ena_adapter *adapter)
 1844 {
 1845         device_t pdev = adapter->pdev;
 1846         struct ena_irq *irq;
 1847         int rc;
 1848 
 1849         for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
 1850                 irq = &adapter->irq_tbl[i];
 1851                 if (irq->requested) {
 1852                         ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
 1853                         rc = bus_teardown_intr(adapter->pdev, irq->res,
 1854                             irq->cookie);
 1855                         if (unlikely(rc != 0)) {
 1856                                 ena_log(pdev, ERR, "failed to tear down irq: %d\n",
 1857                                     irq->vector);
 1858                         }
 1859                         irq->requested = 0;
 1860                 }
 1861 
 1862                 if (irq->res != NULL) {
 1863                         ena_log(pdev, DBG, "release resource irq: %d\n",
 1864                             irq->vector);
 1865                         rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
 1866                             irq->vector, irq->res);
 1867                         irq->res = NULL;
 1868                         if (unlikely(rc != 0)) {
 1869                                 ena_log(pdev, ERR, "dev has no parent"
 1870                                     " while releasing res for irq: %d\n",
 1871                                     irq->vector);
 1872                         }
 1873                 }
 1874         }
 1875 }
 1876 
 1877 static void
 1878 ena_free_irqs(struct ena_adapter* adapter)
 1879 {
 1880 
 1881         ena_free_io_irq(adapter);
 1882         ena_free_mgmnt_irq(adapter);
 1883         ena_disable_msix(adapter);
 1884 }
 1885 
 1886 static void
 1887 ena_disable_msix(struct ena_adapter *adapter)
 1888 {
 1889 
 1890         if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
 1891                 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
 1892                 pci_release_msi(adapter->pdev);
 1893         }
 1894 
 1895         adapter->msix_vecs = 0;
 1896         free(adapter->msix_entries, M_DEVBUF);
 1897         adapter->msix_entries = NULL;
 1898 }
 1899 
 1900 static void
 1901 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
 1902 {
 1903         struct ena_com_io_cq* io_cq;
 1904         struct ena_eth_io_intr_reg intr_reg;
 1905         struct ena_ring *tx_ring;
 1906         uint16_t ena_qid;
 1907         int i;
 1908 
 1909         /* Unmask interrupts for all queues */
 1910         for (i = 0; i < adapter->num_io_queues; i++) {
 1911                 ena_qid = ENA_IO_TXQ_IDX(i);
 1912                 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
 1913                 ena_com_update_intr_reg(&intr_reg, 0, 0, true);
 1914                 tx_ring = &adapter->tx_ring[i];
 1915                 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
 1916                 ena_com_unmask_intr(io_cq, &intr_reg);
 1917         }
 1918 }
 1919 
 1920 static int
 1921 ena_up_complete(struct ena_adapter *adapter)
 1922 {
 1923         int rc;
 1924 
 1925         if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
 1926                 rc = ena_rss_configure(adapter);
 1927                 if (rc != 0) {
 1928                         ena_log(adapter->pdev, ERR,
 1929                             "Failed to configure RSS\n");
 1930                         return (rc);
 1931                 }
 1932         }
 1933 
 1934         rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
 1935         if (unlikely(rc != 0))
 1936                 return (rc);
 1937 
 1938         ena_refill_all_rx_bufs(adapter);
 1939         ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
 1940             sizeof(adapter->hw_stats));
 1941 
 1942         return (0);
 1943 }
 1944 
 1945 static void
 1946 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
 1947     int new_rx_size)
 1948 {
 1949         int i;
 1950 
 1951         for (i = 0; i < adapter->num_io_queues; i++) {
 1952                 adapter->tx_ring[i].ring_size = new_tx_size;
 1953                 adapter->rx_ring[i].ring_size = new_rx_size;
 1954         }
 1955 }
 1956 
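      /*
       * Try to allocate all Tx/Rx resources and IO queues, halving the ring
       * sizes on ENOMEM and retrying. For example, with equal requested
       * sizes TX=1024 and RX=1024, the retry sequence is 512/512, 256/256,
       * and so on, giving up once a ring would drop below ENA_MIN_RING_SIZE.
       * When the sizes differ, only the larger ring is halved first.
       */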
 1957 static int
 1958 create_queues_with_size_backoff(struct ena_adapter *adapter)
 1959 {
 1960         device_t pdev = adapter->pdev;
 1961         int rc;
 1962         uint32_t cur_rx_ring_size, cur_tx_ring_size;
 1963         uint32_t new_rx_ring_size, new_tx_ring_size;
 1964 
 1965         /*
 1966          * Current queue sizes might be set to smaller than the requested
 1967          * ones due to past queue allocation failures.
 1968          */
 1969         set_io_rings_size(adapter, adapter->requested_tx_ring_size,
 1970             adapter->requested_rx_ring_size);
 1971 
 1972         while (1) {
 1973                 /* Allocate transmit descriptors */
 1974                 rc = ena_setup_all_tx_resources(adapter);
 1975                 if (unlikely(rc != 0)) {
 1976                         ena_log(pdev, ERR, "failed to set up Tx resources\n");
 1977                         goto err_setup_tx;
 1978                 }
 1979 
 1980                 /* Allocate receive descriptors */
 1981                 rc = ena_setup_all_rx_resources(adapter);
 1982                 if (unlikely(rc != 0)) {
 1983                         ena_log(pdev, ERR, "failed to set up Rx resources\n");
 1984                         goto err_setup_rx;
 1985                 }
 1986 
 1987                 /* Create IO queues for Rx & Tx */
 1988                 rc = ena_create_io_queues(adapter);
 1989                 if (unlikely(rc != 0)) {
 1990                         ena_log(pdev, ERR,
 1991                             "create IO queues failed\n");
 1992                         goto err_io_que;
 1993                 }
 1994 
 1995                 return (0);
 1996 
 1997 err_io_que:
 1998                 ena_free_all_rx_resources(adapter);
 1999 err_setup_rx:
 2000                 ena_free_all_tx_resources(adapter);
 2001 err_setup_tx:
 2002                 /*
 2003                  * Lower the ring size if ENOMEM. Otherwise, return the
 2004                  * error straightaway.
 2005                  */
 2006                 if (unlikely(rc != ENOMEM)) {
 2007                         ena_log(pdev, ERR,
 2008                             "Queue creation failed with error code: %d\n", rc);
 2009                         return (rc);
 2010                 }
 2011 
 2012                 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
 2013                 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
 2014 
 2015                 ena_log(pdev, ERR,
 2016                     "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
 2017                     cur_tx_ring_size, cur_rx_ring_size);
 2018 
 2019                 new_tx_ring_size = cur_tx_ring_size;
 2020                 new_rx_ring_size = cur_rx_ring_size;
 2021 
 2022                 /*
 2023                  * Decrease the size of the larger queue, or decrease both
 2024                  * if they are the same size.
 2025                  */
 2026                 if (cur_rx_ring_size <= cur_tx_ring_size)
 2027                         new_tx_ring_size = cur_tx_ring_size / 2;
 2028                 if (cur_rx_ring_size >= cur_tx_ring_size)
 2029                         new_rx_ring_size = cur_rx_ring_size / 2;
 2030 
 2031                 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
 2032                     new_rx_ring_size < ENA_MIN_RING_SIZE) {
 2033                         ena_log(pdev, ERR,
 2034                             "Queue creation failed with the smallest possible queue "
 2035                             "size of %d for both queues. Not retrying with smaller queues\n",
 2036                             ENA_MIN_RING_SIZE);
 2037                         return (rc);
 2038                 }
 2039 
 2040                 ena_log(pdev, INFO,
 2041                     "Retrying queue creation with sizes TX=%d, RX=%d\n",
 2042                     new_tx_ring_size, new_rx_ring_size);
 2043 
 2044                 set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
 2045         }
 2046 }
 2047 
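      /**
       * ena_up - bring the interface up
       * @adapter: network interface device structure
       *
       * Sets up and requests the IO interrupts, creates the IO queues (with
       * size backoff), configures RSS (when active) and the MTU, refills
       * the Rx rings, marks the interface as running, and finally unmasks
       * the IO IRQs. Returns 0 if the device came up or was already up.
       **/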
 2048 int
 2049 ena_up(struct ena_adapter *adapter)
 2050 {
 2051         int rc = 0;
 2052 
 2053         ENA_LOCK_ASSERT();
 2054 
 2055         if (unlikely(device_is_attached(adapter->pdev) == 0)) {
 2056                 ena_log(adapter->pdev, ERR, "device is not attached!\n");
 2057                 return (ENXIO);
 2058         }
 2059 
 2060         if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
 2061                 return (0);
 2062 
 2063         ena_log(adapter->pdev, INFO, "device is going UP\n");
 2064 
 2065         /* setup interrupts for IO queues */
 2066         rc = ena_setup_io_intr(adapter);
 2067         if (unlikely(rc != 0)) {
 2068                 ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
 2069                 goto error;
 2070         }
 2071         rc = ena_request_io_irq(adapter);
 2072         if (unlikely(rc != 0)) {
 2073                 ena_log(adapter->pdev, ERR, "failed to request I/O IRQs\n");
 2074                 goto error;
 2075         }
 2076 
 2077         ena_log(adapter->pdev, INFO,
 2078             "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
 2079             "LLQ is %s\n",
 2080             adapter->num_io_queues,
 2081             adapter->requested_rx_ring_size,
 2082             adapter->requested_tx_ring_size,
 2083             (adapter->ena_dev->tx_mem_queue_type ==
 2084                 ENA_ADMIN_PLACEMENT_POLICY_DEV) ?  "ENABLED" : "DISABLED");
 2085 
 2086         rc = create_queues_with_size_backoff(adapter);
 2087         if (unlikely(rc != 0)) {
 2088                 ena_log(adapter->pdev, ERR,
 2089                     "error creating queues with size backoff\n");
 2090                 goto err_create_queues_with_backoff;
 2091         }
 2092 
 2093         if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
 2094                 if_link_state_change(adapter->ifp, LINK_STATE_UP);
 2095 
 2096         rc = ena_up_complete(adapter);
 2097         if (unlikely(rc != 0))
 2098                 goto err_up_complete;
 2099 
 2100         counter_u64_add(adapter->dev_stats.interface_up, 1);
 2101 
 2102         ena_update_hwassist(adapter);
 2103 
 2104         if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
 2105                 IFF_DRV_OACTIVE);
 2106 
 2107         /* Activate timer service only if the device is running.
 2108          * If this flag is not set, it means that the driver is being
 2109          * reset and timer service will be activated afterwards.
 2110          */
 2111         if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
 2112                 callout_reset_sbt(&adapter->timer_service, SBT_1S,
 2113                         SBT_1S, ena_timer_service, (void *)adapter, 0);
 2114         }
 2115 
 2116         ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
 2117 
 2118         ena_unmask_all_io_irqs(adapter);
 2119 
 2120         return (0);
 2121 
 2122 err_up_complete:
 2123         ena_destroy_all_io_queues(adapter);
 2124         ena_free_all_rx_resources(adapter);
 2125         ena_free_all_tx_resources(adapter);
 2126 err_create_queues_with_backoff:
 2127         ena_free_io_irq(adapter);
 2128 error:
 2129         return (rc);
 2130 }
 2131 
 2132 static uint64_t
 2133 ena_get_counter(if_t ifp, ift_counter cnt)
 2134 {
 2135         struct ena_adapter *adapter;
 2136         struct ena_hw_stats *stats;
 2137 
 2138         adapter = if_getsoftc(ifp);
 2139         stats = &adapter->hw_stats;
 2140 
 2141         switch (cnt) {
 2142         case IFCOUNTER_IPACKETS:
 2143                 return (counter_u64_fetch(stats->rx_packets));
 2144         case IFCOUNTER_OPACKETS:
 2145                 return (counter_u64_fetch(stats->tx_packets));
 2146         case IFCOUNTER_IBYTES:
 2147                 return (counter_u64_fetch(stats->rx_bytes));
 2148         case IFCOUNTER_OBYTES:
 2149                 return (counter_u64_fetch(stats->tx_bytes));
 2150         case IFCOUNTER_IQDROPS:
 2151                 return (counter_u64_fetch(stats->rx_drops));
 2152         case IFCOUNTER_OQDROPS:
 2153                 return (counter_u64_fetch(stats->tx_drops));
 2154         default:
 2155                 return (if_get_counter_default(ifp, cnt));
 2156         }
 2157 }
 2158 
 2159 static int
 2160 ena_media_change(if_t ifp)
 2161 {
 2162         /* Media Change is not supported by firmware */
 2163         return (0);
 2164 }
 2165 
 2166 static void
 2167 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
 2168 {
 2169         struct ena_adapter *adapter = if_getsoftc(ifp);
 2170         ena_log(adapter->pdev, DBG, "Media status update\n");
 2171 
 2172         ENA_LOCK_LOCK();
 2173 
 2174         ifmr->ifm_status = IFM_AVALID;
 2175         ifmr->ifm_active = IFM_ETHER;
 2176 
 2177         if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
 2178                 ENA_LOCK_UNLOCK();
 2179                 ena_log(adapter->pdev, INFO, "Link is down\n");
 2180                 return;
 2181         }
 2182 
 2183         ifmr->ifm_status |= IFM_ACTIVE;
 2184         ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
 2185 
 2186         ENA_LOCK_UNLOCK();
 2187 }
 2188 
 2189 static void
 2190 ena_init(void *arg)
 2191 {
 2192         struct ena_adapter *adapter = (struct ena_adapter *)arg;
 2193 
 2194         if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
 2195                 ENA_LOCK_LOCK();
 2196                 ena_up(adapter);
 2197                 ENA_LOCK_UNLOCK();
 2198         }
 2199 }
 2200 
 2201 static int
 2202 ena_ioctl(if_t ifp, u_long command, caddr_t data)
 2203 {
 2204         struct ena_adapter *adapter;
 2205         struct ifreq *ifr;
 2206         int rc;
 2207 
 2208         adapter = ifp->if_softc;
 2209         ifr = (struct ifreq *)data;
 2210 
 2211         /*
 2212          * Acquire the lock so the up and down routines cannot run in parallel.
 2213          */
 2214         rc = 0;
 2215         switch (command) {
 2216         case SIOCSIFMTU:
 2217                 if (ifp->if_mtu == ifr->ifr_mtu)
 2218                         break;
 2219                 ENA_LOCK_LOCK();
 2220                 ena_down(adapter);
 2221 
 2222                 ena_change_mtu(ifp, ifr->ifr_mtu);
 2223 
 2224                 rc = ena_up(adapter);
 2225                 ENA_LOCK_UNLOCK();
 2226                 break;
 2227 
 2228         case SIOCSIFFLAGS:
 2229                 if ((ifp->if_flags & IFF_UP) != 0) {
 2230                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2231                                 if ((ifp->if_flags & (IFF_PROMISC |
 2232                                     IFF_ALLMULTI)) != 0) {
 2233                                         ena_log(adapter->pdev, INFO,
 2234                                             "ioctl promisc/allmulti\n");
 2235                                 }
 2236                         } else {
 2237                                 ENA_LOCK_LOCK();
 2238                                 rc = ena_up(adapter);
 2239                                 ENA_LOCK_UNLOCK();
 2240                         }
 2241                 } else {
 2242                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2243                                 ENA_LOCK_LOCK();
 2244                                 ena_down(adapter);
 2245                                 ENA_LOCK_UNLOCK();
 2246                         }
 2247                 }
 2248                 break;
 2249 
 2250         case SIOCADDMULTI:
 2251         case SIOCDELMULTI:
 2252                 break;
 2253 
 2254         case SIOCSIFMEDIA:
 2255         case SIOCGIFMEDIA:
 2256                 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
 2257                 break;
 2258 
 2259         case SIOCSIFCAP:
 2260                 {
 2261                         int reinit = 0;
 2262 
 2263                         if (ifr->ifr_reqcap != ifp->if_capenable) {
 2264                                 ifp->if_capenable = ifr->ifr_reqcap;
 2265                                 reinit = 1;
 2266                         }
 2267 
 2268                         if ((reinit != 0) &&
 2269                             ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
 2270                                 ENA_LOCK_LOCK();
 2271                                 ena_down(adapter);
 2272                                 rc = ena_up(adapter);
 2273                                 ENA_LOCK_UNLOCK();
 2274                         }
 2275                 }
 2276 
 2277                 break;
 2278         default:
 2279                 rc = ether_ioctl(ifp, command, data);
 2280                 break;
 2281         }
 2282 
 2283         return (rc);
 2284 }
 2285 
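      /**
       * ena_get_dev_offloads - translate device offloads to ifnet capabilities
       * @feat: features reported by the device
       *
       * Maps the Tx/Rx checksum and TSO feature bits onto the matching
       * IFCAP_* flags; LRO and jumbo MTU are always advertised.
       **/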
 2286 static int
 2287 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
 2288 {
 2289         int caps = 0;
 2290 
 2291         if ((feat->offload.tx &
 2292             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
 2293             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
 2294             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
 2295                 caps |= IFCAP_TXCSUM;
 2296 
 2297         if ((feat->offload.tx &
 2298             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
 2299             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
 2300                 caps |= IFCAP_TXCSUM_IPV6;
 2301 
 2302         if ((feat->offload.tx &
 2303             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
 2304                 caps |= IFCAP_TSO4;
 2305 
 2306         if ((feat->offload.tx &
 2307             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
 2308                 caps |= IFCAP_TSO6;
 2309 
 2310         if ((feat->offload.rx_supported &
 2311             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
 2312             ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
 2313                 caps |= IFCAP_RXCSUM;
 2314 
 2315         if ((feat->offload.rx_supported &
 2316             ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
 2317                 caps |= IFCAP_RXCSUM_IPV6;
 2318 
 2319         caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
 2320 
 2321         return (caps);
 2322 }
 2323 
 2324 static void
 2325 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
 2326 {
 2327 
 2328         host_info->supported_network_features[0] =
 2329             (uint32_t)if_getcapabilities(ifp);
 2330 }
 2331 
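      /**
       * ena_update_hwassist - refresh the ifnet hwassist flags
       * @adapter: network interface device structure
       *
       * Rebuilds the CSUM_* and TSO hwassist bits from the currently
       * enabled interface capabilities and the device's Tx offload support.
       **/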
 2332 static void
 2333 ena_update_hwassist(struct ena_adapter *adapter)
 2334 {
 2335         if_t ifp = adapter->ifp;
 2336         uint32_t feat = adapter->tx_offload_cap;
 2337         int cap = if_getcapenable(ifp);
 2338         int flags = 0;
 2339 
 2340         if_clearhwassist(ifp);
 2341 
 2342         if ((cap & IFCAP_TXCSUM) != 0) {
 2343                 if ((feat &
 2344                     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
 2345                         flags |= CSUM_IP;
 2346                 if ((feat &
 2347                     (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
 2348                     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
 2349                         flags |= CSUM_IP_UDP | CSUM_IP_TCP;
 2350         }
 2351 
 2352         if ((cap & IFCAP_TXCSUM_IPV6) != 0)
 2353                 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
 2354 
 2355         if ((cap & IFCAP_TSO4) != 0)
 2356                 flags |= CSUM_IP_TSO;
 2357 
 2358         if ((cap & IFCAP_TSO6) != 0)
 2359                 flags |= CSUM_IP6_TSO;
 2360 
 2361         if_sethwassistbits(ifp, flags, 0);
 2362 }
 2363 
 2364 static int
 2365 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
 2366     struct ena_com_dev_get_features_ctx *feat)
 2367 {
 2368         if_t ifp;
 2369         int caps = 0;
 2370 
 2371         ifp = adapter->ifp = if_gethandle(IFT_ETHER);
 2372         if (unlikely(ifp == NULL)) {
 2373                 ena_log(pdev, ERR, "can not allocate ifnet structure\n");
 2374                 return (ENXIO);
 2375         }
 2376         if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
 2377         if_setdev(ifp, pdev);
 2378         if_setsoftc(ifp, adapter);
 2379 
 2380         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
 2381             IFF_KNOWSEPOCH);
 2382         if_setinitfn(ifp, ena_init);
 2383         if_settransmitfn(ifp, ena_mq_start);
 2384         if_setqflushfn(ifp, ena_qflush);
 2385         if_setioctlfn(ifp, ena_ioctl);
 2386         if_setgetcounterfn(ifp, ena_get_counter);
 2387 
 2388         if_setsendqlen(ifp, adapter->requested_tx_ring_size);
 2389         if_setsendqready(ifp);
 2390         if_setmtu(ifp, ETHERMTU);
 2391         if_setbaudrate(ifp, 0);
 2392         /* Zeroize capabilities... */
 2393         if_setcapabilities(ifp, 0);
 2394         if_setcapenable(ifp, 0);
 2395         /* check hardware support */
 2396         caps = ena_get_dev_offloads(feat);
 2397         /* ... and set them */
 2398         if_setcapabilitiesbit(ifp, caps, 0);
 2399 
 2400         /* TSO parameters */
 2401         ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
 2402             (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
 2403         ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
 2404         ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
 2405 
 2406         if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
 2407         if_setcapenable(ifp, if_getcapabilities(ifp));
 2408 
 2409         /*
 2410          * Specify the media types supported by this adapter and register
 2411          * callbacks to update media and link information
 2412          */
 2413         ifmedia_init(&adapter->media, IFM_IMASK,
 2414             ena_media_change, ena_media_status);
 2415         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 2416         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 2417 
 2418         ether_ifattach(ifp, adapter->mac_addr);
 2419 
 2420         return (0);
 2421 }
 2422 
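      /**
       * ena_down - bring the interface down
       * @adapter: network interface device structure
       *
       * Stops the timer service, releases the IO IRQs, resets the device if
       * a reset was requested, and destroys the IO queues together with all
       * of their buffers and resources. The reverse of ena_up().
       **/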
 2423 void
 2424 ena_down(struct ena_adapter *adapter)
 2425 {
 2426         int rc;
 2427 
 2428         ENA_LOCK_ASSERT();
 2429 
 2430         if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
 2431                 return;
 2432 
 2433         ena_log(adapter->pdev, INFO, "device is going DOWN\n");
 2434 
 2435         callout_drain(&adapter->timer_service);
 2436 
 2437         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
 2438         if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
 2439                 IFF_DRV_RUNNING);
 2440 
 2441         ena_free_io_irq(adapter);
 2442 
 2443         if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
 2444                 rc = ena_com_dev_reset(adapter->ena_dev,
 2445                         adapter->reset_reason);
 2446                 if (unlikely(rc != 0))
 2447                         ena_log(adapter->pdev, ERR,
 2448                                 "Device reset failed\n");
 2449         }
 2450 
 2451         ena_destroy_all_io_queues(adapter);
 2452 
 2453         ena_free_all_tx_bufs(adapter);
 2454         ena_free_all_rx_bufs(adapter);
 2455         ena_free_all_tx_resources(adapter);
 2456         ena_free_all_rx_resources(adapter);
 2457 
 2458         counter_u64_add(adapter->dev_stats.interface_down, 1);
 2459 }
 2460 
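      /*
       * The number of usable IO queues is the minimum of: the number of
       * CPUs, ENA_MAX_NUM_IO_QUEUES, the Rx and Tx queue limits reported by
       * the device (or the LLQ limit when LLQ placement is used), the
       * available MSI-X vectors minus the one reserved for mgmnt, and, with
       * RSS, the number of RSS buckets.
       */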
 2461 static uint32_t
 2462 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
 2463     struct ena_com_dev_get_features_ctx *get_feat_ctx)
 2464 {
 2465         uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
 2466 
 2467         /* Regular queues capabilities */
 2468         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
 2469                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
 2470                     &get_feat_ctx->max_queue_ext.max_queue_ext;
 2471                 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
 2472                         max_queue_ext->max_rx_cq_num);
 2473 
 2474                 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
 2475                 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
 2476         } else {
 2477                 struct ena_admin_queue_feature_desc *max_queues =
 2478                     &get_feat_ctx->max_queues;
 2479                 io_tx_sq_num = max_queues->max_sq_num;
 2480                 io_tx_cq_num = max_queues->max_cq_num;
 2481                 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
 2482         }
 2483 
 2484         /* In case of LLQ use the llq fields for the tx SQ/CQ */
 2485         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 2486                 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 2487 
 2488         max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
 2489         max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
 2490         max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
 2491         max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
 2492         /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
 2493         max_num_io_queues = min_t(uint32_t, max_num_io_queues,
 2494             pci_msix_count(pdev) - 1);
 2495 #ifdef RSS
 2496         max_num_io_queues = min_t(uint32_t, max_num_io_queues,
 2497             rss_getnumbuckets());
 2498 #endif
 2499 
 2500         return (max_num_io_queues);
 2501 }
 2502 
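      /*
       * Remap the given resource as write-combining for better LLQ
       * performance. Only implemented for i386, amd64, and aarch64; other
       * architectures get EOPNOTSUPP.
       */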
 2503 static int
 2504 ena_enable_wc(device_t pdev, struct resource *res)
 2505 {
 2506 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
 2507         vm_offset_t va;
 2508         vm_size_t len;
 2509         int rc;
 2510 
 2511         va = (vm_offset_t)rman_get_virtual(res);
 2512         len = rman_get_size(res);
 2513         /* Enable write combining */
 2514         rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
 2515         if (unlikely(rc != 0)) {
 2516                 ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
 2517                 return (rc);
 2518         }
 2519 
 2520         return (0);
 2521 #endif
 2522         return (EOPNOTSUPP);
 2523 }
 2524 
 2525 static int
 2526 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
 2527     struct ena_admin_feature_llq_desc *llq,
 2528     struct ena_llq_configurations *llq_default_configurations)
 2529 {
 2530         struct ena_adapter *adapter = device_get_softc(pdev);
 2531         int rc, rid;
 2532         uint32_t llq_feature_mask;
 2533 
 2534         llq_feature_mask = 1 << ENA_ADMIN_LLQ;
 2535         if (!(ena_dev->supported_features & llq_feature_mask)) {
 2536                 ena_log(pdev, WARN,
 2537                     "LLQ is not supported. Fallback to host mode policy.\n");
 2538                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 2539                 return (0);
 2540         }
 2541 
 2542         rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
 2543         if (unlikely(rc != 0)) {
 2544                 ena_log(pdev, WARN, "Failed to configure the device mode. "
 2545                     "Fallback to host mode policy.\n");
 2546                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 2547                 return (0);
 2548         }
 2549 
 2550         /* Nothing to config, exit */
 2551         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
 2552                 return (0);
 2553 
 2554         /* Try to allocate resources for LLQ bar */
 2555         rid = PCIR_BAR(ENA_MEM_BAR);
 2556         adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
 2557             &rid, RF_ACTIVE);
 2558         if (unlikely(adapter->memory == NULL)) {
 2559                 ena_log(pdev, WARN, "unable to allocate LLQ bar resource. "
 2560                     "Fallback to host mode policy.\n");
 2561                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 2562                 return (0);
 2563         }
 2564 
 2565         /* Enable write combining for better LLQ performance */
 2566         rc = ena_enable_wc(adapter->pdev, adapter->memory);
 2567         if (unlikely(rc != 0)) {
 2568                 ena_log(pdev, ERR, "failed to enable write combining.\n");
 2569                 return (rc);
 2570         }
 2571 
 2572         /*
 2573          * Save virtual address of the device's memory region
 2574          * for the ena_com layer.
 2575          */
 2576         ena_dev->mem_bar = rman_get_virtual(adapter->memory);
 2577 
 2578         return (0);
 2579 }
 2580 
 2581 static inline void
 2582 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
 2583     struct ena_admin_feature_llq_desc *llq)
 2584 {
 2585 
 2586         llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
 2587         llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 2588         llq_config->llq_num_decs_before_header =
 2589             ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 2590         if ((llq->entry_size_ctrl_supported &
 2591              ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
 2592             ena_force_large_llq_header) {
 2593                 llq_config->llq_ring_entry_size =
 2594                     ENA_ADMIN_LIST_ENTRY_SIZE_256B;
 2595                 llq_config->llq_ring_entry_size_value = 256;
 2596         } else {
 2597                 llq_config->llq_ring_entry_size =
 2598                     ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 2599                 llq_config->llq_ring_entry_size_value = 128;
 2600         }
 2601 }
 2602 
 2603 static int
 2604 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 2605 {
 2606         struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
 2607         struct ena_com_dev *ena_dev = ctx->ena_dev;
 2608         uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
 2609         uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
 2610         uint32_t max_tx_queue_size;
 2611         uint32_t max_rx_queue_size;
 2612 
 2613         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
 2614                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
 2615                     &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
 2616                 max_rx_queue_size = min_t(uint32_t,
 2617                     max_queue_ext->max_rx_cq_depth,
 2618                     max_queue_ext->max_rx_sq_depth);
 2619                 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
 2620 
 2621                 if (ena_dev->tx_mem_queue_type ==
 2622                     ENA_ADMIN_PLACEMENT_POLICY_DEV)
 2623                         max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
 2624                             llq->max_llq_depth);
 2625                 else
 2626                         max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
 2627                             max_queue_ext->max_tx_sq_depth);
 2628 
 2629                 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
 2630                     max_queue_ext->max_per_packet_tx_descs);
 2631                 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
 2632                     max_queue_ext->max_per_packet_rx_descs);
 2633         } else {
 2634                 struct ena_admin_queue_feature_desc *max_queues =
 2635                     &ctx->get_feat_ctx->max_queues;
 2636                 max_rx_queue_size = min_t(uint32_t,
 2637                     max_queues->max_cq_depth,
 2638                     max_queues->max_sq_depth);
 2639                 max_tx_queue_size = max_queues->max_cq_depth;
 2640 
 2641                 if (ena_dev->tx_mem_queue_type ==
 2642                     ENA_ADMIN_PLACEMENT_POLICY_DEV)
 2643                         max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
 2644                             llq->max_llq_depth);
 2645                 else
 2646                         max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
 2647                             max_queues->max_sq_depth);
 2648 
 2649                 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
 2650                     max_queues->max_packet_tx_descs);
 2651                 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
 2652                     max_queues->max_packet_rx_descs);
 2653         }
 2654 
 2655         /* round down to the nearest power of 2 */
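              /* (e.g., flsl() turns a device limit of 1000 into 512) */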
 2656         max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
 2657         max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
 2658 
 2659         /*
 2660          * When forcing large headers, we multiply the entry size by 2,
 2661          * and therefore divide the queue size by 2, leaving the amount
 2662          * of memory used by the queues unchanged.
 2663          */
 2664         if (ena_force_large_llq_header) {
 2665                 if ((llq->entry_size_ctrl_supported &
 2666                      ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
 2667                     ena_dev->tx_mem_queue_type ==
 2668                      ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 2669                         max_tx_queue_size /= 2;
 2670                         ena_log(ctx->pdev, INFO,
 2671                             "Forcing large headers and decreasing maximum Tx queue size to %d\n",
 2672                             max_tx_queue_size);
 2673                 } else {
 2674                         ena_log(ctx->pdev, WARN,
 2675                             "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
 2676                 }
 2677         }
 2678 
 2679         tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
 2680             max_tx_queue_size);
 2681         rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
 2682             max_rx_queue_size);
 2683 
 2684         tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
 2685         rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
 2686 
 2687         ctx->max_tx_queue_size = max_tx_queue_size;
 2688         ctx->max_rx_queue_size = max_rx_queue_size;
 2689         ctx->tx_queue_size = tx_queue_size;
 2690         ctx->rx_queue_size = rx_queue_size;
 2691 
 2692         return (0);
 2693 }
 2694 
 2695 static void
 2696 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
 2697 {
 2698         struct ena_admin_host_info *host_info;
 2699         uintptr_t rid;
 2700         int rc;
 2701 
 2702         /* Allocate only the host info */
 2703         rc = ena_com_allocate_host_info(ena_dev);
 2704         if (unlikely(rc != 0)) {
 2705                 ena_log(dev, ERR, "Cannot allocate host info\n");
 2706                 return;
 2707         }
 2708 
 2709         host_info = ena_dev->host_attr.host_info;
 2710 
 2711         if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
 2712                 host_info->bdf = rid;
 2713         host_info->os_type = ENA_ADMIN_OS_FREEBSD;
 2714         host_info->kernel_ver = osreldate;
 2715 
 2716         sprintf(host_info->kernel_ver_str, "%d", osreldate);
 2717         host_info->os_dist = 0;
 2718         strncpy(host_info->os_dist_str, osrelease,
 2719             sizeof(host_info->os_dist_str) - 1);
 2720 
 2721         host_info->driver_version =
 2722                 (DRV_MODULE_VER_MAJOR) |
 2723                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
 2724                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
 2725         host_info->num_cpus = mp_ncpus;
 2726         host_info->driver_supported_features =
 2727             ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
 2728             ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
 2729 
 2730         rc = ena_com_set_host_attributes(ena_dev);
 2731         if (unlikely(rc != 0)) {
 2732                 if (rc == EOPNOTSUPP)
 2733                         ena_log(dev, WARN, "Cannot set host attributes\n");
 2734                 else
 2735                         ena_log(dev, ERR, "Cannot set host attributes\n");
 2736 
 2737                 goto err;
 2738         }
 2739 
 2740         return;
 2741 
 2742 err:
 2743         ena_com_delete_host_info(ena_dev);
 2744 }
 2745 
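      /**
       * ena_device_init - initialize the device and its admin queue
       * @adapter: network interface device structure
       * @pdev: the underlying device_t
       * @get_feat_ctx: output buffer for the device features
       * @wd_active: set when the keep-alive watchdog AENQ group is enabled
       *
       * Performs the MMIO read setup, device reset, version and DMA width
       * checks, admin queue initialization, host info configuration,
       * feature query, and AENQ group configuration, unwinding on failure.
       **/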
 2746 static int
 2747 ena_device_init(struct ena_adapter *adapter, device_t pdev,
 2748     struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
 2749 {
 2750         struct ena_com_dev* ena_dev = adapter->ena_dev;
 2751         bool readless_supported;
 2752         uint32_t aenq_groups;
 2753         int dma_width;
 2754         int rc;
 2755 
 2756         rc = ena_com_mmio_reg_read_request_init(ena_dev);
 2757         if (unlikely(rc != 0)) {
 2758                 ena_log(pdev, ERR, "failed to init mmio read less\n");
 2759                 return (rc);
 2760         }
 2761 
 2762         /*
 2763          * The PCIe configuration space revision ID indicates whether
 2764          * MMIO register read is disabled.
 2765          */
 2766         readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
 2767         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
 2768 
 2769         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 2770         if (unlikely(rc != 0)) {
 2771                 ena_log(pdev, ERR, "Can not reset device\n");
 2772                 goto err_mmio_read_less;
 2773         }
 2774 
 2775         rc = ena_com_validate_version(ena_dev);
 2776         if (unlikely(rc != 0)) {
 2777                 ena_log(pdev, ERR, "device version is too low\n");
 2778                 goto err_mmio_read_less;
 2779         }
 2780 
 2781         dma_width = ena_com_get_dma_width(ena_dev);
 2782         if (unlikely(dma_width < 0)) {
 2783                 ena_log(pdev, ERR, "Invalid dma width value %d", dma_width);
 2784                 rc = dma_width;
 2785                 goto err_mmio_read_less;
 2786         }
 2787         adapter->dma_width = dma_width;
 2788 
 2789         /* ENA admin level init */
 2790         rc = ena_com_admin_init(ena_dev, &aenq_handlers);
 2791         if (unlikely(rc != 0)) {
 2792                 ena_log(pdev, ERR,
 2793                     "Can not initialize ena admin queue with device\n");
 2794                 goto err_mmio_read_less;
 2795         }
 2796 
 2797         /*
 2798          * To enable MSI-X interrupts, the driver needs to know the number
 2799          * of queues, so it uses admin polling mode to retrieve this
 2800          * information.
 2801          */
 2802         ena_com_set_admin_polling_mode(ena_dev, true);
 2803 
 2804         ena_config_host_info(ena_dev, pdev);
 2805 
 2806         /* Get Device Attributes */
 2807         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
 2808         if (unlikely(rc != 0)) {
 2809                 ena_log(pdev, ERR,
 2810                     "Cannot get attribute for ena device rc: %d\n", rc);
 2811                 goto err_admin_init;
 2812         }
 2813 
 2814         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
 2815             BIT(ENA_ADMIN_FATAL_ERROR) |
 2816             BIT(ENA_ADMIN_WARNING) |
 2817             BIT(ENA_ADMIN_NOTIFICATION) |
 2818             BIT(ENA_ADMIN_KEEP_ALIVE);
 2819 
 2820         aenq_groups &= get_feat_ctx->aenq.supported_groups;
 2821         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
 2822         if (unlikely(rc != 0)) {
 2823                 ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
 2824                 goto err_admin_init;
 2825         }
 2826 
 2827         *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 2828 
 2829         return (0);
 2830 
 2831 err_admin_init:
 2832         ena_com_delete_host_info(ena_dev);
 2833         ena_com_admin_destroy(ena_dev);
 2834 err_mmio_read_less:
 2835         ena_com_mmio_reg_read_request_destroy(ena_dev);
 2836 
 2837         return (rc);
 2838 }
 2839 
 2840 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
 2841 {
 2842         struct ena_com_dev *ena_dev = adapter->ena_dev;
 2843         int rc;
 2844 
 2845         rc = ena_enable_msix(adapter);
 2846         if (unlikely(rc != 0)) {
 2847                 ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
 2848                 return (rc);
 2849         }
 2850 
 2851         ena_setup_mgmnt_intr(adapter);
 2852 
 2853         rc = ena_request_mgmnt_irq(adapter);
 2854         if (unlikely(rc != 0)) {
 2855                 ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
 2856                 goto err_disable_msix;
 2857         }
 2858 
 2859         ena_com_set_admin_polling_mode(ena_dev, false);
 2860 
 2861         ena_com_admin_aenq_enable(ena_dev);
 2862 
 2863         return (0);
 2864 
 2865 err_disable_msix:
 2866         ena_disable_msix(adapter);
 2867 
 2868         return (rc);
 2869 }
 2870 
 2871 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
 2872 static void
 2873 ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
 2874 {
 2875         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
 2876         struct ena_admin_aenq_keep_alive_desc *desc;
 2877         sbintime_t stime;
 2878         uint64_t rx_drops;
 2879         uint64_t tx_drops;
 2880 
 2881         desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
 2882 
 2883         rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
 2884         tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
 2885         counter_u64_zero(adapter->hw_stats.rx_drops);
 2886         counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
 2887         counter_u64_zero(adapter->hw_stats.tx_drops);
 2888         counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
 2889 
 2890         stime = getsbinuptime();
 2891         atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
 2892 }
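
      /*
       * Editor's sketch (illustrative): the device reports the drop counters
       * split into two 32-bit halves, which ena_keep_alive_wd() recombines.
       * For example, with desc->rx_drops_high == 0x1 and
       * desc->rx_drops_low == 0x5:
       *
       *   ((uint64_t)0x1 << 32) | 0x5 == 0x100000005 (4294967301 drops)
       */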
 2893 
 2894 /* Check for keep alive expiration */
 2895 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 2896 {
 2897         sbintime_t timestamp, time;
 2898 
 2899         if (adapter->wd_active == 0)
 2900                 return;
 2901 
 2902         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
 2903                 return;
 2904 
 2905         timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
 2906         time = getsbinuptime() - timestamp;
 2907         if (unlikely(time > adapter->keep_alive_timeout)) {
 2908                 ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
 2909                 counter_u64_add(adapter->dev_stats.wd_expired, 1);
 2910                 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
 2911         }
 2912 }
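
      /*
       * Editor's note (illustrative): sbintime_t is a 64-bit fixed-point
       * value with 32 integer and 32 fractional bits, so SBT_1S == 1LL << 32.
       * If the last keep-alive arrived 7 s ago and keep_alive_timeout is
       * 6 * SBT_1S, the comparison above fires and the watchdog triggers a
       * reset.
       */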
 2913 
 2914 /* Check if admin queue is enabled */
 2915 static void check_for_admin_com_state(struct ena_adapter *adapter)
 2916 {
 2917         if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
 2918             false)) {
 2919                 ena_log(adapter->pdev, ERR,
 2920                     "ENA admin queue is not in running state!\n");
 2921                 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
 2922                 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
 2923         }
 2924 }
 2925 
 2926 static int
 2927 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
 2928     struct ena_ring *rx_ring)
 2929 {
 2930         if (likely(rx_ring->first_interrupt))
 2931                 return (0);
 2932 
 2933         if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
 2934                 return (0);
 2935 
 2936         rx_ring->no_interrupt_event_cnt++;
 2937 
 2938         if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
 2939                 ena_log(adapter->pdev, ERR, "Potential MSI-X issue on Rx side, "
 2940                     "Queue = %d. Resetting the device\n", rx_ring->qid);
 2941                 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
 2942                 return (EIO);
 2943         }
 2944 
 2945         return (0);
 2946 }
 2947 
 2948 static int
 2949 check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
 2950     struct ena_ring *tx_ring)
 2951 {
 2952         device_t pdev = adapter->pdev;
 2953         struct bintime curtime, time;
 2954         struct ena_tx_buffer *tx_buf;
 2955         sbintime_t time_offset;
 2956         uint32_t missed_tx = 0;
 2957         int i, rc = 0;
 2958 
 2959         getbinuptime(&curtime);
 2960 
 2961         for (i = 0; i < tx_ring->ring_size; i++) {
 2962                 tx_buf = &tx_ring->tx_buffer_info[i];
 2963 
 2964                 if (bintime_isset(&tx_buf->timestamp) == 0)
 2965                         continue;
 2966 
 2967                 time = curtime;
 2968                 bintime_sub(&time, &tx_buf->timestamp);
 2969                 time_offset = bttosbt(time);
 2970 
 2971                 if (unlikely(!tx_ring->first_interrupt &&
 2972                     time_offset > 2 * adapter->missing_tx_timeout)) {
 2973                         /*
 2974                          * If the interrupt is still not received after the
 2975                          * grace period, schedule a reset.
 2976                          */
 2977                         ena_log(pdev, ERR,
 2978                             "Potential MSI-X issue on Tx side, Queue = %d. "
 2979                             "Resetting the device\n", tx_ring->qid);
 2980                         ena_trigger_reset(adapter,
 2981                             ENA_REGS_RESET_MISS_INTERRUPT);
 2982                         return (EIO);
 2983                 }
 2984 
 2985                 /* Check again if the packet is still waiting */
 2986                 if (unlikely(time_offset > adapter->missing_tx_timeout)) {
 2987 
 2988                         if (!tx_buf->print_once)
 2989                                 ena_log(pdev, WARN, "Found a Tx that wasn't "
 2990                                     "completed on time, qid %d, index %d.\n",
 2991                                     tx_ring->qid, i);
 2992 
 2993                         tx_buf->print_once = true;
 2994                         missed_tx++;
 2995                 }
 2996         }
 2997 
 2998         if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
 2999                 ena_log(pdev, ERR,
 3000                     "The number of lost Tx completions is above the threshold "
 3001                     "(%d > %d). Reset the device\n",
 3002                     missed_tx, adapter->missing_tx_threshold);
 3003                 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
 3004                 rc = EIO;
 3005         }
 3006 
 3007         counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
 3008 
 3009         return (rc);
 3010 }
 3011 
 3012 /*
 3013  * Check for Tx completions which did not arrive on time.
 3014  * The timeout is defined by "missing_tx_timeout".
 3015  * A reset will be performed if the number of incomplete
 3016  * transactions exceeds "missing_tx_threshold".
 3017  */
 3018 static void
 3019 check_for_missing_completions(struct ena_adapter *adapter)
 3020 {
 3021         struct ena_ring *tx_ring;
 3022         struct ena_ring *rx_ring;
 3023         int i, budget, rc;
 3024 
 3025         /* Make sure the device state isn't being changed by another process */
 3026         rmb();
 3027 
 3028         if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
 3029                 return;
 3030 
 3031         if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
 3032                 return;
 3033 
 3034         if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
 3035                 return;
 3036 
 3037         budget = adapter->missing_tx_max_queues;
 3038 
 3039         for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
 3040                 tx_ring = &adapter->tx_ring[i];
 3041                 rx_ring = &adapter->rx_ring[i];
 3042 
 3043                 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
 3044                 if (unlikely(rc != 0))
 3045                         return;
 3046 
 3047                 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
 3048                 if (unlikely(rc != 0))
 3049                         return;
 3050 
 3051                 budget--;
 3052                 if (budget == 0) {
 3053                         i++;
 3054                         break;
 3055                 }
 3056         }
 3057 
 3058         adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
 3059 }
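
      /*
       * Editor's sketch (illustrative): the loop above monitors at most
       * missing_tx_max_queues rings per timer tick and remembers where it
       * stopped. For example, with num_io_queues == 8, a budget of 4 and
       * next_monitored_tx_qid == 6, queues 6 and 7 are checked, the loop
       * exits at i == 8, and next_monitored_tx_qid becomes 8 % 8 == 0, so
       * the next tick resumes from queue 0.
       */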
 3060 
 3061 /* Trigger Rx cleanup after 2 consecutive detections. */
 3062 #define EMPTY_RX_REFILL 2
 3063 /* Handle the rare case where the device runs out of Rx descriptors and the
 3064  * MSI-X handler failed to refill new Rx descriptors (e.g., due to a lack of
 3065  * memory). This case leads to a deadlock:
 3066  * the device won't send interrupts since all the new Rx packets will be
 3067  * dropped, and the MSI-X handler won't allocate new Rx descriptors, so the
 3068  * device won't be able to send new packets.
 3069  *
 3070  * When such a situation is detected, execute the Rx cleanup task in another
 3071  * thread.
 3072  */
 3073 static void
 3074 check_for_empty_rx_ring(struct ena_adapter *adapter)
 3075 {
 3076         struct ena_ring *rx_ring;
 3077         int i, refill_required;
 3078 
 3079         if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
 3080                 return;
 3081 
 3082         if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
 3083                 return;
 3084 
 3085         for (i = 0; i < adapter->num_io_queues; i++) {
 3086                 rx_ring = &adapter->rx_ring[i];
 3087 
 3088                 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
 3089                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
 3090                         rx_ring->empty_rx_queue++;
 3091 
 3092                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
 3093                                 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
 3094                                     1);
 3095 
 3096                                 ena_log(adapter->pdev, WARN,
 3097                                     "Rx ring %d is stalled. Triggering the refill function\n",
 3098                                     i);
 3099 
 3100                                 taskqueue_enqueue(rx_ring->que->cleanup_tq,
 3101                                     &rx_ring->que->cleanup_task);
 3102                                 rx_ring->empty_rx_queue = 0;
 3103                         }
 3104                 } else {
 3105                         rx_ring->empty_rx_queue = 0;
 3106                 }
 3107         }
 3108 }
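
      /*
       * Editor's note (illustrative): the check above treats a ring as empty
       * when all ring_size - 1 usable entries are free (one slot is
       * conventionally kept unused to tell a full ring from an empty one).
       * With EMPTY_RX_REFILL == 2, the cleanup task is only scheduled after
       * the ring is observed empty on two consecutive one-second timer
       * ticks, which filters out transient refill delays.
       */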
 3109 
 3110 static void
 3111 ena_update_hints(struct ena_adapter *adapter, struct ena_admin_ena_hw_hints *hints)
 3112 {
 3113         struct ena_com_dev *ena_dev = adapter->ena_dev;
 3114 
 3115         if (hints->admin_completion_tx_timeout)
 3116                 ena_dev->admin_queue.completion_timeout =
 3117                     hints->admin_completion_tx_timeout * 1000;
 3118 
 3119         if (hints->mmio_read_timeout)
 3120                 /* convert to usec */
 3121                 ena_dev->mmio_read.reg_read_to =
 3122                     hints->mmio_read_timeout * 1000;
 3123 
 3124         if (hints->missed_tx_completion_count_threshold_to_reset)
 3125                 adapter->missing_tx_threshold =
 3126                     hints->missed_tx_completion_count_threshold_to_reset;
 3127 
 3128         if (hints->missing_tx_completion_timeout) {
 3129                 if (hints->missing_tx_completion_timeout ==
 3130                      ENA_HW_HINTS_NO_TIMEOUT)
 3131                         adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
 3132                 else
 3133                         adapter->missing_tx_timeout =
 3134                             SBT_1MS * hints->missing_tx_completion_timeout;
 3135         }
 3136 
 3137         if (hints->driver_watchdog_timeout) {
 3138                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
 3139                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
 3140                 else
 3141                         adapter->keep_alive_timeout =
 3142                             SBT_1MS * hints->driver_watchdog_timeout;
 3143         }
 3144 }
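
      /*
       * Editor's note (illustrative): the hints arrive from the device in
       * milliseconds. A driver_watchdog_timeout hint of 3000 becomes
       * 3000 * SBT_1MS (a 3 s sbintime_t), while the admin completion and
       * MMIO read timeouts are stored in microseconds, hence the '* 1000'
       * conversions above.
       */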
 3145 
 3146 /**
 3147  * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
 3148  * @adapter: ENA device adapter
 3149  *
 3150  * Returns 0 on success, EOPNOTSUPP if the current HW doesn't support those
 3151  * metrics, and other error codes on failure.
 3152  *
 3153  * This function can possibly race with other calls to the admin queue.
 3154  * Because of that, the caller should either serialize calls to this function
 3155  * or make sure that there is no race in the current context.
 3156  */
 3157 static int
 3158 ena_copy_eni_metrics(struct ena_adapter *adapter)
 3159 {
 3160         static bool print_once = true;
 3161         int rc;
 3162 
 3163         rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
 3164 
 3165         if (rc != 0) {
 3166                 if (rc == ENA_COM_UNSUPPORTED) {
 3167                         if (print_once) {
 3168                                 ena_log(adapter->pdev, WARN,
 3169                                     "Retrieving ENI metrics is not supported.\n");
 3170                                 print_once = false;
 3171                         } else {
 3172                                 ena_log(adapter->pdev, DBG,
 3173                                     "Retrieving ENI metrics is not supported.\n");
 3174                         }
 3175                 } else {
 3176                         ena_log(adapter->pdev, ERR,
 3177                             "Failed to get ENI metrics: %d\n", rc);
 3178                 }
 3179         }
 3180 
 3181         return (rc);
 3182 }
 3183 
 3184 static void
 3185 ena_timer_service(void *data)
 3186 {
 3187         struct ena_adapter *adapter = (struct ena_adapter *)data;
 3188         struct ena_admin_host_info *host_info =
 3189             adapter->ena_dev->host_attr.host_info;
 3190 
 3191         check_for_missing_keep_alive(adapter);
 3192 
 3193         check_for_admin_com_state(adapter);
 3194 
 3195         check_for_missing_completions(adapter);
 3196 
 3197         check_for_empty_rx_ring(adapter);
 3198 
 3199         /*
 3200          * User-controlled update of the ENI metrics.
 3201          * If the interval was set to 0, then the stats shouldn't be updated
 3202          * at all.
 3203          * Otherwise, wait 'eni_metrics_sample_interval' seconds before
 3204          * updating the stats.
 3205          * As the timer service is executed every second, it's enough to
 3206          * increment the appropriate counter each time the timer service runs.
 3207          */
 3208         if ((adapter->eni_metrics_sample_interval != 0) &&
 3209             (++adapter->eni_metrics_sample_interval_cnt >=
 3210              adapter->eni_metrics_sample_interval)) {
 3211                 /*
 3212                  * There is no race with other admin queue calls, as:
 3213                  *   - Timer service runs after interface is up, so all
 3214                  *     configuration calls to the admin queue are finished.
 3215                  *   - After interface is up, the driver doesn't use (at least
 3216                  *     for now) other functions writing to the admin queue.
 3217                  *
 3218                  * It may change in the future, so in that situation, the lock
 3219                  * will be needed. ENA_LOCK_*() cannot be used for that purpose,
 3220                  * as callout ena_timer_service is protected by them. It could
 3221                  * lead to the deadlock if callout_drain() would hold the lock
 3222                  * before ena_copy_eni_metrics() was executed. It's advised to
 3223                  * use separate lock in that situation which will be used only
 3224                  * for the admin queue.
 3225                  */
 3226                 (void)ena_copy_eni_metrics(adapter);
 3227                 adapter->eni_metrics_sample_interval_cnt = 0;
 3228         }
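              /*
               * Editor's note (illustrative): because this callback runs once
               * per second, an eni_metrics_sample_interval of 10 makes the
               * counter hit the threshold on the 10th tick, so the ENI
               * metrics are refreshed roughly every 10 seconds and the
               * counter is reset.
               */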
 3229 
 3230 
 3231         if (host_info != NULL)
 3232                 ena_update_host_info(host_info, adapter->ifp);
 3233 
 3234         if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
 3235                 ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
 3236                 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
 3237                 return;
 3238         }
 3239 
 3240         /*
 3241          * Schedule another timeout one second from now.
 3242          */
 3243         callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
 3244 }
 3245 
 3246 void
 3247 ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 3248 {
 3249         if_t ifp = adapter->ifp;
 3250         struct ena_com_dev *ena_dev = adapter->ena_dev;
 3251         bool dev_up;
 3252 
 3253         if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
 3254                 return;
 3255 
 3256         if_link_state_change(ifp, LINK_STATE_DOWN);
 3257 
 3258         callout_drain(&adapter->timer_service);
 3259 
 3260         dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
 3261         if (dev_up)
 3262                 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
 3263 
 3264         if (!graceful)
 3265                 ena_com_set_admin_running_state(ena_dev, false);
 3266 
 3267         if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
 3268                 ena_down(adapter);
 3269 
 3270         /*
 3271          * Stop the device from sending AENQ events (if the device was up and
 3272          * the trigger reset was on, ena_down already performed a device reset).
 3273          */
 3274         if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
 3275                 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
 3276 
 3277         ena_free_mgmnt_irq(adapter);
 3278 
 3279         ena_disable_msix(adapter);
 3280 
 3281         /*
 3282          * IO rings resources should be freed because `ena_restore_device()`
 3283          * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
 3284          * vectors. The amount of MSIX vectors after destroy-restore may be
 3285          * different than before. Therefore, IO rings resources should be
 3286          * established from scratch each time.
 3287          */
 3288         ena_free_all_io_rings_resources(adapter);
 3289 
 3290         ena_com_abort_admin_commands(ena_dev);
 3291 
 3292         ena_com_wait_for_abort_completion(ena_dev);
 3293 
 3294         ena_com_admin_destroy(ena_dev);
 3295 
 3296         ena_com_mmio_reg_read_request_destroy(ena_dev);
 3297 
 3298         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 3299 
 3300         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
 3301         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
 3302 }
 3303 
 3304 static int
 3305 ena_device_validate_params(struct ena_adapter *adapter,
 3306     struct ena_com_dev_get_features_ctx *get_feat_ctx)
 3307 {
 3308 
 3309         if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
 3310             ETHER_ADDR_LEN) != 0) {
 3311                 ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
 3312                 return (EINVAL);
 3313         }
 3314 
 3315         if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
 3316                 ena_log(adapter->pdev, ERR,
 3317                     "Error, device max mtu is smaller than ifp MTU\n");
 3318                 return (EINVAL);
 3319         }
 3320 
 3321         return (0);
 3322 }
 3323 
 3324 int
 3325 ena_restore_device(struct ena_adapter *adapter)
 3326 {
 3327         struct ena_com_dev_get_features_ctx get_feat_ctx;
 3328         struct ena_com_dev *ena_dev = adapter->ena_dev;
 3329         if_t ifp = adapter->ifp;
 3330         device_t dev = adapter->pdev;
 3331         int wd_active;
 3332         int rc;
 3333 
 3334         ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
 3335 
 3336         rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
 3337         if (rc != 0) {
 3338                 ena_log(dev, ERR, "Cannot initialize device\n");
 3339                 goto err;
 3340         }
 3341         /*
 3342          * Only enable WD if it was enabled before the reset, so it won't
 3343          * override the value set by the user via sysctl.
 3344          */
 3345         if (adapter->wd_active != 0)
 3346                 adapter->wd_active = wd_active;
 3347 
 3348         rc = ena_device_validate_params(adapter, &get_feat_ctx);
 3349         if (rc != 0) {
 3350                 ena_log(dev, ERR, "Validation of device parameters failed\n");
 3351                 goto err_device_destroy;
 3352         }
 3353 
 3354         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
 3355         /* Make sure we don't have a race with AENQ Links state handler */
 3356         if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
 3357                 if_link_state_change(ifp, LINK_STATE_UP);
 3358 
 3359         rc = ena_enable_msix_and_set_admin_interrupts(adapter);
 3360         if (rc != 0) {
 3361                 ena_log(dev, ERR, "Enable MSI-X failed\n");
 3362                 goto err_device_destroy;
 3363         }
 3364 
 3365         /*
 3366          * Effective value of used MSIX vectors should be the same as before
 3367          * `ena_destroy_device()`, if possible, or closest to it if less vectors
 3368          * are available.
 3369          */
 3370         if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
 3371                 adapter->num_io_queues =
 3372                     adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
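              /*
               * Editor's sketch (illustrative, assuming a single admin
               * vector): if only 9 MSI-X vectors could be re-allocated after
               * the reset, then 9 - 1 == 8 IO queues remain usable, and a
               * previously configured num_io_queues of 16 would be clamped
               * to 8 by the check above.
               */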
 3373 
 3374         /* Re-initialize rings basic information */
 3375         ena_init_io_rings(adapter);
 3376 
 3377         /* If the interface was up before the reset bring it up */
 3378         if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
 3379                 rc = ena_up(adapter);
 3380                 if (rc != 0) {
 3381                         ena_log(dev, ERR, "Failed to create I/O queues\n");
 3382                         goto err_disable_msix;
 3383                 }
 3384         }
 3385 
 3386         /* Indicate that device is running again and ready to work */
 3387         ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
 3388 
 3389         if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
 3390                 /*
 3391                  * As the AENQ handlers weren't executed during the reset
 3392                  * because the flag ENA_FLAG_DEVICE_RUNNING was turned off,
 3393                  * the timestamp must be updated again. That will prevent the
 3394                  * next reset caused by a missing keep alive.
 3395                  */
 3396                 adapter->keep_alive_timestamp = getsbinuptime();
 3397                 callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
 3398                     ena_timer_service, (void *)adapter, 0);
 3399         }
 3400         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
 3401 
 3402         ena_log(dev, INFO,
 3403             "Device reset completed successfully, Driver info: %s\n", ena_version);
 3404 
 3405         return (rc);
 3406 
 3407 err_disable_msix:
 3408         ena_free_mgmnt_irq(adapter);
 3409         ena_disable_msix(adapter);
 3410 err_device_destroy:
 3411         ena_com_abort_admin_commands(ena_dev);
 3412         ena_com_wait_for_abort_completion(ena_dev);
 3413         ena_com_admin_destroy(ena_dev);
 3414         ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
 3415         ena_com_mmio_reg_read_request_destroy(ena_dev);
 3416 err:
 3417         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
 3418         ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
 3419         ena_log(dev, ERR, "Reset attempt failed. Cannot reset the device\n");
 3420 
 3421         return (rc);
 3422 }
 3423 
 3424 static void
 3425 ena_reset_task(void *arg, int pending)
 3426 {
 3427         struct ena_adapter *adapter = (struct ena_adapter *)arg;
 3428 
 3429         ENA_LOCK_LOCK();
 3430         if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
 3431                 ena_destroy_device(adapter, false);
 3432                 ena_restore_device(adapter);
 3433         }
 3434         ENA_LOCK_UNLOCK();
 3435 }
 3436 
 3437 /**
 3438  * ena_attach - Device Initialization Routine
 3439  * @pdev: device information struct
 3440  *
 3441  * Returns 0 on success, an error code otherwise.
 3442  *
 3443  * ena_attach initializes an adapter identified by a device structure.
 3444  * The OS initialization, configuration of the adapter's private structure,
 3445  * and a hardware reset all occur here.
 3446  **/
 3447 static int
 3448 ena_attach(device_t pdev)
 3449 {
 3450         struct ena_com_dev_get_features_ctx get_feat_ctx;
 3451         struct ena_llq_configurations llq_config;
 3452         struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 3453         static int version_printed;
 3454         struct ena_adapter *adapter;
 3455         struct ena_com_dev *ena_dev = NULL;
 3456         uint32_t max_num_io_queues;
 3457         int msix_rid;
 3458         int rid, rc;
 3459 
 3460         adapter = device_get_softc(pdev);
 3461         adapter->pdev = pdev;
 3462 
 3463         /*
 3464          * Set up the timer service - driver is responsible for avoiding
 3465          * concurrency, as the callout won't be using any locking inside.
 3466          */
 3467         callout_init(&adapter->timer_service, true);
 3468         adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
 3469         adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
 3470         adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
 3471         adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
 3472 
 3473         if (version_printed++ == 0)
 3474                 ena_log(pdev, INFO, "%s\n", ena_version);
 3475 
 3476         /* Allocate memory for ena_dev structure */
 3477         ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
 3478             M_WAITOK | M_ZERO);
 3479 
 3480         adapter->ena_dev = ena_dev;
 3481         ena_dev->dmadev = pdev;
 3482 
 3483         rid = PCIR_BAR(ENA_REG_BAR);
 3484         adapter->memory = NULL;
 3485         adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
 3486             &rid, RF_ACTIVE);
 3487         if (unlikely(adapter->registers == NULL)) {
 3488                 ena_log(pdev, ERR,
 3489                     "unable to allocate bus resource: registers!\n");
 3490                 rc = ENOMEM;
 3491                 goto err_dev_free;
 3492         }
 3493 
 3494         /* MSIx vector table may reside on BAR0 with registers or on BAR1. */
 3495         msix_rid = pci_msix_table_bar(pdev);
 3496         if (msix_rid != rid) {
 3497                 adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
 3498                     &msix_rid, RF_ACTIVE);
 3499                 if (unlikely(adapter->msix == NULL)) {
 3500                         ena_log(pdev, ERR,
 3501                             "unable to allocate bus resource: msix!\n");
 3502                         rc = ENOMEM;
 3503                         goto err_pci_free;
 3504                 }
 3505                 adapter->msix_rid = msix_rid;
 3506         }
 3507 
 3508         ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
 3509             M_WAITOK | M_ZERO);
 3510 
 3511         /* Store register resources */
 3512         ((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
 3513             rman_get_bustag(adapter->registers);
 3514         ((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
 3515             rman_get_bushandle(adapter->registers);
 3516 
 3517         if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
 3518                 ena_log(pdev, ERR, "failed to map registers bar\n");
 3519                 rc = ENXIO;
 3520                 goto err_bus_free;
 3521         }
 3522 
 3523         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 3524 
 3525         /* Initially clear all the flags */
 3526         ENA_FLAG_ZERO(adapter);
 3527 
 3528         /* Device initialization */
 3529         rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
 3530         if (unlikely(rc != 0)) {
 3531                 ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
 3532                 rc = ENXIO;
 3533                 goto err_bus_free;
 3534         }
 3535 
 3536         set_default_llq_configurations(&llq_config, &get_feat_ctx.llq);
 3537 
 3538         rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
 3539              &llq_config);
 3540         if (unlikely(rc != 0)) {
 3541                 ena_log(pdev, ERR, "failed to set placement policy\n");
 3542                 goto err_com_free;
 3543         }
 3544 
 3545         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 3546                 adapter->disable_meta_caching =
 3547                     !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
 3548                     BIT(ENA_ADMIN_DISABLE_META_CACHING));
 3549 
 3550         adapter->keep_alive_timestamp = getsbinuptime();
 3551 
 3552         adapter->tx_offload_cap = get_feat_ctx.offload.tx;
 3553 
 3554         memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
 3555             ETHER_ADDR_LEN);
 3556 
 3557         calc_queue_ctx.pdev = pdev;
 3558         calc_queue_ctx.ena_dev = ena_dev;
 3559         calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
 3560 
 3561         /* Calculate initial and maximum IO queue number and size */
 3562         max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
 3563             &get_feat_ctx);
 3564         rc = ena_calc_io_queue_size(&calc_queue_ctx);
 3565         if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
 3566                 rc = EFAULT;
 3567                 goto err_com_free;
 3568         }
 3569 
 3570         adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
 3571         adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
 3572         adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
 3573         adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
 3574         adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
 3575         adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
 3576 
 3577         adapter->max_num_io_queues = max_num_io_queues;
 3578 
 3579         adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
 3580 
 3581         adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
 3582 
 3583         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 3584 
 3585         /* set up dma tags for rx and tx buffers */
 3586         rc = ena_setup_tx_dma_tag(adapter);
 3587         if (unlikely(rc != 0)) {
 3588                 ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
 3589                 goto err_com_free;
 3590         }
 3591 
 3592         rc = ena_setup_rx_dma_tag(adapter);
 3593         if (unlikely(rc != 0)) {
 3594                 ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
 3595                 goto err_tx_tag_free;
 3596         }
 3597 
 3598         /*
 3599          * The amount of requested MSIX vectors is equal to
 3600          * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
 3601          * number of admin queue interrupts. The former is initially determined
 3602          * by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
 3603          * achieved if there are not enough system resources. By default, the
 3604          * number of effectively used IO queues is the same but later on it can
 3605          * be limited by the user using sysctl interface.
 3606          */
 3607         rc = ena_enable_msix_and_set_admin_interrupts(adapter);
 3608         if (unlikely(rc != 0)) {
 3609                 ena_log(pdev, ERR,
 3610                     "Failed to enable and set the admin interrupts\n");
 3611                 goto err_io_free;
 3612         }
 3613         /* By default, all allocated MSI-X vectors are actively used */
 3614         adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
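              /*
               * Editor's sketch (illustrative, assuming a single admin
               * vector): with max_num_io_queues == 8 the driver asks for 9
               * vectors; if the system grants only 5, msix_vecs == 5 and
               * num_io_queues starts out as 5 - 1 == 4 until changed via
               * sysctl.
               */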
 3615 
 3616         /* initialize rings basic information */
 3617         ena_init_io_rings(adapter);
 3618 
 3619         /* setup network interface */
 3620         rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
 3621         if (unlikely(rc != 0)) {
 3622                 ena_log(pdev, ERR, "Error with network interface setup\n");
 3623                 goto err_msix_free;
 3624         }
 3625 
 3626         /* Initialize reset task queue */
 3627         TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
 3628         adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
 3629             M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
 3630         taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
 3631             "%s rstq", device_get_nameunit(adapter->pdev));
 3632 
 3633         /* Initialize statistics */
 3634         ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
 3635             sizeof(struct ena_stats_dev));
 3636         ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
 3637             sizeof(struct ena_hw_stats));
 3638         ena_sysctl_add_nodes(adapter);
 3639 
 3640 #ifdef DEV_NETMAP
 3641         rc = ena_netmap_attach(adapter);
 3642         if (rc != 0) {
 3643                 ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
 3644                 goto err_detach;
 3645         }
 3646 #endif /* DEV_NETMAP */
 3647 
 3648         /* Tell the stack that the interface is not active */
 3649         if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
 3650         ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
 3651 
 3652         return (0);
 3653 
 3654 #ifdef DEV_NETMAP
 3655 err_detach:
 3656         ether_ifdetach(adapter->ifp);
 3657 #endif /* DEV_NETMAP */
 3658 err_msix_free:
 3659         ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
 3660         ena_free_mgmnt_irq(adapter);
 3661         ena_disable_msix(adapter);
 3662 err_io_free:
 3663         ena_free_all_io_rings_resources(adapter);
 3664         ena_free_rx_dma_tag(adapter);
 3665 err_tx_tag_free:
 3666         ena_free_tx_dma_tag(adapter);
 3667 err_com_free:
 3668         ena_com_admin_destroy(ena_dev);
 3669         ena_com_delete_host_info(ena_dev);
 3670         ena_com_mmio_reg_read_request_destroy(ena_dev);
 3671 err_bus_free:
 3672         free(ena_dev->bus, M_DEVBUF);
 3673 err_pci_free:
 3674         ena_free_pci_resources(adapter);
 3675 err_dev_free:
 3676         free(ena_dev, M_DEVBUF);
 3677 
 3678         return (rc);
 3679 }
 3680 
 3681 /**
 3682  * ena_detach - Device Removal Routine
 3683  * @pdev: device information struct
 3684  *
 3685  * ena_detach is called by the device subsystem to alert the driver
 3686  * that it should release a PCI device.
 3687  **/
 3688 static int
 3689 ena_detach(device_t pdev)
 3690 {
 3691         struct ena_adapter *adapter = device_get_softc(pdev);
 3692         struct ena_com_dev *ena_dev = adapter->ena_dev;
 3693         int rc;
 3694 
 3695         /* Make sure VLANs are not using the driver */
 3696         if (adapter->ifp->if_vlantrunk != NULL) {
 3697                 ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
 3698                 return (EBUSY);
 3699         }
 3700 
 3701         ether_ifdetach(adapter->ifp);
 3702 
 3703         /* Stop timer service */
 3704         ENA_LOCK_LOCK();
 3705         callout_drain(&adapter->timer_service);
 3706         ENA_LOCK_UNLOCK();
 3707 
 3708         /* Release reset task */
 3709         while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
 3710                 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
 3711         taskqueue_free(adapter->reset_tq);
 3712 
 3713         ENA_LOCK_LOCK();
 3714         ena_down(adapter);
 3715         ena_destroy_device(adapter, true);
 3716         ENA_LOCK_UNLOCK();
 3717 
 3718         /* Restore unregistered sysctl queue nodes. */
 3719         ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
 3720             adapter->max_num_io_queues);
 3721 
 3722 #ifdef DEV_NETMAP
 3723         netmap_detach(adapter->ifp);
 3724 #endif /* DEV_NETMAP */
 3725 
 3726         ena_free_counters((counter_u64_t *)&adapter->hw_stats,
 3727             sizeof(struct ena_hw_stats));
 3728         ena_free_counters((counter_u64_t *)&adapter->dev_stats,
 3729             sizeof(struct ena_stats_dev));
 3730 
 3731         rc = ena_free_rx_dma_tag(adapter);
 3732         if (unlikely(rc != 0))
 3733                 ena_log(adapter->pdev, WARN,
 3734                     "Unmapped RX DMA tag associations\n");
 3735 
 3736         rc = ena_free_tx_dma_tag(adapter);
 3737         if (unlikely(rc != 0))
 3738                 ena_log(adapter->pdev, WARN,
 3739                     "Unmapped TX DMA tag associations\n");
 3740 
 3741         ena_free_irqs(adapter);
 3742 
 3743         ena_free_pci_resources(adapter);
 3744 
 3745         if (adapter->rss_indir != NULL)
 3746                 free(adapter->rss_indir, M_DEVBUF);
 3747 
 3748         if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
 3749                 ena_com_rss_destroy(ena_dev);
 3750 
 3751         ena_com_delete_host_info(ena_dev);
 3752 
 3753         if_free(adapter->ifp);
 3754 
 3755         free(ena_dev->bus, M_DEVBUF);
 3756 
 3757         free(ena_dev, M_DEVBUF);
 3758 
 3759         return (bus_generic_detach(pdev));
 3760 }
 3761 
 3762 /******************************************************************************
 3763  ******************************** AENQ Handlers *******************************
 3764  *****************************************************************************/
 3765 /**
 3766  * ena_update_on_link_change:
 3767  * Notify the network interface about the change in link status
 3768  **/
 3769 static void
 3770 ena_update_on_link_change(void *adapter_data,
 3771     struct ena_admin_aenq_entry *aenq_e)
 3772 {
 3773         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
 3774         struct ena_admin_aenq_link_change_desc *aenq_desc;
 3775         int status;
 3776         if_t ifp;
 3777 
 3778         aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
 3779         ifp = adapter->ifp;
 3780         status = aenq_desc->flags &
 3781             ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
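              /*
               * Editor's note (illustrative): the link state arrives as a
               * single flag bit in the AENQ descriptor, so after masking, a
               * non-zero status means link-up and zero means link-down.
               */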
 3782 
 3783         if (status != 0) {
 3784                 ena_log(adapter->pdev, INFO, "link is UP\n");
 3785                 ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
 3786                 if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
 3787                         if_link_state_change(ifp, LINK_STATE_UP);
 3788         } else {
 3789                 ena_log(adapter->pdev, INFO, "link is DOWN\n");
 3790                 if_link_state_change(ifp, LINK_STATE_DOWN);
 3791                 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
 3792         }
 3793 }
 3794 
 3795 static void
 3796 ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
 3797 {
 3798         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
 3799         struct ena_admin_ena_hw_hints *hints;
 3800 
 3801         ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
 3802             adapter->ena_dev, "Invalid group(%x) expected %x\n",
 3803             aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
 3804 
 3805         switch (aenq_e->aenq_common_desc.syndrome) {
 3806         case ENA_ADMIN_UPDATE_HINTS:
 3807                 hints =
 3808                     (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
 3809                 ena_update_hints(adapter, hints);
 3810                 break;
 3811         default:
 3812                 ena_log(adapter->pdev, ERR,
 3813                     "Invalid AENQ notification syndrome %d\n",
 3814                     aenq_e->aenq_common_desc.syndrome);
 3815         }
 3816 }
 3817 
 3818 static void
 3819 ena_lock_init(void *arg)
 3820 {
 3821         ENA_LOCK_INIT();
 3822 }
 3823 SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
 3824 
 3825 static void
 3826 ena_lock_uninit(void *arg)
 3827 {
 3828         ENA_LOCK_DESTROY();
 3829 }
 3830 SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
 3831 
 3832 /**
 3833  * This handler will be called for an unknown event group or unimplemented handlers.
 3834  **/
 3835 static void
 3836 unimplemented_aenq_handler(void *adapter_data,
 3837     struct ena_admin_aenq_entry *aenq_e)
 3838 {
 3839         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
 3840 
 3841         ena_log(adapter->pdev, ERR,
 3842             "Unknown event was received or event with unimplemented handler\n");
 3843 }
 3844 
 3845 static struct ena_aenq_handlers aenq_handlers = {
 3846     .handlers = {
 3847             [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
 3848             [ENA_ADMIN_NOTIFICATION] = ena_notification,
 3849             [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
 3850     },
 3851     .unimplemented_handler = unimplemented_aenq_handler
 3852 };
 3853 
 3854 /*********************************************************************
 3855  *  FreeBSD Device Interface Entry Points
 3856  *********************************************************************/
 3857 
 3858 static device_method_t ena_methods[] = {
 3859     /* Device interface */
 3860     DEVMETHOD(device_probe, ena_probe),
 3861     DEVMETHOD(device_attach, ena_attach),
 3862     DEVMETHOD(device_detach, ena_detach),
 3863     DEVMETHOD_END
 3864 };
 3865 
 3866 static driver_t ena_driver = {
 3867     "ena", ena_methods, sizeof(struct ena_adapter),
 3868 };
 3869 
 3870 devclass_t ena_devclass;
 3871 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
 3872 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
 3873     nitems(ena_vendor_info_array) - 1);
 3874 MODULE_DEPEND(ena, pci, 1, 1, 1);
 3875 MODULE_DEPEND(ena, ether, 1, 1, 1);
 3876 #ifdef DEV_NETMAP
 3877 MODULE_DEPEND(ena, netmap, 1, 1, 1);
 3878 #endif /* DEV_NETMAP */
 3879 
 3880 /*********************************************************************/
