FreeBSD/Linux Kernel Cross Reference
sys/dev/vnic/nicvf_queues.c


    1 /*
    2  * Copyright (C) 2015 Cavium Inc.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD$
   27  *
   28  */
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "opt_inet.h"
   33 #include "opt_inet6.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/bitset.h>
   38 #include <sys/bitstring.h>
   39 #include <sys/buf_ring.h>
   40 #include <sys/bus.h>
   41 #include <sys/endian.h>
   42 #include <sys/kernel.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/pciio.h>
   47 #include <sys/pcpu.h>
   48 #include <sys/proc.h>
   49 #include <sys/sockio.h>
   50 #include <sys/socket.h>
   51 #include <sys/stdatomic.h>
   52 #include <sys/cpuset.h>
   53 #include <sys/lock.h>
   54 #include <sys/mutex.h>
   55 #include <sys/smp.h>
   56 #include <sys/taskqueue.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/pmap.h>
   60 
   61 #include <machine/bus.h>
   62 #include <machine/vmparam.h>
   63 
   64 #include <net/if.h>
   65 #include <net/if_var.h>
   66 #include <net/if_media.h>
   67 #include <net/ifq.h>
   68 #include <net/bpf.h>
   69 #include <net/ethernet.h>
   70 
   71 #include <netinet/in_systm.h>
   72 #include <netinet/in.h>
   73 #include <netinet/if_ether.h>
   74 #include <netinet/ip.h>
   75 #include <netinet/ip6.h>
   76 #include <netinet/sctp.h>
   77 #include <netinet/tcp.h>
   78 #include <netinet/tcp_lro.h>
   79 #include <netinet/udp.h>
   80 
   81 #include <netinet6/ip6_var.h>
   82 
   83 #include <dev/pci/pcireg.h>
   84 #include <dev/pci/pcivar.h>
   85 
   86 #include "thunder_bgx.h"
   87 #include "nic_reg.h"
   88 #include "nic.h"
   89 #include "q_struct.h"
   90 #include "nicvf_queues.h"
   91 
   92 #define DEBUG
   93 #undef DEBUG
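      /*
       * DEBUG is defined and immediately undefined above; removing the
       * #undef enables the dprintf() tracing used throughout this file.
       */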
   94 
   95 #ifdef DEBUG
   96 #define dprintf(dev, fmt, ...)  device_printf(dev, fmt, ##__VA_ARGS__)
   97 #else
   98 #define dprintf(dev, fmt, ...)
   99 #endif
  100 
  101 MALLOC_DECLARE(M_NICVF);
  102 
  103 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
  104 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
  105 static void nicvf_sq_disable(struct nicvf *, int);
  106 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
  107 static void nicvf_put_sq_desc(struct snd_queue *, int);
  108 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
  109     boolean_t);
  110 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
  111 
  112 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
  113 
  114 static void nicvf_rbdr_task(void *, int);
  115 static void nicvf_rbdr_task_nowait(void *, int);
  116 
  117 struct rbuf_info {
  118         bus_dma_tag_t   dmat;
  119         bus_dmamap_t    dmap;
  120         struct mbuf *   mbuf;
  121 };
  122 
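      /*
       * The rbuf_info metadata lives in the 128-byte headroom at the very
       * beginning of each receive buffer, just before the address that is
       * handed to the hardware.  Stepping back NICVF_RCV_BUF_ALIGN_BYTES
       * from that address recovers the metadata (see GET_RBUF_INFO below
       * and nicvf_alloc_rcv_buffer()).
       */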
  123 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
  124 
  125 /* Poll a register for a specific value */
  126 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
  127                           uint64_t reg, int bit_pos, int bits, int val)
  128 {
  129         uint64_t bit_mask;
  130         uint64_t reg_val;
  131         int timeout = 10;
  132 
  133         bit_mask = (1UL << bits) - 1;
  134         bit_mask = (bit_mask << bit_pos);
  135 
  136         while (timeout) {
  137                 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
  138                 if (((reg_val & bit_mask) >> bit_pos) == val)
  139                         return (0);
  140 
  141                 DELAY(1000);
  142                 timeout--;
  143         }
  144         device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
  145         return (ETIMEDOUT);
  146 }
  147 
  148 /* Callback for bus_dmamap_load() */
  149 static void
  150 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  151 {
  152         bus_addr_t *paddr;
  153 
  154         KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
  155         paddr = arg;
  156         *paddr = segs->ds_addr;
  157 }
  158 
  159 /* Allocate memory for a queue's descriptors */
  160 static int
  161 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
  162     int q_len, int desc_size, int align_bytes)
  163 {
  164         int err, err_dmat;
  165 
  166         /* Create DMA tag first */
  167         err = bus_dma_tag_create(
  168             bus_get_dma_tag(nic->dev),          /* parent tag */
  169             align_bytes,                        /* alignment */
  170             0,                                  /* boundary */
  171             BUS_SPACE_MAXADDR,                  /* lowaddr */
  172             BUS_SPACE_MAXADDR,                  /* highaddr */
  173             NULL, NULL,                         /* filtfunc, filtfuncarg */
  174             (q_len * desc_size),                /* maxsize */
  175             1,                                  /* nsegments */
  176             (q_len * desc_size),                /* maxsegsize */
  177             0,                                  /* flags */
  178             NULL, NULL,                         /* lockfunc, lockfuncarg */
  179             &dmem->dmat);                       /* dmat */
  180 
  181         if (err != 0) {
  182                 device_printf(nic->dev,
  183                     "Failed to create busdma tag for descriptors ring\n");
  184                 return (err);
  185         }
  186 
  187         /* Allocate a segment of contiguous, DMA-safe memory */
  188         err = bus_dmamem_alloc(
  189             dmem->dmat,                         /* DMA tag */
  190             &dmem->base,                        /* virtual address */
  191             (BUS_DMA_NOWAIT | BUS_DMA_ZERO),    /* flags */
  192             &dmem->dmap);                       /* DMA map */
  193         if (err != 0) {
  194                 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
  195                     "descriptors ring\n");
  196                 goto dmamem_fail;
  197         }
  198 
  199         err = bus_dmamap_load(
  200             dmem->dmat,
  201             dmem->dmap,
  202             dmem->base,
  203             (q_len * desc_size),                /* allocation size */
  204             nicvf_dmamap_q_cb,                  /* map to DMA address cb. */
  205             &dmem->phys_base,                   /* physical address */
  206             BUS_DMA_NOWAIT);
  207         if (err != 0) {
  208                 device_printf(nic->dev,
  209                     "Cannot load DMA map of descriptors ring\n");
  210                 goto dmamap_fail;
  211         }
  212 
  213         dmem->q_len = q_len;
  214         dmem->size = (desc_size * q_len);
  215 
  216         return (0);
  217 
  218 dmamap_fail:
  219         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  220         dmem->phys_base = 0;
  221 dmamem_fail:
  222         err_dmat = bus_dma_tag_destroy(dmem->dmat);
  223         dmem->base = NULL;
  224         KASSERT(err_dmat == 0,
  225             ("%s: Trying to destroy BUSY DMA tag", __func__));
  226 
  227         return (err);
  228 }
  229 
  230 /* Free queue's descriptor memory */
  231 static void
  232 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
  233 {
  234         int err;
  235 
  236         if ((dmem == NULL) || (dmem->base == NULL))
  237                 return;
  238 
  239         /* Unload a map */
  240         bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
  241         bus_dmamap_unload(dmem->dmat, dmem->dmap);
  242         /* Free DMA memory */
  243         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  244         /* Destroy DMA tag */
  245         err = bus_dma_tag_destroy(dmem->dmat);
  246 
  247         KASSERT(err == 0,
  248             ("%s: Trying to destroy BUSY DMA tag", __func__));
  249 
  250         dmem->phys_base = 0;
  251         dmem->base = NULL;
  252 }
  253 
  254 /*
  255  * Allocate a buffer for packet reception.
  256  * The HW returns the memory address to which the packet was DMA'ed, not a
  257  * pointer into the RBDR ring, so save the buffer address at the start of
  258  * the fragment and align the start address to a cache-line boundary.
  259  */
  260 static __inline int
  261 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
  262     bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
  263 {
  264         struct mbuf *mbuf;
  265         struct rbuf_info *rinfo;
  266         bus_dma_segment_t segs[1];
  267         int nsegs;
  268         int err;
  269 
  270         mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
  271         if (mbuf == NULL)
  272                 return (ENOMEM);
  273 
  274         /*
  275          * The length is the actual data length plus one 128-byte cache
  276          * line used as room for the rbuf_info structure.
  277          */
  278         mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
  279 
  280         err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
  281             &nsegs, BUS_DMA_NOWAIT);
  282         if (err != 0) {
  283                 device_printf(nic->dev,
  284                     "Failed to map mbuf into DMA visible memory, err: %d\n",
  285                     err);
  286                 m_freem(mbuf);
  287                 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
  288                 return (err);
  289         }
  290         if (nsegs != 1)
  291                 panic("Unexpected number of DMA segments for RB: %d", nsegs);
  292         /*
  293          * Now use the room for rbuf_info structure
  294          * and adjust mbuf data and length.
  295          */
  296         rinfo = (struct rbuf_info *)mbuf->m_data;
  297         m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
  298 
  299         rinfo->dmat = rbdr->rbdr_buff_dmat;
  300         rinfo->dmap = dmap;
  301         rinfo->mbuf = mbuf;
  302 
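              /*
               * Report the DMA address just past the metadata line; the
               * receive path later converts it back with PHYS_TO_DMAP() and
               * steps back NICVF_RCV_BUF_ALIGN_BYTES to recover rinfo.
               */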
  303         *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
  304 
  305         return (0);
  306 }
  307 
  308 /* Retrieve mbuf for received packet */
  309 static struct mbuf *
  310 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
  311 {
  312         struct mbuf *mbuf;
  313         struct rbuf_info *rinfo;
  314 
  315         /* Get buffer start address and alignment offset */
  316         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
  317 
  318         /* Now retrieve mbuf to give to stack */
  319         mbuf = rinfo->mbuf;
  320         if (__predict_false(mbuf == NULL)) {
  321                 panic("%s: Received packet fragment with NULL mbuf",
  322                     device_get_nameunit(nic->dev));
  323         }
  324         /*
  325          * Clear the mbuf in the descriptor to indicate
  326          * that this slot is processed and free to use.
  327          */
  328         rinfo->mbuf = NULL;
  329 
  330         bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
  331         bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
  332 
  333         return (mbuf);
  334 }
  335 
  336 /* Allocate RBDR ring and populate receive buffers */
  337 static int
  338 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
  339     int buf_size, int qidx)
  340 {
  341         bus_dmamap_t dmap;
  342         bus_addr_t rbuf;
  343         struct rbdr_entry_t *desc;
  344         int idx;
  345         int err;
  346 
  347         /* Allocate rbdr descriptors ring */
  348         err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
  349             sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
  350         if (err != 0) {
  351                 device_printf(nic->dev,
  352                     "Failed to create RBDR descriptors ring\n");
  353                 return (err);
  354         }
  355 
  356         rbdr->desc = rbdr->dmem.base;
  357         /*
  358          * Buffer size has to be a multiple of 128 bytes.
  359          * Make room for metadata the size of one cache line (128 bytes).
  360          */
  361         rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
  362         rbdr->enable = TRUE;
  363         rbdr->thresh = RBDR_THRESH;
  364         rbdr->nic = nic;
  365         rbdr->idx = qidx;
  366 
  367         /*
  368          * Create DMA tag for Rx buffers.
  369          * Each map created using this tag is intended to store the Rx payload
  370          * of one fragment plus a header structure containing rbuf_info (hence
  371          * the additional 128-byte line, since the RB must be a multiple of the
  372          * 128-byte cache line).
  373          */
  374         if (buf_size > MCLBYTES) {
  375                 device_printf(nic->dev,
  376                     "Buffer size too large for mbuf cluster\n");
  377                 return (EINVAL);
  378         }
  379         err = bus_dma_tag_create(
  380             bus_get_dma_tag(nic->dev),          /* parent tag */
  381             NICVF_RCV_BUF_ALIGN_BYTES,          /* alignment */
  382             0,                                  /* boundary */
  383             DMAP_MAX_PHYSADDR,                  /* lowaddr */
  384             DMAP_MIN_PHYSADDR,                  /* highaddr */
  385             NULL, NULL,                         /* filtfunc, filtfuncarg */
  386             roundup2(buf_size, MCLBYTES),       /* maxsize */
  387             1,                                  /* nsegments */
  388             roundup2(buf_size, MCLBYTES),       /* maxsegsize */
  389             0,                                  /* flags */
  390             NULL, NULL,                         /* lockfunc, lockfuncarg */
  391             &rbdr->rbdr_buff_dmat);             /* dmat */
  392 
  393         if (err != 0) {
  394                 device_printf(nic->dev,
  395                     "Failed to create busdma tag for RBDR buffers\n");
  396                 return (err);
  397         }
  398 
  399         rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
  400             ring_len, M_NICVF, (M_WAITOK | M_ZERO));
  401 
  402         for (idx = 0; idx < ring_len; idx++) {
  403                 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
  404                 if (err != 0) {
  405                         device_printf(nic->dev,
  406                             "Failed to create DMA map for RB\n");
  407                         return (err);
  408                 }
  409                 rbdr->rbdr_buff_dmaps[idx] = dmap;
  410 
  411                 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
  412                     DMA_BUFFER_LEN, &rbuf);
  413                 if (err != 0)
  414                         return (err);
  415 
  416                 desc = GET_RBDR_DESC(rbdr, idx);
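                      /*
                       * The descriptor stores the address shifted right by
                       * NICVF_RCV_BUF_ALIGN; the low-order bits can be dropped
                       * because the buffer is NICVF_RCV_BUF_ALIGN_BYTES aligned.
                       */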
  417                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  418         }
  419 
  420         /* Allocate taskqueue */
  421         TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
  422         TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
  423         rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
  424             taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
  425         taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
  426             device_get_nameunit(nic->dev));
  427 
  428         return (0);
  429 }
  430 
  431 /* Free RBDR ring and its receive buffers */
  432 static void
  433 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
  434 {
  435         struct mbuf *mbuf;
  436         struct queue_set *qs;
  437         struct rbdr_entry_t *desc;
  438         struct rbuf_info *rinfo;
  439         bus_addr_t buf_addr;
  440         int head, tail, idx;
  441         int err;
  442 
  443         qs = nic->qs;
  444 
  445         if ((qs == NULL) || (rbdr == NULL))
  446                 return;
  447 
  448         rbdr->enable = FALSE;
  449         if (rbdr->rbdr_taskq != NULL) {
  450                 /* Remove tasks */
  451                 while (taskqueue_cancel(rbdr->rbdr_taskq,
  452                     &rbdr->rbdr_task_nowait, NULL) != 0) {
  453                         /* Finish the nowait task first */
  454                         taskqueue_drain(rbdr->rbdr_taskq,
  455                             &rbdr->rbdr_task_nowait);
  456                 }
  457                 taskqueue_free(rbdr->rbdr_taskq);
  458                 rbdr->rbdr_taskq = NULL;
  459 
  460                 while (taskqueue_cancel(taskqueue_thread,
  461                     &rbdr->rbdr_task, NULL) != 0) {
  462                         /* Now finish the sleepable task */
  463                         taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
  464                 }
  465         }
  466 
  467         /*
  468          * Free all of the memory under the RB descriptors.
  469          * There are assumptions here:
  470          * 1. The corresponding RBDR is disabled
  471          *    - it is safe to operate using the head and tail indexes
  472          * 2. All buffers that were received have been properly freed by
  473          *    the receive handler
  474          *    - DMA maps and mbufs only need to be unloaded and freed for
  475          *      the unused descriptors
  476          */
  477         if (rbdr->rbdr_buff_dmat != NULL) {
  478                 head = rbdr->head;
  479                 tail = rbdr->tail;
  480                 while (head != tail) {
  481                         desc = GET_RBDR_DESC(rbdr, head);
  482                         buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  483                         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  484                         bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  485                         mbuf = rinfo->mbuf;
  486                         /* This will destroy everything including rinfo! */
  487                         m_freem(mbuf);
  488                         head++;
  489                         head &= (rbdr->dmem.q_len - 1);
  490                 }
  491                 /* Free tail descriptor */
  492                 desc = GET_RBDR_DESC(rbdr, tail);
  493                 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  494                 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  495                 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  496                 mbuf = rinfo->mbuf;
  497                 /* This will destroy everything including rinfo! */
  498                 m_freem(mbuf);
  499 
  500                 /* Destroy DMA maps */
  501                 for (idx = 0; idx < qs->rbdr_len; idx++) {
  502                         if (rbdr->rbdr_buff_dmaps[idx] == NULL)
  503                                 continue;
  504                         err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
  505                             rbdr->rbdr_buff_dmaps[idx]);
  506                         KASSERT(err == 0,
  507                             ("%s: Could not destroy DMA map for RB, desc: %d",
  508                             __func__, idx));
  509                         rbdr->rbdr_buff_dmaps[idx] = NULL;
  510                 }
  511 
  512                 /* Now destroy the tag */
  513                 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
  514                 KASSERT(err == 0,
  515                     ("%s: Trying to destroy BUSY DMA tag", __func__));
  516 
  517                 rbdr->head = 0;
  518                 rbdr->tail = 0;
  519         }
  520 
  521         /* Free RBDR ring */
  522         nicvf_free_q_desc_mem(nic, &rbdr->dmem);
  523 }
  524 
  525 /*
  526  * Refill receive buffer descriptors with new buffers.
  527  */
  528 static int
  529 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
  530 {
  531         struct nicvf *nic;
  532         struct queue_set *qs;
  533         int rbdr_idx;
  534         int tail, qcount;
  535         int refill_rb_cnt;
  536         struct rbdr_entry_t *desc;
  537         bus_dmamap_t dmap;
  538         bus_addr_t rbuf;
  539         boolean_t rb_alloc_fail;
  540         int new_rb;
  541 
  542         rb_alloc_fail = TRUE;
  543         new_rb = 0;
  544         nic = rbdr->nic;
  545         qs = nic->qs;
  546         rbdr_idx = rbdr->idx;
  547 
  548         /* Check if it's enabled */
  549         if (!rbdr->enable)
  550                 return (0);
  551 
  552         /* Get the number of descriptors to be refilled */
  553         qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
  554         qcount &= 0x7FFFF;
  555         /* The doorbell can be rung with at most the ring size minus 1 */
  556         if (qcount >= (qs->rbdr_len - 1)) {
  557                 rb_alloc_fail = FALSE;
  558                 goto out;
  559         } else
  560                 refill_rb_cnt = qs->rbdr_len - qcount - 1;
  561 
  562         /* Start filling descs from tail */
  563         tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
  564         while (refill_rb_cnt) {
  565                 tail++;
  566                 tail &= (rbdr->dmem.q_len - 1);
  567 
  568                 dmap = rbdr->rbdr_buff_dmaps[tail];
  569                 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
  570                     DMA_BUFFER_LEN, &rbuf)) {
  571                         /* Something went wrong. Give up. */
  572                         break;
  573                 }
  574                 desc = GET_RBDR_DESC(rbdr, tail);
  575                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  576                 refill_rb_cnt--;
  577                 new_rb++;
  578         }
  579 
  580         /* make sure all memory stores are done before ringing doorbell */
  581         wmb();
  582 
  583         /* Allocation failed unless every requested buffer was refilled */
  584         if (refill_rb_cnt == 0)
  585                 rb_alloc_fail = FALSE;
  586 
  587         /* Notify HW */
  588         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
  589                               rbdr_idx, new_rb);
  590 out:
  591         if (!rb_alloc_fail) {
  592                 /*
  593                  * Re-enable RBDR interrupts only
  594                  * if buffer allocation succeeded.
  595                  */
  596                 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
  597 
  598                 return (0);
  599         }
  600 
  601         return (ENOMEM);
  602 }
  603 
  604 /* Refill RBs even if sleep is needed to reclaim memory */
  605 static void
  606 nicvf_rbdr_task(void *arg, int pending)
  607 {
  608         struct rbdr *rbdr;
  609         int err;
  610 
  611         rbdr = (struct rbdr *)arg;
  612 
  613         err = nicvf_refill_rbdr(rbdr, M_WAITOK);
  614         if (__predict_false(err != 0)) {
  615                 panic("%s: Failed to refill RBs even when sleep enabled",
  616                     __func__);
  617         }
  618 }
  619 
  620 /* Refill RBs as soon as possible without waiting */
  621 static void
  622 nicvf_rbdr_task_nowait(void *arg, int pending)
  623 {
  624         struct rbdr *rbdr;
  625         int err;
  626 
  627         rbdr = (struct rbdr *)arg;
  628 
  629         err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
  630         if (err != 0) {
  631                 /*
  632                  * Schedule another, sleepable kernel task
  633                  * that is guaranteed to refill the buffers.
  634                  */
  635                 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
  636         }
  637 }
  638 
  639 static int
  640 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  641     struct cqe_rx_t *cqe_rx, int cqe_type)
  642 {
  643         struct mbuf *mbuf;
  644         struct rcv_queue *rq;
  645         int rq_idx;
  646         int err = 0;
  647 
  648         rq_idx = cqe_rx->rq_idx;
  649         rq = &nic->qs->rq[rq_idx];
  650 
  651         /* Check for errors */
  652         err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
  653         if (err && !cqe_rx->rb_cnt)
  654                 return (0);
  655 
  656         mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
  657         if (mbuf == NULL) {
  658                 dprintf(nic->dev, "Packet not received\n");
  659                 return (0);
  660         }
  661 
  662         /* If error packet */
  663         if (err != 0) {
  664                 m_freem(mbuf);
  665                 return (0);
  666         }
  667 
  668         if (rq->lro_enabled &&
  669             ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
  670             (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
  671             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
  672                 /*
  673                  * At this point it is known that there are no errors in the
  674                  * packet. Attempt to LRO enqueue. Send to stack if no resources
  675                  * or enqueue error.
  676                  */
  677                 if ((rq->lro.lro_cnt != 0) &&
  678                     (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
  679                         return (0);
  680         }
  681         /*
  682          * Push this packet to the stack later to avoid
  683          * unlocking the completion task in the middle of its work.
  684          */
  685         err = buf_ring_enqueue(cq->rx_br, mbuf);
  686         if (err != 0) {
  687                 /*
  688                  * Failed to enqueue this mbuf.
  689                  * We don't drop it, just schedule another task.
  690                  */
  691                 return (err);
  692         }
  693 
  694         return (0);
  695 }
  696 
  697 static void
  698 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  699     struct cqe_send_t *cqe_tx, int cqe_type)
  700 {
  701         bus_dmamap_t dmap;
  702         struct mbuf *mbuf;
  703         struct snd_queue *sq;
  704         struct sq_hdr_subdesc *hdr;
  705 
  706         mbuf = NULL;
  707         sq = &nic->qs->sq[cqe_tx->sq_idx];
  708 
  709         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
  710         if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
  711                 return;
  712 
  713         dprintf(nic->dev,
  714             "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
  715             __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
  716             cqe_tx->sqe_ptr, hdr->subdesc_cnt);
  717 
  718         dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
  719         bus_dmamap_unload(sq->snd_buff_dmat, dmap);
  720 
  721         mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
  722         if (mbuf != NULL) {
  723                 m_freem(mbuf);
  724                 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
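                      /*
                       * Return the header subdescriptor and the subdesc_cnt
                       * subdescriptors that follow it to the free pool
                       * (hence the "+ 1").
                       */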
  725                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
  726         }
  727 
  728         nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
  729 }
  730 
  731 static int
  732 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
  733 {
  734         struct mbuf *mbuf;
  735         struct ifnet *ifp;
  736         int processed_cqe, work_done = 0, tx_done = 0;
  737         int cqe_count, cqe_head;
  738         struct queue_set *qs = nic->qs;
  739         struct cmp_queue *cq = &qs->cq[cq_idx];
  740         struct snd_queue *sq = &qs->sq[cq_idx];
  741         struct rcv_queue *rq;
  742         struct cqe_rx_t *cq_desc;
  743         struct lro_ctrl *lro;
  744         int rq_idx;
  745         int cmp_err;
  746 
  747         NICVF_CMP_LOCK(cq);
  748         cmp_err = 0;
  749         processed_cqe = 0;
  750         /* Get the number of valid CQ entries to process */
  751         cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
  752         cqe_count &= CQ_CQE_COUNT;
  753         if (cqe_count == 0)
  754                 goto out;
  755 
  756         /* Get head of the valid CQ entries */
  757         cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
  758         cqe_head &= 0xFFFF;
  759 
  760         dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
  761             __func__, cq_idx, cqe_count, cqe_head);
  762         while (processed_cqe < cqe_count) {
  763                 /* Get the CQ descriptor */
  764                 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
  765                 cqe_head++;
  766                 cqe_head &= (cq->dmem.q_len - 1);
  767                 /* Prefetch next CQ descriptor */
  768                 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
  769 
  770                 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
  771                     cq_desc->cqe_type);
  772                 switch (cq_desc->cqe_type) {
  773                 case CQE_TYPE_RX:
  774                         cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
  775                             CQE_TYPE_RX);
  776                         if (__predict_false(cmp_err != 0)) {
  777                                 /*
  778                                  * Oops. Cannot finish now.
  779                                  * Let's try again later.
  780                                  */
  781                                 goto done;
  782                         }
  783                         work_done++;
  784                         break;
  785                 case CQE_TYPE_SEND:
  786                         nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
  787                             CQE_TYPE_SEND);
  788                         tx_done++;
  789                         break;
  790                 case CQE_TYPE_INVALID:
  791                 case CQE_TYPE_RX_SPLIT:
  792                 case CQE_TYPE_RX_TCP:
  793                 case CQE_TYPE_SEND_PTP:
  794                         /* Ignore for now */
  795                         break;
  796                 }
  797                 processed_cqe++;
  798         }
  799 done:
  800         dprintf(nic->dev,
  801             "%s CQ%d processed_cqe %d work_done %d\n",
  802             __func__, cq_idx, processed_cqe, work_done);
  803 
  804         /* Ring doorbell to inform H/W to reuse processed CQEs */
  805         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
  806 
  807         if ((tx_done > 0) &&
  808             ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
  809                 /* Re-enable the TXQ if it was stopped earlier because the SQ was full */
  810                 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  811                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
  812         }
  813 out:
  814         /*
  815          * Flush any outstanding LRO work
  816          */
  817         rq_idx = cq_idx;
  818         rq = &nic->qs->rq[rq_idx];
  819         lro = &rq->lro;
  820         tcp_lro_flush_all(lro);
  821 
  822         NICVF_CMP_UNLOCK(cq);
  823 
  824         ifp = nic->ifp;
  825         /* Push received MBUFs to the stack */
  826         while (!buf_ring_empty(cq->rx_br)) {
  827                 mbuf = buf_ring_dequeue_mc(cq->rx_br);
  828                 if (__predict_true(mbuf != NULL))
  829                         (*ifp->if_input)(ifp, mbuf);
  830         }
  831 
  832         return (cmp_err);
  833 }
  834 
  835 /*
  836  * Qset error interrupt handler
  837  *
  838  * As of now only CQ errors are handled
  839  */
  840 static void
  841 nicvf_qs_err_task(void *arg, int pending)
  842 {
  843         struct nicvf *nic;
  844         struct queue_set *qs;
  845         int qidx;
  846         uint64_t status;
  847         boolean_t enable = TRUE;
  848 
  849         nic = (struct nicvf *)arg;
  850         qs = nic->qs;
  851 
  852         /* Deactivate network interface */
  853         if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
  854 
  855         /* Check whether it is a CQ error */
  856         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
  857                 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
  858                     qidx);
  859                 if ((status & CQ_ERR_MASK) == 0)
  860                         continue;
  861                 /* Process already queued CQEs and reconfig CQ */
  862                 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
  863                 nicvf_sq_disable(nic, qidx);
  864                 (void)nicvf_cq_intr_handler(nic, qidx);
  865                 nicvf_cmp_queue_config(nic, qs, qidx, enable);
  866                 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
  867                 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
  868                 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
  869         }
  870 
  871         if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  872         /* Re-enable Qset error interrupt */
  873         nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
  874 }
  875 
  876 static void
  877 nicvf_cmp_task(void *arg, int pending)
  878 {
  879         struct cmp_queue *cq;
  880         struct nicvf *nic;
  881         int cmp_err;
  882 
  883         cq = (struct cmp_queue *)arg;
  884         nic = cq->nic;
  885 
  886         /* Handle CQ descriptors */
  887         cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
  888         if (__predict_false(cmp_err != 0)) {
  889                 /*
  890                  * Schedule another thread here since we did not
  891                  * process the entire CQ due to Tx or Rx CQ parse error.
  892                  */
  893                 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
  894 
  895         }
  896 
  897         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
  898         /* Re-enable interrupt (previously disabled in nicvf_intr_handler()) */
  899         nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
  900 
  901 }
  902 
  903 /* Initialize completion queue */
  904 static int
  905 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
  906     int qidx)
  907 {
  908         int err;
  909 
  910         /* Initialize lock */
  911         snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
  912             device_get_nameunit(nic->dev), qidx);
  913         mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
  914 
  915         err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
  916                                      NICVF_CQ_BASE_ALIGN_BYTES);
  917 
  918         if (err != 0) {
  919                 device_printf(nic->dev,
  920                     "Could not allocate DMA memory for CQ\n");
  921                 return (err);
  922         }
  923 
  924         cq->desc = cq->dmem.base;
  925         cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
  926         cq->nic = nic;
  927         cq->idx = qidx;
  928         nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
  929 
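              /*
               * Received mbufs are staged on this ring while the CQ lock is
               * held and pushed to the stack only after it is dropped (see
               * nicvf_cq_intr_handler()).  It is sized well above the CQ
               * length, presumably so that enqueues do not fail under load.
               */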
  930         cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
  931             &cq->mtx);
  932 
  933         /* Allocate taskqueue */
  934         TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
  935         cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
  936             taskqueue_thread_enqueue, &cq->cmp_taskq);
  937         taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
  938             device_get_nameunit(nic->dev), qidx);
  939 
  940         return (0);
  941 }
  942 
  943 static void
  944 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
  945 {
  946 
  947         if (cq == NULL)
  948                 return;
  949         /*
  950          * The completion queue itself should be disabled by now
  951          * (ref. nicvf_snd_queue_config()).
  952          * Ensure that it is safe to free it, otherwise panic.
  953          */
  954         if (cq->enable)
  955                 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
  956 
  957         if (cq->cmp_taskq != NULL) {
  958                 /* Remove task */
  959                 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
  960                         taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
  961 
  962                 taskqueue_free(cq->cmp_taskq);
  963                 cq->cmp_taskq = NULL;
  964         }
  965         /*
  966          * The completion task may have re-enabled interrupts,
  967          * so disable them now that its processing has finished.
  968          * It is safe to do so since the corresponding CQ
  969          * was already disabled.
  970          */
  971         nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
  972         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
  973 
  974         NICVF_CMP_LOCK(cq);
  975         nicvf_free_q_desc_mem(nic, &cq->dmem);
  976         drbr_free(cq->rx_br, M_DEVBUF);
  977         NICVF_CMP_UNLOCK(cq);
  978         mtx_destroy(&cq->mtx);
  979         memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
  980 }
  981 
  982 int
  983 nicvf_xmit_locked(struct snd_queue *sq)
  984 {
  985         struct nicvf *nic;
  986         struct ifnet *ifp;
  987         struct mbuf *next;
  988         int err;
  989 
  990         NICVF_TX_LOCK_ASSERT(sq);
  991 
  992         nic = sq->nic;
  993         ifp = nic->ifp;
  994         err = 0;
  995 
  996         while ((next = drbr_peek(ifp, sq->br)) != NULL) {
  997                 /* Send a copy of the frame to the BPF listener */
  998                 ETHER_BPF_MTAP(ifp, next);
  999 
 1000                 err = nicvf_tx_mbuf_locked(sq, &next);
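                      /*
                       * A NULL mbuf pointer after a failed transmit presumably
                       * means the frame was already consumed, so advance past
                       * it; otherwise put it back on the ring for a later retry.
                       */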
 1001                 if (err != 0) {
 1002                         if (next == NULL)
 1003                                 drbr_advance(ifp, sq->br);
 1004                         else
 1005                                 drbr_putback(ifp, sq->br, next);
 1006 
 1007                         break;
 1008                 }
 1009                 drbr_advance(ifp, sq->br);
 1010         }
 1011         return (err);
 1012 }
 1013 
 1014 static void
 1015 nicvf_snd_task(void *arg, int pending)
 1016 {
 1017         struct snd_queue *sq = (struct snd_queue *)arg;
 1018         struct nicvf *nic;
 1019         struct ifnet *ifp;
 1020         int err;
 1021 
 1022         nic = sq->nic;
 1023         ifp = nic->ifp;
 1024 
 1025         /*
 1026          * Skip sending anything if the driver is not running,
 1027          * the SQ is full, or the link is down.
 1028          */
 1029         if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1030             IFF_DRV_RUNNING) || !nic->link_up)
 1031                 return;
 1032 
 1033         NICVF_TX_LOCK(sq);
 1034         err = nicvf_xmit_locked(sq);
 1035         NICVF_TX_UNLOCK(sq);
 1036         /* Try again */
 1037         if (err != 0)
 1038                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 1039 }
 1040 
 1041 /* Initialize transmit queue */
 1042 static int
 1043 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
 1044     int qidx)
 1045 {
 1046         size_t i;
 1047         int err;
 1048 
 1049         /* Initialize TX lock for this queue */
 1050         snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
 1051             device_get_nameunit(nic->dev), qidx);
 1052         mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
 1053 
 1054         NICVF_TX_LOCK(sq);
 1055         /* Allocate buffer ring */
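              /*
               * Each frame consumes at least MIN_SQ_DESC_PER_PKT_XMIT send
               * descriptors, so the ring is presumably sized so that it never
               * holds more frames than the SQ could describe.
               */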
 1056         sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
 1057             M_NOWAIT, &sq->mtx);
 1058         if (sq->br == NULL) {
 1059                 device_printf(nic->dev,
 1060                     "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
 1061                 err = ENOMEM;
 1062                 goto error;
 1063         }
 1064 
 1065         /* Allocate DMA memory for Tx descriptors */
 1066         err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
 1067                                      NICVF_SQ_BASE_ALIGN_BYTES);
 1068         if (err != 0) {
 1069                 device_printf(nic->dev,
 1070                     "Could not allocate DMA memory for SQ\n");
 1071                 goto error;
 1072         }
 1073 
 1074         sq->desc = sq->dmem.base;
 1075         sq->head = sq->tail = 0;
 1076         atomic_store_rel_int(&sq->free_cnt, q_len - 1);
 1077         sq->thresh = SND_QUEUE_THRESH;
 1078         sq->idx = qidx;
 1079         sq->nic = nic;
 1080 
 1081         /*
 1082          * Allocate DMA maps for Tx buffers
 1083          */
 1084 
 1085         /* Create DMA tag first */
 1086         err = bus_dma_tag_create(
 1087             bus_get_dma_tag(nic->dev),          /* parent tag */
 1088             1,                                  /* alignment */
 1089             0,                                  /* boundary */
 1090             BUS_SPACE_MAXADDR,                  /* lowaddr */
 1091             BUS_SPACE_MAXADDR,                  /* highaddr */
 1092             NULL, NULL,                         /* filtfunc, filtfuncarg */
 1093             NICVF_TSO_MAXSIZE,                  /* maxsize */
 1094             NICVF_TSO_NSEGS,                    /* nsegments */
 1095             MCLBYTES,                           /* maxsegsize */
 1096             0,                                  /* flags */
 1097             NULL, NULL,                         /* lockfunc, lockfuncarg */
 1098             &sq->snd_buff_dmat);                /* dmat */
 1099 
 1100         if (err != 0) {
 1101                 device_printf(nic->dev,
 1102                     "Failed to create busdma tag for Tx buffers\n");
 1103                 goto error;
 1104         }
 1105 
 1106         /* Allocate send buffers array */
 1107         sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
 1108             (M_NOWAIT | M_ZERO));
 1109         if (sq->snd_buff == NULL) {
 1110                 device_printf(nic->dev,
 1111                     "Could not allocate memory for Tx buffers array\n");
 1112                 err = ENOMEM;
 1113                 goto error;
 1114         }
 1115 
 1116         /* Now populate maps */
 1117         for (i = 0; i < q_len; i++) {
 1118                 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
 1119                     &sq->snd_buff[i].dmap);
 1120                 if (err != 0) {
 1121                         device_printf(nic->dev,
 1122                             "Failed to create DMA maps for Tx buffers\n");
 1123                         goto error;
 1124                 }
 1125         }
 1126         NICVF_TX_UNLOCK(sq);
 1127 
 1128         /* Allocate taskqueue */
 1129         TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
 1130         sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
 1131             taskqueue_thread_enqueue, &sq->snd_taskq);
 1132         taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
 1133             device_get_nameunit(nic->dev), qidx);
 1134 
 1135         return (0);
 1136 error:
 1137         NICVF_TX_UNLOCK(sq);
 1138         return (err);
 1139 }
 1140 
 1141 static void
 1142 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 1143 {
 1144         struct queue_set *qs = nic->qs;
 1145         size_t i;
 1146         int err;
 1147 
 1148         if (sq == NULL)
 1149                 return;
 1150 
 1151         if (sq->snd_taskq != NULL) {
 1152                 /* Remove task */
 1153                 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
 1154                         taskqueue_drain(sq->snd_taskq, &sq->snd_task);
 1155 
 1156                 taskqueue_free(sq->snd_taskq);
 1157                 sq->snd_taskq = NULL;
 1158         }
 1159 
 1160         NICVF_TX_LOCK(sq);
 1161         if (sq->snd_buff_dmat != NULL) {
 1162                 if (sq->snd_buff != NULL) {
 1163                         for (i = 0; i < qs->sq_len; i++) {
 1164                                 m_freem(sq->snd_buff[i].mbuf);
 1165                                 sq->snd_buff[i].mbuf = NULL;
 1166 
 1167                                 bus_dmamap_unload(sq->snd_buff_dmat,
 1168                                     sq->snd_buff[i].dmap);
 1169                                 err = bus_dmamap_destroy(sq->snd_buff_dmat,
 1170                                     sq->snd_buff[i].dmap);
 1171                                 /*
 1172                                  * If bus_dmamap_destroy fails it can cause
 1173                                  * random panic later if the tag is also
 1174                                  * destroyed in the process.
 1175                                  */
 1176                                 KASSERT(err == 0,
 1177                                     ("%s: Could not destroy DMA map for SQ",
 1178                                     __func__));
 1179                         }
 1180                 }
 1181 
 1182                 free(sq->snd_buff, M_NICVF);
 1183 
 1184                 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
 1185                 KASSERT(err == 0,
 1186                     ("%s: Trying to destroy BUSY DMA tag", __func__));
 1187         }
 1188 
 1189         /* Free private driver ring for this send queue */
 1190         if (sq->br != NULL)
 1191                 drbr_free(sq->br, M_DEVBUF);
 1192 
 1193         if (sq->dmem.base != NULL)
 1194                 nicvf_free_q_desc_mem(nic, &sq->dmem);
 1195 
 1196         NICVF_TX_UNLOCK(sq);
 1197         /* Destroy Tx lock */
 1198         mtx_destroy(&sq->mtx);
 1199         memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
 1200 }
 1201 
 1202 static void
 1203 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1204 {
 1205 
 1206         /* Disable send queue */
 1207         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
 1208         /* Check if SQ is stopped */
 1209         if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
 1210                 return;
 1211         /* Reset send queue */
 1212         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1213 }
 1214 
 1215 static void
 1216 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1217 {
 1218         union nic_mbx mbx = {};
 1219 
 1220         /* Make sure all packets in the pipeline are written back into memory */
 1221         mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
 1222         nicvf_send_msg_to_pf(nic, &mbx);
 1223 }
 1224 
 1225 static void
 1226 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1227 {
 1228 
 1229         /* Disable timer threshold (doesn't get reset upon CQ reset) */
 1230         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
 1231         /* Disable completion queue */
 1232         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
 1233         /* Reset completion queue */
 1234         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1235 }
 1236 
 1237 static void
 1238 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
 1239 {
 1240         uint64_t tmp, fifo_state;
 1241         int timeout = 10;
 1242 
 1243         /* Save head and tail pointers for freeing up buffers */
 1244         rbdr->head =
 1245             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
 1246         rbdr->tail =
 1247             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
 1248 
 1249         /*
 1250          * If RBDR FIFO is in 'FAIL' state then do a reset first
 1251          * before reclaiming.
 1252          */
 1253         fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
 1254         if (((fifo_state >> 62) & 0x03) == 0x3) {
 1255                 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
 1256                     qidx, NICVF_RBDR_RESET);
 1257         }
 1258 
 1259         /* Disable RBDR */
 1260         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
 1261         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1262                 return;
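              /*
               * Spin (up to ~10 ms) until the low and high 32-bit halves of
               * the prefetch status register match, which appears to indicate
               * that the RBDR prefetch machinery has drained, before issuing
               * the reset.
               */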
 1263         while (1) {
 1264                 tmp = nicvf_queue_reg_read(nic,
 1265                     NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
 1266                 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
 1267                         break;
 1268 
 1269                 DELAY(1000);
 1270                 timeout--;
 1271                 if (!timeout) {
 1272                         device_printf(nic->dev,
 1273                             "Failed polling on prefetch status\n");
 1274                         return;
 1275                 }
 1276         }
 1277         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1278             NICVF_RBDR_RESET);
 1279 
 1280         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
 1281                 return;
 1282         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
 1283         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1284                 return;
 1285 }
 1286 
 1287 /* Configures receive queue */
 1288 static void
 1289 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 1290     int qidx, bool enable)
 1291 {
 1292         union nic_mbx mbx = {};
 1293         struct rcv_queue *rq;
 1294         struct rq_cfg rq_cfg;
 1295         struct ifnet *ifp;
 1296         struct lro_ctrl *lro;
 1297 
 1298         ifp = nic->ifp;
 1299 
 1300         rq = &qs->rq[qidx];
 1301         rq->enable = enable;
 1302 
 1303         lro = &rq->lro;
 1304 
 1305         /* Disable receive queue */
 1306         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
 1307 
 1308         if (!rq->enable) {
 1309                 nicvf_reclaim_rcv_queue(nic, qs, qidx);
 1310                 /* Free LRO memory */
 1311                 tcp_lro_free(lro);
 1312                 rq->lro_enabled = FALSE;
 1313                 return;
 1314         }
 1315 
 1316         /* Configure LRO if enabled */
 1317         rq->lro_enabled = FALSE;
 1318         if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
 1319                 if (tcp_lro_init(lro) != 0) {
 1320                         device_printf(nic->dev,
 1321                             "Failed to initialize LRO for RXQ%d\n", qidx);
 1322                 } else {
 1323                         rq->lro_enabled = TRUE;
 1324                         lro->ifp = nic->ifp;
 1325                 }
 1326         }
 1327 
 1328         rq->cq_qs = qs->vnic_id;
 1329         rq->cq_idx = qidx;
 1330         rq->start_rbdr_qs = qs->vnic_id;
 1331         rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
 1332         rq->cont_rbdr_qs = qs->vnic_id;
 1333         rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
 1334         /* All writes of RBDR data are to be loaded into the L2 cache as well */
 1335         rq->caching = 1;
 1336 
 1337         /* Send a mailbox msg to PF to config RQ */
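              /*
               * Shift positions used when packing the RQ_CFG word below:
               * caching at bit 26, cq_qs at bit 19, cq_idx at bit 16,
               * cont_rbdr_qs at bit 9, cont_qs_rbdr_idx at bit 8,
               * start_rbdr_qs at bit 1 and start_qs_rbdr_idx at bit 0.
               */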
 1338         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
 1339         mbx.rq.qs_num = qs->vnic_id;
 1340         mbx.rq.rq_num = qidx;
 1341         mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
 1342             (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
 1343             (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
 1344             (rq->start_qs_rbdr_idx);
 1345         nicvf_send_msg_to_pf(nic, &mbx);
 1346 
 1347         mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
 1348         mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
 1349         nicvf_send_msg_to_pf(nic, &mbx);
 1350 
 1351         /*
 1352          * RQ drop config
 1353          * Enable CQ drop to reserve sufficient CQEs for all tx packets
 1354          */
 1355         mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
 1356         mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
 1357         nicvf_send_msg_to_pf(nic, &mbx);
 1358 
 1359         nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
 1360 
 1361         /* Enable Receive queue */
 1362         rq_cfg.ena = 1;
 1363         rq_cfg.tcp_ena = 0;
 1364         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
 1365             *(uint64_t *)&rq_cfg);
 1366 }
 1367 
 1368 /* Configures completion queue */
 1369 static void
 1370 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 1371     int qidx, boolean_t enable)
 1372 {
 1373         struct cmp_queue *cq;
 1374         struct cq_cfg cq_cfg;
 1375 
 1376         cq = &qs->cq[qidx];
 1377         cq->enable = enable;
 1378 
 1379         if (!cq->enable) {
 1380                 nicvf_reclaim_cmp_queue(nic, qs, qidx);
 1381                 return;
 1382         }
 1383 
 1384         /* Reset completion queue */
 1385         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1386 
 1387         /* Set completion queue base address */
 1388         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
 1389             (uint64_t)(cq->dmem.phys_base));
 1390 
 1391         /* Enable Completion queue */
 1392         cq_cfg.ena = 1;
 1393         cq_cfg.reset = 0;
 1394         cq_cfg.caching = 0;
 1395         cq_cfg.qsize = CMP_QSIZE;
 1396         cq_cfg.avg_con = 0;
 1397         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
 1398 
 1399         /* Set threshold value for interrupt generation */
 1400         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
 1401         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
 1402             nic->cq_coalesce_usecs);
 1403 }
 1404 
 1405 /* Configures transmit queue */
 1406 static void
 1407 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1408     boolean_t enable)
 1409 {
 1410         union nic_mbx mbx = {};
 1411         struct snd_queue *sq;
 1412         struct sq_cfg sq_cfg;
 1413 
 1414         sq = &qs->sq[qidx];
 1415         sq->enable = enable;
 1416 
 1417         if (!sq->enable) {
 1418                 nicvf_reclaim_snd_queue(nic, qs, qidx);
 1419                 return;
 1420         }
 1421 
 1422         /* Reset send queue */
 1423         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1424 
 1425         sq->cq_qs = qs->vnic_id;
 1426         sq->cq_idx = qidx;
 1427 
 1428         /* Send a mailbox msg to PF to config SQ */
 1429         mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
 1430         mbx.sq.qs_num = qs->vnic_id;
 1431         mbx.sq.sq_num = qidx;
 1432         mbx.sq.sqs_mode = nic->sqs_mode;
 1433         mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
 1434         nicvf_send_msg_to_pf(nic, &mbx);
 1435 
 1436         /* Set queue base address */
 1437         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
 1438             (uint64_t)(sq->dmem.phys_base));
 1439 
 1440         /* Enable send queue & set queue size */
 1441         sq_cfg.ena = 1;
 1442         sq_cfg.reset = 0;
 1443         sq_cfg.ldwb = 0;
 1444         sq_cfg.qsize = SND_QSIZE;
 1445         sq_cfg.tstmp_bgx_intf = 0;
 1446         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
 1447 
 1448         /* Set threshold value for interrupt generation */
 1449         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
 1450 }
 1451 
 1452 /* Configures receive buffer descriptor ring */
 1453 static void
 1454 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1455     boolean_t enable)
 1456 {
 1457         struct rbdr *rbdr;
 1458         struct rbdr_cfg rbdr_cfg;
 1459 
 1460         rbdr = &qs->rbdr[qidx];
 1461         nicvf_reclaim_rbdr(nic, rbdr, qidx);
 1462         if (!enable)
 1463                 return;
 1464 
 1465         /* Set descriptor base address */
 1466         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
 1467             (uint64_t)(rbdr->dmem.phys_base));
 1468 
 1469         /* Enable RBDR & set queue size */
 1470         /* Buffer size should be in multiples of 128 bytes */
 1471         rbdr_cfg.ena = 1;
 1472         rbdr_cfg.reset = 0;
 1473         rbdr_cfg.ldwb = 0;
 1474         rbdr_cfg.qsize = RBDR_SIZE;
 1475         rbdr_cfg.avg_con = 0;
 1476         rbdr_cfg.lines = rbdr->dma_size / 128;
 1477         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1478             *(uint64_t *)&rbdr_cfg);
 1479 
 1480         /* Notify HW */
 1481         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
 1482             qs->rbdr_len - 1);
 1483 
 1484         /* Set threshold value for interrupt generation */
 1485         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
 1486             rbdr->thresh - 1);
 1487 }
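      /*
       * Inferred from the values written above rather than from hardware
       * documentation: the DOOR write credits the hardware with
       * qs->rbdr_len - 1 receive buffers (the whole ring minus one slot),
       * and the THRESH write arms an interrupt once the count of available
       * buffers drops to the threshold so the ring can be refilled.
       */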
 1488 
 1489 /* Requests PF to assign and enable Qset */
 1490 void
 1491 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
 1492 {
 1493         union nic_mbx mbx = {};
 1494         struct queue_set *qs;
 1495         struct qs_cfg *qs_cfg;
 1496 
 1497         qs = nic->qs;
 1498         if (qs == NULL) {
 1499                 device_printf(nic->dev,
 1500                     "Qset is not allocated, skipping queue initialization\n");
 1501                 return;
 1502         }
 1503 
 1504         qs->enable = enable;
 1505         qs->vnic_id = nic->vf_id;
 1506 
 1507         /* Send a mailbox msg to PF to config Qset */
 1508         mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
 1509         mbx.qs.num = qs->vnic_id;
 1510 
 1511         mbx.qs.cfg = 0;
 1512         qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
 1513         if (qs->enable) {
 1514                 qs_cfg->ena = 1;
 1515                 qs_cfg->vnic = qs->vnic_id;
 1516         }
 1517         nicvf_send_msg_to_pf(nic, &mbx);
 1518 }
 1519 
 1520 static void
 1521 nicvf_free_resources(struct nicvf *nic)
 1522 {
 1523         int qidx;
 1524         struct queue_set *qs;
 1525 
 1526         qs = nic->qs;
 1527         /*
 1528          * Remove QS error task first since it has to be dead
 1529          * to safely free completion queue tasks.
 1530          */
 1531         if (qs->qs_err_taskq != NULL) {
 1532                 /* Shut down QS error tasks */
 1533                 while (taskqueue_cancel(qs->qs_err_taskq,
 1534                     &qs->qs_err_task, NULL) != 0) {
 1535                         taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
 1536 
 1537                 }
 1538                 taskqueue_free(qs->qs_err_taskq);
 1539                 qs->qs_err_taskq = NULL;
 1540         }
 1541         /* Free receive buffer descriptor ring */
 1542         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1543                 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
 1544 
 1545         /* Free completion queue */
 1546         for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1547                 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
 1548 
 1549         /* Free send queue */
 1550         for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1551                 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
 1552 }
 1553 
 1554 static int
 1555 nicvf_alloc_resources(struct nicvf *nic)
 1556 {
 1557         struct queue_set *qs = nic->qs;
 1558         int qidx;
 1559 
 1560         /* Alloc receive buffer descriptor ring */
 1561         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
 1562                 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
 1563                                     DMA_BUFFER_LEN, qidx))
 1564                         goto alloc_fail;
 1565         }
 1566 
 1567         /* Alloc send queue */
 1568         for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
 1569                 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
 1570                         goto alloc_fail;
 1571         }
 1572 
 1573         /* Alloc completion queue */
 1574         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 1575                 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
 1576                         goto alloc_fail;
 1577         }
 1578 
 1579         /* Allocate QS error taskqueue */
 1580         TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
 1581         qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
 1582             taskqueue_thread_enqueue, &qs->qs_err_taskq);
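              /*
               * No NULL check is needed above: with M_WAITOK the taskqueue
               * allocation sleeps until it succeeds instead of failing.
               */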
 1583         taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
 1584             device_get_nameunit(nic->dev));
 1585 
 1586         return (0);
 1587 alloc_fail:
 1588         nicvf_free_resources(nic);
 1589         return (ENOMEM);
 1590 }
 1591 
 1592 int
 1593 nicvf_set_qset_resources(struct nicvf *nic)
 1594 {
 1595         struct queue_set *qs;
 1596 
 1597         qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
 1598         nic->qs = qs;
 1599 
 1600         /* Set count of each queue */
 1601         qs->rbdr_cnt = RBDR_CNT;
 1602         qs->rq_cnt = RCV_QUEUE_CNT;
 1603 
 1604         qs->sq_cnt = SND_QUEUE_CNT;
 1605         qs->cq_cnt = CMP_QUEUE_CNT;
 1606 
 1607         /* Set queue lengths */
 1608         qs->rbdr_len = RCV_BUF_COUNT;
 1609         qs->sq_len = SND_QUEUE_LEN;
 1610         qs->cq_len = CMP_QUEUE_LEN;
 1611 
 1612         nic->rx_queues = qs->rq_cnt;
 1613         nic->tx_queues = qs->sq_cnt;
 1614 
 1615         return (0);
 1616 }
 1617 
 1618 int
 1619 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
 1620 {
 1621         boolean_t disable = FALSE;
 1622         struct queue_set *qs;
 1623         int qidx;
 1624 
 1625         qs = nic->qs;
 1626         if (qs == NULL)
 1627                 return (0);
 1628 
 1629         if (enable) {
 1630                 if (nicvf_alloc_resources(nic) != 0)
 1631                         return (ENOMEM);
 1632 
 1633                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1634                         nicvf_snd_queue_config(nic, qs, qidx, enable);
 1635                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1636                         nicvf_cmp_queue_config(nic, qs, qidx, enable);
 1637                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1638                         nicvf_rbdr_config(nic, qs, qidx, enable);
 1639                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1640                         nicvf_rcv_queue_config(nic, qs, qidx, enable);
 1641         } else {
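                      /*
                       * Teardown order as coded below: receive queues are
                       * disabled first so no new buffers are consumed, then
                       * the buffer rings, the send queues and finally the
                       * completion queues.  The rationale is inferred from
                       * the ordering itself, not documented here.
                       */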
 1642                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1643                         nicvf_rcv_queue_config(nic, qs, qidx, disable);
 1644                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1645                         nicvf_rbdr_config(nic, qs, qidx, disable);
 1646                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1647                         nicvf_snd_queue_config(nic, qs, qidx, disable);
 1648                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1649                         nicvf_cmp_queue_config(nic, qs, qidx, disable);
 1650 
 1651                 nicvf_free_resources(nic);
 1652         }
 1653 
 1654         return (0);
 1655 }
 1656 
 1657 /*
 1658  * Get free descriptors from the SQ.
 1659  * Returns the descriptor number (index) of the first one.
 1660  */
 1661 static __inline int
 1662 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
 1663 {
 1664         int qentry;
 1665 
 1666         qentry = sq->tail;
 1667         atomic_subtract_int(&sq->free_cnt, desc_cnt);
 1668         sq->tail += desc_cnt;
 1669         sq->tail &= (sq->dmem.q_len - 1);
 1670 
 1671         return (qentry);
 1672 }
 1673 
 1674 /* Free descriptors back to the SQ for future use */
 1675 static void
 1676 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 1677 {
 1678 
 1679         atomic_add_int(&sq->free_cnt, desc_cnt);
 1680         sq->head += desc_cnt;
 1681         sq->head &= (sq->dmem.q_len - 1);
 1682 }
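      /*
       * Both helpers above wrap their ring index with "& (q_len - 1)", which
       * is only correct when the queue length is a power of two.  free_cnt is
       * maintained with atomic ops, presumably because descriptors are
       * reserved on the transmit path but released from completion handling,
       * which may run in a different context.
       */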
 1683 
 1684 static __inline int
 1685 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
 1686 {
 1687         qentry++;
 1688         qentry &= (sq->dmem.q_len - 1);
 1689         return (qentry);
 1690 }
 1691 
 1692 static void
 1693 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1694 {
 1695         uint64_t sq_cfg;
 1696 
 1697         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1698         sq_cfg |= NICVF_SQ_EN;
 1699         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1700         /* Ring doorbell so that H/W restarts processing SQEs */
 1701         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
 1702 }
 1703 
 1704 static void
 1705 nicvf_sq_disable(struct nicvf *nic, int qidx)
 1706 {
 1707         uint64_t sq_cfg;
 1708 
 1709         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1710         sq_cfg &= ~NICVF_SQ_EN;
 1711         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1712 }
 1713 
 1714 static void
 1715 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1716 {
 1717         uint64_t head;
 1718         struct snd_buff *snd_buff;
 1719         struct sq_hdr_subdesc *hdr;
 1720 
 1721         NICVF_TX_LOCK(sq);
 1722         head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
 1723         while (sq->head != head) {
 1724                 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
 1725                 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
 1726                         nicvf_put_sq_desc(sq, 1);
 1727                         continue;
 1728                 }
 1729                 snd_buff = &sq->snd_buff[sq->head];
 1730                 if (snd_buff->mbuf != NULL) {
 1731                         bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1732                         m_freem(snd_buff->mbuf);
 1733                         sq->snd_buff[sq->head].mbuf = NULL;
 1734                 }
 1735                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 1736         }
 1737         NICVF_TX_UNLOCK(sq);
 1738 }
 1739 
 1740 /*
 1741  * Add SQ HEADER subdescriptor.
 1742  * First subdescriptor for every send descriptor.
 1743  */
 1744 static __inline int
 1745 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
 1746                          int subdesc_cnt, struct mbuf *mbuf, int len)
 1747 {
 1748         struct nicvf *nic;
 1749         struct sq_hdr_subdesc *hdr;
 1750         struct ether_vlan_header *eh;
 1751 #ifdef INET
 1752         struct ip *ip;
 1753         struct tcphdr *th;
 1754 #endif
 1755         uint16_t etype;
 1756         int ehdrlen, iphlen, poff, proto;
 1757 
 1758         nic = sq->nic;
 1759 
 1760         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
 1761         sq->snd_buff[qentry].mbuf = mbuf;
 1762 
 1763         memset(hdr, 0, SND_QUEUE_DESC_SIZE);
 1764         hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
 1765         /* Enable notification via CQE after processing SQE */
 1766         hdr->post_cqe = 1;
 1767         /* Number of subdescriptors following this one */
 1768         hdr->subdesc_cnt = subdesc_cnt;
 1769         hdr->tot_len = len;
 1770 
 1771         eh = mtod(mbuf, struct ether_vlan_header *);
 1772         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1773                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1774                 etype = ntohs(eh->evl_proto);
 1775         } else {
 1776                 ehdrlen = ETHER_HDR_LEN;
 1777                 etype = ntohs(eh->evl_encap_proto);
 1778         }
 1779 
 1780         poff = proto = -1;
 1781         switch (etype) {
 1782 #ifdef INET6
 1783         case ETHERTYPE_IPV6:
 1784                 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
 1785                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
 1786                         sq->snd_buff[qentry].mbuf = NULL;
 1787                         if (mbuf == NULL)
 1788                                 return (ENOBUFS);
 1789                 }
 1790                 poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
 1791                 if (poff < 0)
 1792                         return (ENOBUFS);
 1793                 poff += ehdrlen;
 1794                 break;
 1795 #endif
 1796 #ifdef INET
 1797         case ETHERTYPE_IP:
 1798                 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
 1799                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
 1800                         sq->snd_buff[qentry].mbuf = mbuf;
 1801                         if (mbuf == NULL)
 1802                                 return (ENOBUFS);
 1803                 }
 1804                 if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
 1805                         hdr->csum_l3 = 1; /* Enable IP csum calculation */
 1806 
 1807                 ip = (struct ip *)(mbuf->m_data + ehdrlen);
 1808                 iphlen = ip->ip_hl << 2;
 1809                 poff = ehdrlen + iphlen;
 1810                 proto = ip->ip_p;
 1811                 break;
 1812 #endif
 1813         }
 1814 
 1815 #if defined(INET6) || defined(INET)
 1816         if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
 1817                 switch (proto) {
 1818                 case IPPROTO_TCP:
 1819                         if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
 1820                                 break;
 1821 
 1822                         if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
 1823                                 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
 1824                                 sq->snd_buff[qentry].mbuf = mbuf;
 1825                                 if (mbuf == NULL)
 1826                                         return (ENOBUFS);
 1827                         }
 1828                         hdr->csum_l4 = SEND_L4_CSUM_TCP;
 1829                         break;
 1830                 case IPPROTO_UDP:
 1831                         if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
 1832                                 break;
 1833 
 1834                         if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
 1835                                 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
 1836                                 sq->snd_buff[qentry].mbuf = mbuf;
 1837                                 if (mbuf == NULL)
 1838                                         return (ENOBUFS);
 1839                         }
 1840                         hdr->csum_l4 = SEND_L4_CSUM_UDP;
 1841                         break;
 1842                 case IPPROTO_SCTP:
 1843                         if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
 1844                                 break;
 1845 
 1846                         if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
 1847                                 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
 1848                                 sq->snd_buff[qentry].mbuf = mbuf;
 1849                                 if (mbuf == NULL)
 1850                                         return (ENOBUFS);
 1851                         }
 1852                         hdr->csum_l4 = SEND_L4_CSUM_SCTP;
 1853                         break;
 1854                 default:
 1855                         break;
 1856                 }
 1857                 hdr->l3_offset = ehdrlen;
 1858                 hdr->l4_offset = poff;
 1859         }
 1860 
 1861         if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
 1862                 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
 1863 
 1864                 hdr->tso = 1;
 1865                 hdr->tso_start = poff + (th->th_off * 4);
 1866                 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
 1867                 hdr->inner_l3_offset = ehdrlen - 2;
 1868                 nic->drv_stats.tx_tso++;
 1869         }
 1870 #endif
 1871 
 1872         return (0);
 1873 }
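      /*
       * For reference: a transmitted packet occupies one HEADER subdescriptor
       * (built above) followed by one GATHER subdescriptor per DMA segment
       * (built below).  That is where the "MIN_SQ_DESC_PER_PKT_XMIT + nsegs -
       * 1" count in nicvf_tx_mbuf_locked() comes from, assuming the constant
       * stands for the minimal header-plus-one-gather pair.
       */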
 1874 
 1875 /*
 1876  * SQ GATHER subdescriptor
 1877  * Must follow HDR descriptor
 1878  */
 1879 static __inline void
 1880 nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size, uint64_t data)
 1881 {
 1882         struct sq_gather_subdesc *gather;
 1883 
 1884         qentry &= (sq->dmem.q_len - 1);
 1885         gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
 1886 
 1887         memset(gather, 0, SND_QUEUE_DESC_SIZE);
 1888         gather->subdesc_type = SQ_DESC_TYPE_GATHER;
 1889         gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
 1890         gather->size = size;
 1891         gather->addr = data;
 1892 }
 1893 
 1894 /* Put an mbuf into an SQ for packet transfer. */
 1895 static int
 1896 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
 1897 {
 1898         bus_dma_segment_t segs[256];
 1899         struct snd_buff *snd_buff;
 1900         size_t seg;
 1901         int nsegs, qentry;
 1902         int subdesc_cnt;
 1903         int err;
 1904 
 1905         NICVF_TX_LOCK_ASSERT(sq);
 1906 
 1907         if (sq->free_cnt == 0)
 1908                 return (ENOBUFS);
 1909 
 1910         snd_buff = &sq->snd_buff[sq->tail];
 1911 
 1912         err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
 1913             *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
 1914         if (__predict_false(err != 0)) {
 1915                 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
 1916                 m_freem(*mbufp);
 1917                 *mbufp = NULL;
 1918                 return (err);
 1919         }
 1920 
 1921         /* Determine how many subdescriptors are required */
 1922         subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
 1923         if (subdesc_cnt > sq->free_cnt) {
 1924                 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
 1925                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1926                 return (ENOBUFS);
 1927         }
 1928 
 1929         qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 1930 
 1931         /* Add SQ header subdesc */
 1932         err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
 1933             (*mbufp)->m_pkthdr.len);
 1934         if (err != 0) {
 1935                 nicvf_put_sq_desc(sq, subdesc_cnt);
 1936                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1937                 if (err == ENOBUFS) {
 1938                         m_freem(*mbufp);
 1939                         *mbufp = NULL;
 1940                 }
 1941                 return (err);
 1942         }
 1943 
 1944         /* Add SQ gather subdescs */
 1945         for (seg = 0; seg < nsegs; seg++) {
 1946                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
 1947                 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
 1948                     segs[seg].ds_addr);
 1949         }
 1950 
 1951         /* make sure all memory stores are done before ringing doorbell */
 1952         bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
 1953 
 1954         dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
 1955             __func__, sq->idx, subdesc_cnt);
 1956         /* Inform HW to xmit new packet */
 1957         nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
 1958             sq->idx, subdesc_cnt);
 1959         return (0);
 1960 }
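      /*
       * Illustrative caller sketch (hypothetical, not part of this file):
       * the routine expects the SQ lock to be held and may consume the mbuf
       * on error, so a transmit path would look roughly like:
       *
       *	NICVF_TX_LOCK(sq);
       *	err = nicvf_tx_mbuf_locked(sq, &mbuf);
       *	NICVF_TX_UNLOCK(sq);
       *	if (err != 0 && mbuf != NULL)
       *		m_freem(mbuf);		(or requeue for a later retry)
       */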
 1961 
 1962 static __inline u_int
 1963 frag_num(u_int i)
 1964 {
 1965 #if BYTE_ORDER == BIG_ENDIAN
 1966         return ((i & ~3) + 3 - (i & 3));
 1967 #else
 1968         return (i);
 1969 #endif
 1970 }
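      /*
       * frag_num() compensates for byte order when indexing the rb_lens
       * array below: the buffer lengths appear to be 16-bit fields packed
       * four to a 64-bit word, so on big-endian hosts the index is remapped
       * within its group of four ((i & ~3) + 3 - (i & 3)); on little-endian
       * hosts it is used unchanged.
       */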
 1971 
 1972 /* Returns MBUF for a received packet */
 1973 struct mbuf *
 1974 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 1975 {
 1976         int frag;
 1977         int payload_len = 0;
 1978         struct mbuf *mbuf;
 1979         struct mbuf *mbuf_frag;
 1980         uint16_t *rb_lens = NULL;
 1981         uint64_t *rb_ptrs = NULL;
 1982 
 1983         mbuf = NULL;
 1984         rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
 1985         rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
 1986 
 1987         dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
 1988             __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
 1989 
 1990         for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
 1991                 payload_len = rb_lens[frag_num(frag)];
 1992                 if (frag == 0) {
 1993                         /* First fragment */
 1994                         mbuf = nicvf_rb_ptr_to_mbuf(nic,
 1995                             (*rb_ptrs - cqe_rx->align_pad));
 1996                         mbuf->m_len = payload_len;
 1997                         mbuf->m_data += cqe_rx->align_pad;
 1998                         if_setrcvif(mbuf, nic->ifp);
 1999                 } else {
 2000                         /* Add fragments */
 2001                         mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
 2002                         m_append(mbuf, payload_len, mbuf_frag->m_data);
 2003                         m_freem(mbuf_frag);
 2004                 }
 2005                 /* Next buffer pointer */
 2006                 rb_ptrs++;
 2007         }
 2008 
 2009         if (__predict_true(mbuf != NULL)) {
 2010                 m_fixhdr(mbuf);
 2011                 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
 2012                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
 2013                 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
 2014                         /*
 2015                          * HW by default verifies IP & TCP/UDP/SCTP checksums
 2016                          */
 2017                         if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
 2018                                 mbuf->m_pkthdr.csum_flags =
 2019                                     (CSUM_IP_CHECKED | CSUM_IP_VALID);
 2020                         }
 2021 
 2022                         switch (cqe_rx->l4_type) {
 2023                         case L4TYPE_UDP:
 2024                         case L4TYPE_TCP: /* fall through */
 2025                                 mbuf->m_pkthdr.csum_flags |=
 2026                                     (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 2027                                 mbuf->m_pkthdr.csum_data = 0xffff;
 2028                                 break;
 2029                         case L4TYPE_SCTP:
 2030                                 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
 2031                                 break;
 2032                         default:
 2033                                 break;
 2034                         }
 2035                 }
 2036         }
 2037 
 2038         return (mbuf);
 2039 }
 2040 
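      /*
       * Register naming convention (inferred from the _W1S/_W1C suffixes):
       * writing 1s to NIC_VF_ENA_W1S sets interrupt-enable bits, writing 1s
       * to NIC_VF_ENA_W1C clears them, and writing 1s to NIC_VF_INT
       * acknowledges pending interrupts.  That is why the disable and clear
       * paths below write a plain mask instead of doing a read-modify-write.
       */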
 2041 /* Enable interrupt */
 2042 void
 2043 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
 2044 {
 2045         uint64_t reg_val;
 2046 
 2047         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2048 
 2049         switch (int_type) {
 2050         case NICVF_INTR_CQ:
 2051                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2052                 break;
 2053         case NICVF_INTR_SQ:
 2054                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2055                 break;
 2056         case NICVF_INTR_RBDR:
 2057                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2058                 break;
 2059         case NICVF_INTR_PKT_DROP:
 2060                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2061                 break;
 2062         case NICVF_INTR_TCP_TIMER:
 2063                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2064                 break;
 2065         case NICVF_INTR_MBOX:
 2066                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2067                 break;
 2068         case NICVF_INTR_QS_ERR:
 2069                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2070                 break;
 2071         default:
 2072                 device_printf(nic->dev,
 2073                            "Failed to enable interrupt: unknown type\n");
 2074                 break;
 2075         }
 2076 
 2077         nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
 2078 }
 2079 
 2080 /* Disable interrupt */
 2081 void
 2082 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
 2083 {
 2084         uint64_t reg_val = 0;
 2085 
 2086         switch (int_type) {
 2087         case NICVF_INTR_CQ:
 2088                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2089                 break;
 2090         case NICVF_INTR_SQ:
 2091                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2092                 break;
 2093         case NICVF_INTR_RBDR:
 2094                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2095                 break;
 2096         case NICVF_INTR_PKT_DROP:
 2097                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2098                 break;
 2099         case NICVF_INTR_TCP_TIMER:
 2100                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2101                 break;
 2102         case NICVF_INTR_MBOX:
 2103                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2104                 break;
 2105         case NICVF_INTR_QS_ERR:
 2106                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2107                 break;
 2108         default:
 2109                 device_printf(nic->dev,
 2110                            "Failed to disable interrupt: unknown type\n");
 2111                 break;
 2112         }
 2113 
 2114         nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
 2115 }
 2116 
 2117 /* Clear interrupt */
 2118 void
 2119 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
 2120 {
 2121         uint64_t reg_val = 0;
 2122 
 2123         switch (int_type) {
 2124         case NICVF_INTR_CQ:
 2125                 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2126                 break;
 2127         case NICVF_INTR_SQ:
 2128                 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2129                 break;
 2130         case NICVF_INTR_RBDR:
 2131                 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2132                 break;
 2133         case NICVF_INTR_PKT_DROP:
 2134                 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2135                 break;
 2136         case NICVF_INTR_TCP_TIMER:
 2137                 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2138                 break;
 2139         case NICVF_INTR_MBOX:
 2140                 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
 2141                 break;
 2142         case NICVF_INTR_QS_ERR:
 2143                 reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2144                 break;
 2145         default:
 2146                 device_printf(nic->dev,
 2147                            "Failed to clear interrupt: unknown type\n");
 2148                 break;
 2149         }
 2150 
 2151         nicvf_reg_write(nic, NIC_VF_INT, reg_val);
 2152 }
 2153 
 2154 /* Check if interrupt is enabled */
 2155 int
 2156 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
 2157 {
 2158         uint64_t reg_val;
 2159         uint64_t mask = 0xff;
 2160 
 2161         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2162 
 2163         switch (int_type) {
 2164         case NICVF_INTR_CQ:
 2165                 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2166                 break;
 2167         case NICVF_INTR_SQ:
 2168                 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2169                 break;
 2170         case NICVF_INTR_RBDR:
 2171                 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2172                 break;
 2173         case NICVF_INTR_PKT_DROP:
 2174                 mask = NICVF_INTR_PKT_DROP_MASK;
 2175                 break;
 2176         case NICVF_INTR_TCP_TIMER:
 2177                 mask = NICVF_INTR_TCP_TIMER_MASK;
 2178                 break;
 2179         case NICVF_INTR_MBOX:
 2180                 mask = NICVF_INTR_MBOX_MASK;
 2181                 break;
 2182         case NICVF_INTR_QS_ERR:
 2183                 mask = NICVF_INTR_QS_ERR_MASK;
 2184                 break;
 2185         default:
 2186                 device_printf(nic->dev,
 2187                            "Failed to check interrupt enable: unknown type\n");
 2188                 break;
 2189         }
 2190 
 2191         return (reg_val & mask);
 2192 }
 2193 
 2194 void
 2195 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
 2196 {
 2197         struct rcv_queue *rq;
 2198 
 2199 #define GET_RQ_STATS(reg) \
 2200         nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
 2201                             (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2202 
 2203         rq = &nic->qs->rq[rq_idx];
 2204         rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
 2205         rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
 2206 }
 2207 
 2208 void
 2209 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 2210 {
 2211         struct snd_queue *sq;
 2212 
 2213 #define GET_SQ_STATS(reg) \
 2214         nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
 2215                             (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2216 
 2217         sq = &nic->qs->sq[sq_idx];
 2218         sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
 2219         sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
 2220 }
 2221 
 2222 /* Check for errors in the receive completion queue entry */
 2223 int
 2224 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2225     struct cqe_rx_t *cqe_rx)
 2226 {
 2227         struct nicvf_hw_stats *stats = &nic->hw_stats;
 2228         struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 2229 
 2230         if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
 2231                 drv_stats->rx_frames_ok++;
 2232                 return (0);
 2233         }
 2234 
 2235         switch (cqe_rx->err_opcode) {
 2236         case CQ_RX_ERROP_RE_PARTIAL:
 2237                 stats->rx_bgx_truncated_pkts++;
 2238                 break;
 2239         case CQ_RX_ERROP_RE_JABBER:
 2240                 stats->rx_jabber_errs++;
 2241                 break;
 2242         case CQ_RX_ERROP_RE_FCS:
 2243                 stats->rx_fcs_errs++;
 2244                 break;
 2245         case CQ_RX_ERROP_RE_RX_CTL:
 2246                 stats->rx_bgx_errs++;
 2247                 break;
 2248         case CQ_RX_ERROP_PREL2_ERR:
 2249                 stats->rx_prel2_errs++;
 2250                 break;
 2251         case CQ_RX_ERROP_L2_MAL:
 2252                 stats->rx_l2_hdr_malformed++;
 2253                 break;
 2254         case CQ_RX_ERROP_L2_OVERSIZE:
 2255                 stats->rx_oversize++;
 2256                 break;
 2257         case CQ_RX_ERROP_L2_UNDERSIZE:
 2258                 stats->rx_undersize++;
 2259                 break;
 2260         case CQ_RX_ERROP_L2_LENMISM:
 2261                 stats->rx_l2_len_mismatch++;
 2262                 break;
 2263         case CQ_RX_ERROP_L2_PCLP:
 2264                 stats->rx_l2_pclp++;
 2265                 break;
 2266         case CQ_RX_ERROP_IP_NOT:
 2267                 stats->rx_ip_ver_errs++;
 2268                 break;
 2269         case CQ_RX_ERROP_IP_CSUM_ERR:
 2270                 stats->rx_ip_csum_errs++;
 2271                 break;
 2272         case CQ_RX_ERROP_IP_MAL:
 2273                 stats->rx_ip_hdr_malformed++;
 2274                 break;
 2275         case CQ_RX_ERROP_IP_MALD:
 2276                 stats->rx_ip_payload_malformed++;
 2277                 break;
 2278         case CQ_RX_ERROP_IP_HOP:
 2279                 stats->rx_ip_ttl_errs++;
 2280                 break;
 2281         case CQ_RX_ERROP_L3_PCLP:
 2282                 stats->rx_l3_pclp++;
 2283                 break;
 2284         case CQ_RX_ERROP_L4_MAL:
 2285                 stats->rx_l4_malformed++;
 2286                 break;
 2287         case CQ_RX_ERROP_L4_CHK:
 2288                 stats->rx_l4_csum_errs++;
 2289                 break;
 2290         case CQ_RX_ERROP_UDP_LEN:
 2291                 stats->rx_udp_len_errs++;
 2292                 break;
 2293         case CQ_RX_ERROP_L4_PORT:
 2294                 stats->rx_l4_port_errs++;
 2295                 break;
 2296         case CQ_RX_ERROP_TCP_FLAG:
 2297                 stats->rx_tcp_flag_errs++;
 2298                 break;
 2299         case CQ_RX_ERROP_TCP_OFFSET:
 2300                 stats->rx_tcp_offset_errs++;
 2301                 break;
 2302         case CQ_RX_ERROP_L4_PCLP:
 2303                 stats->rx_l4_pclp++;
 2304                 break;
 2305         case CQ_RX_ERROP_RBDR_TRUNC:
 2306                 stats->rx_truncated_pkts++;
 2307                 break;
 2308         }
 2309 
 2310         return (1);
 2311 }
 2312 
 2313 /* Check for errors in the send completion queue entry */
 2314 int
 2315 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2316     struct cqe_send_t *cqe_tx)
 2317 {
 2318         struct cmp_queue_stats *stats = &cq->stats;
 2319 
 2320         switch (cqe_tx->send_status) {
 2321         case CQ_TX_ERROP_GOOD:
 2322                 stats->tx.good++;
 2323                 return (0);
 2324         case CQ_TX_ERROP_DESC_FAULT:
 2325                 stats->tx.desc_fault++;
 2326                 break;
 2327         case CQ_TX_ERROP_HDR_CONS_ERR:
 2328                 stats->tx.hdr_cons_err++;
 2329                 break;
 2330         case CQ_TX_ERROP_SUBDC_ERR:
 2331                 stats->tx.subdesc_err++;
 2332                 break;
 2333         case CQ_TX_ERROP_IMM_SIZE_OFLOW:
 2334                 stats->tx.imm_size_oflow++;
 2335                 break;
 2336         case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
 2337                 stats->tx.data_seq_err++;
 2338                 break;
 2339         case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
 2340                 stats->tx.mem_seq_err++;
 2341                 break;
 2342         case CQ_TX_ERROP_LOCK_VIOL:
 2343                 stats->tx.lock_viol++;
 2344                 break;
 2345         case CQ_TX_ERROP_DATA_FAULT:
 2346                 stats->tx.data_fault++;
 2347                 break;
 2348         case CQ_TX_ERROP_TSTMP_CONFLICT:
 2349                 stats->tx.tstmp_conflict++;
 2350                 break;
 2351         case CQ_TX_ERROP_TSTMP_TIMEOUT:
 2352                 stats->tx.tstmp_timeout++;
 2353                 break;
 2354         case CQ_TX_ERROP_MEM_FAULT:
 2355                 stats->tx.mem_fault++;
 2356                 break;
 2357         case CQ_TX_ERROP_CK_OVERLAP:
 2358                 stats->tx.csum_overlap++;
 2359                 break;
 2360         case CQ_TX_ERROP_CK_OFLOW:
 2361                 stats->tx.csum_overflow++;
 2362                 break;
 2363         }
 2364 
 2365         return (1);
 2366 }
