FreeBSD/Linux Kernel Cross Reference
sys/dev/vnic/nicvf_queues.c


    1 /*
    2  * Copyright (C) 2015 Cavium Inc.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD$
   27  *
   28  */
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "opt_inet.h"
   33 #include "opt_inet6.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/bitset.h>
   38 #include <sys/bitstring.h>
   39 #include <sys/buf_ring.h>
   40 #include <sys/bus.h>
   41 #include <sys/endian.h>
   42 #include <sys/kernel.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/pciio.h>
   47 #include <sys/pcpu.h>
   48 #include <sys/proc.h>
   49 #include <sys/sockio.h>
   50 #include <sys/socket.h>
   51 #include <sys/stdatomic.h>
   52 #include <sys/cpuset.h>
   53 #include <sys/lock.h>
   54 #include <sys/mutex.h>
   55 #include <sys/smp.h>
   56 #include <sys/taskqueue.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/pmap.h>
   60 
   61 #include <machine/bus.h>
   62 #include <machine/vmparam.h>
   63 
   64 #include <net/if.h>
   65 #include <net/if_var.h>
   66 #include <net/if_media.h>
   67 #include <net/ifq.h>
   68 #include <net/bpf.h>
   69 #include <net/ethernet.h>
   70 
   71 #include <netinet/in_systm.h>
   72 #include <netinet/in.h>
   73 #include <netinet/if_ether.h>
   74 #include <netinet/ip.h>
   75 #include <netinet/ip6.h>
   76 #include <netinet/sctp.h>
   77 #include <netinet/tcp.h>
   78 #include <netinet/tcp_lro.h>
   79 #include <netinet/udp.h>
   80 
   81 #include <netinet6/ip6_var.h>
   82 
   83 #include <dev/pci/pcireg.h>
   84 #include <dev/pci/pcivar.h>
   85 
   86 #include "thunder_bgx.h"
   87 #include "nic_reg.h"
   88 #include "nic.h"
   89 #include "q_struct.h"
   90 #include "nicvf_queues.h"
   91 
   92 #define DEBUG
   93 #undef DEBUG
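       /*
        * Note: debug printing is compiled out by default; dprintf() expands to
        * nothing unless the #undef above is removed so that DEBUG stays defined.
        */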
   94 
   95 #ifdef DEBUG
   96 #define dprintf(dev, fmt, ...)  device_printf(dev, fmt, ##__VA_ARGS__)
   97 #else
   98 #define dprintf(dev, fmt, ...)
   99 #endif
  100 
  101 MALLOC_DECLARE(M_NICVF);
  102 
  103 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
  104 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
  105 static void nicvf_sq_disable(struct nicvf *, int);
  106 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
  107 static void nicvf_put_sq_desc(struct snd_queue *, int);
  108 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
  109     boolean_t);
  110 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
  111 
  112 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
  113 
  114 static void nicvf_rbdr_task(void *, int);
  115 static void nicvf_rbdr_task_nowait(void *, int);
  116 
  117 struct rbuf_info {
  118         bus_dma_tag_t   dmat;
  119         bus_dmamap_t    dmap;
  120         struct mbuf *   mbuf;
  121 };
  122 
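       /*
        * nicvf_alloc_rcv_buffer() stores a struct rbuf_info in the
        * NICVF_RCV_BUF_ALIGN_BYTES pad preceding the aligned buffer start;
        * the macro below steps back from a buffer address to recover it.
        */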
  123 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
  124 
  125 /* Poll a register for a specific value */
  126 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
  127                           uint64_t reg, int bit_pos, int bits, int val)
  128 {
  129         uint64_t bit_mask;
  130         uint64_t reg_val;
  131         int timeout = 10;
  132 
  133         bit_mask = (1UL << bits) - 1;
  134         bit_mask = (bit_mask << bit_pos);
  135 
  136         while (timeout) {
  137                 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
  138                 if (((reg_val & bit_mask) >> bit_pos) == val)
  139                         return (0);
  140 
  141                 DELAY(1000);
  142                 timeout--;
  143         }
  144         device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
  145         return (ETIMEDOUT);
  146 }
  147 
  148 /* Callback for bus_dmamap_load() */
  149 static void
  150 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  151 {
  152         bus_addr_t *paddr;
  153 
  154         KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
  155         paddr = arg;
  156         *paddr = segs->ds_addr;
  157 }
  158 
  159 /* Allocate memory for a queue's descriptors */
  160 static int
  161 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
  162     int q_len, int desc_size, int align_bytes)
  163 {
  164         int err, err_dmat;
  165 
  166         /* Create DMA tag first */
  167         err = bus_dma_tag_create(
  168             bus_get_dma_tag(nic->dev),          /* parent tag */
  169             align_bytes,                        /* alignment */
  170             0,                                  /* boundary */
  171             BUS_SPACE_MAXADDR,                  /* lowaddr */
  172             BUS_SPACE_MAXADDR,                  /* highaddr */
  173             NULL, NULL,                         /* filtfunc, filtfuncarg */
  174             (q_len * desc_size),                /* maxsize */
  175             1,                                  /* nsegments */
  176             (q_len * desc_size),                /* maxsegsize */
  177             0,                                  /* flags */
  178             NULL, NULL,                         /* lockfunc, lockfuncarg */
  179             &dmem->dmat);                       /* dmat */
  180 
  181         if (err != 0) {
  182                 device_printf(nic->dev,
  183                     "Failed to create busdma tag for descriptors ring\n");
  184                 return (err);
  185         }
  186 
   187         /* Allocate a segment of contiguous DMA-safe memory */
  188         err = bus_dmamem_alloc(
  189             dmem->dmat,                         /* DMA tag */
  190             &dmem->base,                        /* virtual address */
  191             (BUS_DMA_NOWAIT | BUS_DMA_ZERO),    /* flags */
  192             &dmem->dmap);                       /* DMA map */
  193         if (err != 0) {
   194                 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
   195                     "descriptors ring\n");
  196                 goto dmamem_fail;
  197         }
  198 
  199         err = bus_dmamap_load(
  200             dmem->dmat,
  201             dmem->dmap,
  202             dmem->base,
  203             (q_len * desc_size),                /* allocation size */
  204             nicvf_dmamap_q_cb,                  /* map to DMA address cb. */
  205             &dmem->phys_base,                   /* physical address */
  206             BUS_DMA_NOWAIT);
  207         if (err != 0) {
  208                 device_printf(nic->dev,
  209                     "Cannot load DMA map of descriptors ring\n");
  210                 goto dmamap_fail;
  211         }
  212 
  213         dmem->q_len = q_len;
  214         dmem->size = (desc_size * q_len);
  215 
  216         return (0);
  217 
  218 dmamap_fail:
  219         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  220         dmem->phys_base = 0;
  221 dmamem_fail:
  222         err_dmat = bus_dma_tag_destroy(dmem->dmat);
  223         dmem->base = NULL;
  224         KASSERT(err_dmat == 0,
  225             ("%s: Trying to destroy BUSY DMA tag", __func__));
  226 
  227         return (err);
  228 }
  229 
  230 /* Free queue's descriptor memory */
  231 static void
  232 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
  233 {
  234         int err;
  235 
  236         if ((dmem == NULL) || (dmem->base == NULL))
  237                 return;
  238 
  239         /* Unload a map */
  240         bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
  241         bus_dmamap_unload(dmem->dmat, dmem->dmap);
  242         /* Free DMA memory */
  243         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  244         /* Destroy DMA tag */
  245         err = bus_dma_tag_destroy(dmem->dmat);
  246 
  247         KASSERT(err == 0,
  248             ("%s: Trying to destroy BUSY DMA tag", __func__));
  249 
  250         dmem->phys_base = 0;
  251         dmem->base = NULL;
  252 }
  253 
  254 /*
  255  * Allocate buffer for packet reception
   256  * The HW returns the memory address to which the packet was DMA'ed, not a
   257  * pointer into the RBDR ring, so save the buffer information at the start
   258  * of the fragment and align the start address to a cache-line boundary.
  259  */
  260 static __inline int
  261 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
  262     bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
  263 {
  264         struct mbuf *mbuf;
  265         struct rbuf_info *rinfo;
  266         bus_dma_segment_t segs[1];
  267         int nsegs;
  268         int err;
  269 
  270         mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
  271         if (mbuf == NULL)
  272                 return (ENOMEM);
  273 
  274         /*
   275          * The length is equal to the actual length plus one 128-byte line
   276          * used as room for the rbuf_info structure.
  277          */
  278         mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
  279 
  280         err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
  281             &nsegs, BUS_DMA_NOWAIT);
  282         if (err != 0) {
  283                 device_printf(nic->dev,
  284                     "Failed to map mbuf into DMA visible memory, err: %d\n",
  285                     err);
  286                 m_freem(mbuf);
  287                 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
  288                 return (err);
  289         }
  290         if (nsegs != 1)
  291                 panic("Unexpected number of DMA segments for RB: %d", nsegs);
  292         /*
  293          * Now use the room for rbuf_info structure
  294          * and adjust mbuf data and length.
  295          */
  296         rinfo = (struct rbuf_info *)mbuf->m_data;
  297         m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
  298 
  299         rinfo->dmat = rbdr->rbdr_buff_dmat;
  300         rinfo->dmap = dmap;
  301         rinfo->mbuf = mbuf;
  302 
  303         *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
  304 
  305         return (0);
  306 }
  307 
  308 /* Retrieve mbuf for received packet */
  309 static struct mbuf *
  310 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
  311 {
  312         struct mbuf *mbuf;
  313         struct rbuf_info *rinfo;
  314 
  315         /* Get buffer start address and alignment offset */
  316         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
  317 
  318         /* Now retrieve mbuf to give to stack */
  319         mbuf = rinfo->mbuf;
  320         if (__predict_false(mbuf == NULL)) {
  321                 panic("%s: Received packet fragment with NULL mbuf",
  322                     device_get_nameunit(nic->dev));
  323         }
  324         /*
  325          * Clear the mbuf in the descriptor to indicate
  326          * that this slot is processed and free to use.
  327          */
  328         rinfo->mbuf = NULL;
  329 
  330         bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
  331         bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
  332 
  333         return (mbuf);
  334 }
  335 
  336 /* Allocate RBDR ring and populate receive buffers */
  337 static int
  338 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
  339     int buf_size, int qidx)
  340 {
  341         bus_dmamap_t dmap;
  342         bus_addr_t rbuf;
  343         struct rbdr_entry_t *desc;
  344         int idx;
  345         int err;
  346 
  347         /* Allocate rbdr descriptors ring */
  348         err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
  349             sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
  350         if (err != 0) {
  351                 device_printf(nic->dev,
  352                     "Failed to create RBDR descriptors ring\n");
  353                 return (err);
  354         }
  355 
  356         rbdr->desc = rbdr->dmem.base;
  357         /*
   358          * Buffer size has to be a multiple of 128 bytes.
   359          * Make room for metadata the size of one cache line (128 bytes).
  360          */
  361         rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
  362         rbdr->enable = TRUE;
  363         rbdr->thresh = RBDR_THRESH;
  364         rbdr->nic = nic;
  365         rbdr->idx = qidx;
  366 
  367         /*
  368          * Create DMA tag for Rx buffers.
  369          * Each map created using this tag is intended to store Rx payload for
  370          * one fragment and one header structure containing rbuf_info (thus
  371          * additional 128 byte line since RB must be a multiple of 128 byte
  372          * cache line).
  373          */
  374         if (buf_size > MCLBYTES) {
  375                 device_printf(nic->dev,
   376                     "Buffer size too large for mbuf cluster\n");
  377                 return (EINVAL);
  378         }
  379         err = bus_dma_tag_create(
  380             bus_get_dma_tag(nic->dev),          /* parent tag */
  381             NICVF_RCV_BUF_ALIGN_BYTES,          /* alignment */
  382             0,                                  /* boundary */
  383             DMAP_MAX_PHYSADDR,                  /* lowaddr */
  384             DMAP_MIN_PHYSADDR,                  /* highaddr */
  385             NULL, NULL,                         /* filtfunc, filtfuncarg */
  386             roundup2(buf_size, MCLBYTES),       /* maxsize */
  387             1,                                  /* nsegments */
  388             roundup2(buf_size, MCLBYTES),       /* maxsegsize */
  389             0,                                  /* flags */
  390             NULL, NULL,                         /* lockfunc, lockfuncarg */
  391             &rbdr->rbdr_buff_dmat);             /* dmat */
  392 
  393         if (err != 0) {
  394                 device_printf(nic->dev,
  395                     "Failed to create busdma tag for RBDR buffers\n");
  396                 return (err);
  397         }
  398 
  399         rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
  400             ring_len, M_NICVF, (M_WAITOK | M_ZERO));
  401 
  402         for (idx = 0; idx < ring_len; idx++) {
  403                 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
  404                 if (err != 0) {
  405                         device_printf(nic->dev,
  406                             "Failed to create DMA map for RB\n");
  407                         return (err);
  408                 }
  409                 rbdr->rbdr_buff_dmaps[idx] = dmap;
  410 
  411                 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
  412                     DMA_BUFFER_LEN, &rbuf);
  413                 if (err != 0)
  414                         return (err);
  415 
  416                 desc = GET_RBDR_DESC(rbdr, idx);
  417                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  418         }
  419 
  420         /* Allocate taskqueue */
  421         TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
  422         TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
  423         rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
  424             taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
  425         taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
  426             device_get_nameunit(nic->dev));
  427 
  428         return (0);
  429 }
  430 
  431 /* Free RBDR ring and its receive buffers */
  432 static void
  433 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
  434 {
  435         struct mbuf *mbuf;
  436         struct queue_set *qs;
  437         struct rbdr_entry_t *desc;
  438         struct rbuf_info *rinfo;
  439         bus_addr_t buf_addr;
  440         int head, tail, idx;
  441         int err;
  442 
  443         qs = nic->qs;
  444 
  445         if ((qs == NULL) || (rbdr == NULL))
  446                 return;
  447 
  448         rbdr->enable = FALSE;
  449         if (rbdr->rbdr_taskq != NULL) {
  450                 /* Remove tasks */
  451                 while (taskqueue_cancel(rbdr->rbdr_taskq,
  452                     &rbdr->rbdr_task_nowait, NULL) != 0) {
  453                         /* Finish the nowait task first */
  454                         taskqueue_drain(rbdr->rbdr_taskq,
  455                             &rbdr->rbdr_task_nowait);
  456                 }
  457                 taskqueue_free(rbdr->rbdr_taskq);
  458                 rbdr->rbdr_taskq = NULL;
  459 
  460                 while (taskqueue_cancel(taskqueue_thread,
  461                     &rbdr->rbdr_task, NULL) != 0) {
  462                         /* Now finish the sleepable task */
  463                         taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
  464                 }
  465         }
  466 
  467         /*
  468          * Free all of the memory under the RB descriptors.
  469          * There are assumptions here:
  470          * 1. Corresponding RBDR is disabled
  471          *    - it is safe to operate using head and tail indexes
   472          * 2. All buffers that were received are properly freed by
   473          *    the receive handler
   474          *    - there is no need to unload the DMA map and free the mbuf for
   475          *      any descriptors other than the unused ones
  476          */
  477         if (rbdr->rbdr_buff_dmat != NULL) {
  478                 head = rbdr->head;
  479                 tail = rbdr->tail;
  480                 while (head != tail) {
  481                         desc = GET_RBDR_DESC(rbdr, head);
  482                         buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  483                         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  484                         bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  485                         mbuf = rinfo->mbuf;
  486                         /* This will destroy everything including rinfo! */
  487                         m_freem(mbuf);
  488                         head++;
  489                         head &= (rbdr->dmem.q_len - 1);
  490                 }
  491                 /* Free tail descriptor */
  492                 desc = GET_RBDR_DESC(rbdr, tail);
  493                 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  494                 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  495                 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  496                 mbuf = rinfo->mbuf;
  497                 /* This will destroy everything including rinfo! */
  498                 m_freem(mbuf);
  499 
  500                 /* Destroy DMA maps */
  501                 for (idx = 0; idx < qs->rbdr_len; idx++) {
  502                         if (rbdr->rbdr_buff_dmaps[idx] == NULL)
  503                                 continue;
  504                         err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
  505                             rbdr->rbdr_buff_dmaps[idx]);
  506                         KASSERT(err == 0,
  507                             ("%s: Could not destroy DMA map for RB, desc: %d",
  508                             __func__, idx));
  509                         rbdr->rbdr_buff_dmaps[idx] = NULL;
  510                 }
  511 
  512                 /* Now destroy the tag */
  513                 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
  514                 KASSERT(err == 0,
  515                     ("%s: Trying to destroy BUSY DMA tag", __func__));
  516 
  517                 rbdr->head = 0;
  518                 rbdr->tail = 0;
  519         }
  520 
  521         /* Free RBDR ring */
  522         nicvf_free_q_desc_mem(nic, &rbdr->dmem);
  523 }
  524 
  525 /*
  526  * Refill receive buffer descriptors with new buffers.
  527  */
  528 static int
  529 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
  530 {
  531         struct nicvf *nic;
  532         struct queue_set *qs;
  533         int rbdr_idx;
  534         int tail, qcount;
  535         int refill_rb_cnt;
  536         struct rbdr_entry_t *desc;
  537         bus_dmamap_t dmap;
  538         bus_addr_t rbuf;
  539         boolean_t rb_alloc_fail;
  540         int new_rb;
  541 
  542         rb_alloc_fail = TRUE;
  543         new_rb = 0;
  544         nic = rbdr->nic;
  545         qs = nic->qs;
  546         rbdr_idx = rbdr->idx;
  547 
  548         /* Check if it's enabled */
  549         if (!rbdr->enable)
  550                 return (0);
  551 
   552         /* Get the number of descriptors to be refilled */
  553         qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
  554         qcount &= 0x7FFFF;
   555         /* The doorbell can be rung with at most ring size minus 1 */
  556         if (qcount >= (qs->rbdr_len - 1)) {
  557                 rb_alloc_fail = FALSE;
  558                 goto out;
  559         } else
  560                 refill_rb_cnt = qs->rbdr_len - qcount - 1;
  561 
  562         /* Start filling descs from tail */
  563         tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
  564         while (refill_rb_cnt) {
  565                 tail++;
  566                 tail &= (rbdr->dmem.q_len - 1);
  567 
  568                 dmap = rbdr->rbdr_buff_dmaps[tail];
  569                 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
  570                     DMA_BUFFER_LEN, &rbuf)) {
   571                         /* Something went wrong. Give up. */
  572                         break;
  573                 }
  574                 desc = GET_RBDR_DESC(rbdr, tail);
  575                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  576                 refill_rb_cnt--;
  577                 new_rb++;
  578         }
  579 
  580         /* make sure all memory stores are done before ringing doorbell */
  581         wmb();
  582 
  583         /* Check if buffer allocation failed */
  584         if (refill_rb_cnt == 0)
  585                 rb_alloc_fail = FALSE;
  586 
  587         /* Notify HW */
  588         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
  589                               rbdr_idx, new_rb);
  590 out:
  591         if (!rb_alloc_fail) {
  592                 /*
  593                  * Re-enable RBDR interrupts only
   594                  * if buffer allocation succeeded.
  595                  */
  596                 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
  597 
  598                 return (0);
  599         }
  600 
  601         return (ENOMEM);
  602 }
  603 
  604 /* Refill RBs even if sleep is needed to reclaim memory */
  605 static void
  606 nicvf_rbdr_task(void *arg, int pending)
  607 {
  608         struct rbdr *rbdr;
  609         int err;
  610 
  611         rbdr = (struct rbdr *)arg;
  612 
  613         err = nicvf_refill_rbdr(rbdr, M_WAITOK);
  614         if (__predict_false(err != 0)) {
  615                 panic("%s: Failed to refill RBs even when sleep enabled",
  616                     __func__);
  617         }
  618 }
  619 
  620 /* Refill RBs as soon as possible without waiting */
  621 static void
  622 nicvf_rbdr_task_nowait(void *arg, int pending)
  623 {
  624         struct rbdr *rbdr;
  625         int err;
  626 
  627         rbdr = (struct rbdr *)arg;
  628 
  629         err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
  630         if (err != 0) {
  631                 /*
   632                  * Schedule the sleepable task, which can wait for
   633                  * memory and is therefore sure to refill the buffers.
  634                  */
  635                 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
  636         }
  637 }
  638 
  639 static int
  640 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  641     struct cqe_rx_t *cqe_rx, int cqe_type)
  642 {
  643         struct mbuf *mbuf;
  644         struct rcv_queue *rq;
  645         int rq_idx;
  646         int err = 0;
  647 
  648         rq_idx = cqe_rx->rq_idx;
  649         rq = &nic->qs->rq[rq_idx];
  650 
  651         /* Check for errors */
  652         err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
  653         if (err && !cqe_rx->rb_cnt)
  654                 return (0);
  655 
  656         mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
  657         if (mbuf == NULL) {
  658                 dprintf(nic->dev, "Packet not received\n");
  659                 return (0);
  660         }
  661 
  662         /* If error packet */
  663         if (err != 0) {
  664                 m_freem(mbuf);
  665                 return (0);
  666         }
  667 
  668         if (rq->lro_enabled &&
  669             ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
  670             (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
  671             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
  672                 /*
  673                  * At this point it is known that there are no errors in the
  674                  * packet. Attempt to LRO enqueue. Send to stack if no resources
  675                  * or enqueue error.
  676                  */
  677                 if ((rq->lro.lro_cnt != 0) &&
  678                     (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
  679                         return (0);
  680         }
  681         /*
  682          * Push this packet to the stack later to avoid
   683          * unlocking the completion task in the middle of its work.
  684          */
  685         err = buf_ring_enqueue(cq->rx_br, mbuf);
  686         if (err != 0) {
  687                 /*
  688                  * Failed to enqueue this mbuf.
  689                  * We don't drop it, just schedule another task.
  690                  */
  691                 return (err);
  692         }
  693 
  694         return (0);
  695 }
  696 
  697 static void
  698 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  699     struct cqe_send_t *cqe_tx, int cqe_type)
  700 {
  701         bus_dmamap_t dmap;
  702         struct mbuf *mbuf;
  703         struct snd_queue *sq;
  704         struct sq_hdr_subdesc *hdr;
  705 
  706         mbuf = NULL;
  707         sq = &nic->qs->sq[cqe_tx->sq_idx];
  708 
  709         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
  710         if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
  711                 return;
  712 
  713         dprintf(nic->dev,
  714             "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
  715             __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
  716             cqe_tx->sqe_ptr, hdr->subdesc_cnt);
  717 
  718         dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
  719         bus_dmamap_unload(sq->snd_buff_dmat, dmap);
  720 
  721         mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
  722         if (mbuf != NULL) {
  723                 m_freem(mbuf);
  724                 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
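                       /*
                        * Return the header subdescriptor and its subdesc_cnt
                        * trailing subdescriptors to the send queue.
                        */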
  725                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
  726         }
  727 
  728         nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
  729 }
  730 
  731 static int
  732 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
  733 {
  734         struct mbuf *mbuf;
  735         struct ifnet *ifp;
  736         int processed_cqe, work_done = 0, tx_done = 0;
  737         int cqe_count, cqe_head;
  738         struct queue_set *qs = nic->qs;
  739         struct cmp_queue *cq = &qs->cq[cq_idx];
  740         struct snd_queue *sq = &qs->sq[cq_idx];
  741         struct rcv_queue *rq;
  742         struct cqe_rx_t *cq_desc;
  743         struct lro_ctrl *lro;
  744         int rq_idx;
  745         int cmp_err;
  746 
  747         NICVF_CMP_LOCK(cq);
  748         cmp_err = 0;
  749         processed_cqe = 0;
   750         /* Get the number of valid CQ entries to process */
  751         cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
  752         cqe_count &= CQ_CQE_COUNT;
  753         if (cqe_count == 0)
  754                 goto out;
  755 
  756         /* Get head of the valid CQ entries */
  757         cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
  758         cqe_head &= 0xFFFF;
  759 
  760         dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
  761             __func__, cq_idx, cqe_count, cqe_head);
  762         while (processed_cqe < cqe_count) {
  763                 /* Get the CQ descriptor */
  764                 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
  765                 cqe_head++;
  766                 cqe_head &= (cq->dmem.q_len - 1);
  767                 /* Prefetch next CQ descriptor */
  768                 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
  769 
  770                 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
  771                     cq_desc->cqe_type);
  772                 switch (cq_desc->cqe_type) {
  773                 case CQE_TYPE_RX:
  774                         cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
  775                             CQE_TYPE_RX);
  776                         if (__predict_false(cmp_err != 0)) {
  777                                 /*
   778                          * Oops. Cannot finish now.
  779                                  * Let's try again later.
  780                                  */
  781                                 goto done;
  782                         }
  783                         work_done++;
  784                         break;
  785                 case CQE_TYPE_SEND:
  786                         nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
  787                             CQE_TYPE_SEND);
  788                         tx_done++;
  789                         break;
  790                 case CQE_TYPE_INVALID:
  791                 case CQE_TYPE_RX_SPLIT:
  792                 case CQE_TYPE_RX_TCP:
  793                 case CQE_TYPE_SEND_PTP:
  794                         /* Ignore for now */
  795                         break;
  796                 }
  797                 processed_cqe++;
  798         }
  799 done:
  800         dprintf(nic->dev,
  801             "%s CQ%d processed_cqe %d work_done %d\n",
  802             __func__, cq_idx, processed_cqe, work_done);
  803 
  804         /* Ring doorbell to inform H/W to reuse processed CQEs */
  805         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
  806 
  807         if ((tx_done > 0) &&
  808             ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
   809                 /* Re-enable the TXQ if it was stopped earlier because the SQ was full */
  810                 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  811                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
  812         }
  813 out:
  814         /*
  815          * Flush any outstanding LRO work
  816          */
  817         rq_idx = cq_idx;
  818         rq = &nic->qs->rq[rq_idx];
  819         lro = &rq->lro;
  820         tcp_lro_flush_all(lro);
  821 
  822         NICVF_CMP_UNLOCK(cq);
  823 
  824         ifp = nic->ifp;
  825         /* Push received MBUFs to the stack */
  826         while (!buf_ring_empty(cq->rx_br)) {
  827                 mbuf = buf_ring_dequeue_mc(cq->rx_br);
  828                 if (__predict_true(mbuf != NULL))
  829                         (*ifp->if_input)(ifp, mbuf);
  830         }
  831 
  832         return (cmp_err);
  833 }
  834 
  835 /*
  836  * Qset error interrupt handler
  837  *
  838  * As of now only CQ errors are handled
  839  */
  840 static void
  841 nicvf_qs_err_task(void *arg, int pending)
  842 {
  843         struct nicvf *nic;
  844         struct queue_set *qs;
  845         int qidx;
  846         uint64_t status;
  847         boolean_t enable = TRUE;
  848 
  849         nic = (struct nicvf *)arg;
  850         qs = nic->qs;
  851 
  852         /* Deactivate network interface */
  853         if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
  854 
  855         /* Check if it is CQ err */
  856         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
  857                 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
  858                     qidx);
  859                 if ((status & CQ_ERR_MASK) == 0)
  860                         continue;
  861                 /* Process already queued CQEs and reconfig CQ */
  862                 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
  863                 nicvf_sq_disable(nic, qidx);
  864                 (void)nicvf_cq_intr_handler(nic, qidx);
  865                 nicvf_cmp_queue_config(nic, qs, qidx, enable);
  866                 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
  867                 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
  868                 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
  869         }
  870 
  871         if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  872         /* Re-enable Qset error interrupt */
  873         nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
  874 }
  875 
  876 static void
  877 nicvf_cmp_task(void *arg, int pending)
  878 {
  879         struct cmp_queue *cq;
  880         struct nicvf *nic;
  881         int cmp_err;
  882 
  883         cq = (struct cmp_queue *)arg;
  884         nic = cq->nic;
  885 
  886         /* Handle CQ descriptors */
  887         cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
  888         if (__predict_false(cmp_err != 0)) {
  889                 /*
  890                  * Schedule another thread here since we did not
  891                  * process the entire CQ due to Tx or Rx CQ parse error.
  892                  */
  893                 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
  894         }
  895 
  896         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
   897         /* Re-enable the interrupt (previously disabled in nicvf_intr_handler()) */
  898         nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
  899 
  900 }
  901 
  902 /* Initialize completion queue */
  903 static int
  904 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
  905     int qidx)
  906 {
  907         int err;
  908 
   909         /* Initialize lock */
  910         snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
  911             device_get_nameunit(nic->dev), qidx);
  912         mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
  913 
  914         err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
  915                                      NICVF_CQ_BASE_ALIGN_BYTES);
  916 
  917         if (err != 0) {
  918                 device_printf(nic->dev,
  919                     "Could not allocate DMA memory for CQ\n");
  920                 return (err);
  921         }
  922 
  923         cq->desc = cq->dmem.base;
  924         cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
  925         cq->nic = nic;
  926         cq->idx = qidx;
  927         nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
  928 
  929         cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
  930             &cq->mtx);
  931 
  932         /* Allocate taskqueue */
  933         NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
  934         cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
  935             taskqueue_thread_enqueue, &cq->cmp_taskq);
  936         taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
  937             device_get_nameunit(nic->dev), qidx);
  938 
  939         return (0);
  940 }
  941 
  942 static void
  943 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
  944 {
  945 
  946         if (cq == NULL)
  947                 return;
  948         /*
  949          * The completion queue itself should be disabled by now
  950          * (ref. nicvf_snd_queue_config()).
   951          * Ensure that it is indeed disabled, or panic.
  952          */
  953         if (cq->enable)
  954                 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
  955 
  956         if (cq->cmp_taskq != NULL) {
  957                 /* Remove task */
  958                 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
  959                         taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
  960 
  961                 taskqueue_free(cq->cmp_taskq);
  962                 cq->cmp_taskq = NULL;
  963         }
  964         /*
   965          * The completion task may have re-enabled the interrupt,
   966          * so disable it now that the completion task has finished.
   967          * It is safe to do so since the corresponding CQ
   968          * was already disabled.
  969          */
  970         nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
  971         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
  972 
  973         NICVF_CMP_LOCK(cq);
  974         nicvf_free_q_desc_mem(nic, &cq->dmem);
  975         drbr_free(cq->rx_br, M_DEVBUF);
  976         NICVF_CMP_UNLOCK(cq);
  977         mtx_destroy(&cq->mtx);
  978         memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
  979 }
  980 
  981 int
  982 nicvf_xmit_locked(struct snd_queue *sq)
  983 {
  984         struct nicvf *nic;
  985         struct ifnet *ifp;
  986         struct mbuf *next;
  987         int err;
  988 
  989         NICVF_TX_LOCK_ASSERT(sq);
  990 
  991         nic = sq->nic;
  992         ifp = nic->ifp;
  993         err = 0;
  994 
  995         while ((next = drbr_peek(ifp, sq->br)) != NULL) {
  996                 /* Send a copy of the frame to the BPF listener */
  997                 ETHER_BPF_MTAP(ifp, next);
  998 
  999                 err = nicvf_tx_mbuf_locked(sq, &next);
 1000                 if (err != 0) {
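                               /*
                                * nicvf_tx_mbuf_locked() may have consumed or replaced the
                                * mbuf; if it is gone, drop the slot, otherwise put the
                                * mbuf back on the ring so the transmit can be retried.
                                */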
 1001                         if (next == NULL)
 1002                                 drbr_advance(ifp, sq->br);
 1003                         else
 1004                                 drbr_putback(ifp, sq->br, next);
 1005 
 1006                         break;
 1007                 }
 1008                 drbr_advance(ifp, sq->br);
 1009         }
 1010         return (err);
 1011 }
 1012 
 1013 static void
 1014 nicvf_snd_task(void *arg, int pending)
 1015 {
 1016         struct snd_queue *sq = (struct snd_queue *)arg;
 1017         struct nicvf *nic;
 1018         struct ifnet *ifp;
 1019         int err;
 1020 
 1021         nic = sq->nic;
 1022         ifp = nic->ifp;
 1023 
 1024         /*
 1025          * Skip sending anything if the driver is not running,
  1026          * the SQ is full, or the link is down.
 1027          */
 1028         if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1029             IFF_DRV_RUNNING) || !nic->link_up)
 1030                 return;
 1031 
 1032         NICVF_TX_LOCK(sq);
 1033         err = nicvf_xmit_locked(sq);
 1034         NICVF_TX_UNLOCK(sq);
 1035         /* Try again */
 1036         if (err != 0)
 1037                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 1038 }
 1039 
 1040 /* Initialize transmit queue */
 1041 static int
 1042 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
 1043     int qidx)
 1044 {
 1045         size_t i;
 1046         int err;
 1047 
  1048         /* Initialize the TX lock for this queue */
 1049         snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
 1050             device_get_nameunit(nic->dev), qidx);
 1051         mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
 1052 
 1053         NICVF_TX_LOCK(sq);
 1054         /* Allocate buffer ring */
 1055         sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
 1056             M_NOWAIT, &sq->mtx);
 1057         if (sq->br == NULL) {
 1058                 device_printf(nic->dev,
 1059                     "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
 1060                 err = ENOMEM;
 1061                 goto error;
 1062         }
 1063 
 1064         /* Allocate DMA memory for Tx descriptors */
 1065         err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
 1066                                      NICVF_SQ_BASE_ALIGN_BYTES);
 1067         if (err != 0) {
 1068                 device_printf(nic->dev,
 1069                     "Could not allocate DMA memory for SQ\n");
 1070                 goto error;
 1071         }
 1072 
 1073         sq->desc = sq->dmem.base;
 1074         sq->head = sq->tail = 0;
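               /*
                * Keep one descriptor unused, presumably so that a completely
                * full ring can be distinguished from an empty one.
                */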
 1075         atomic_store_rel_int(&sq->free_cnt, q_len - 1);
 1076         sq->thresh = SND_QUEUE_THRESH;
 1077         sq->idx = qidx;
 1078         sq->nic = nic;
 1079 
 1080         /*
 1081          * Allocate DMA maps for Tx buffers
 1082          */
 1083 
 1084         /* Create DMA tag first */
 1085         err = bus_dma_tag_create(
 1086             bus_get_dma_tag(nic->dev),          /* parent tag */
 1087             1,                                  /* alignment */
 1088             0,                                  /* boundary */
 1089             BUS_SPACE_MAXADDR,                  /* lowaddr */
 1090             BUS_SPACE_MAXADDR,                  /* highaddr */
 1091             NULL, NULL,                         /* filtfunc, filtfuncarg */
 1092             NICVF_TSO_MAXSIZE,                  /* maxsize */
 1093             NICVF_TSO_NSEGS,                    /* nsegments */
 1094             MCLBYTES,                           /* maxsegsize */
 1095             0,                                  /* flags */
 1096             NULL, NULL,                         /* lockfunc, lockfuncarg */
 1097             &sq->snd_buff_dmat);                /* dmat */
 1098 
 1099         if (err != 0) {
 1100                 device_printf(nic->dev,
 1101                     "Failed to create busdma tag for Tx buffers\n");
 1102                 goto error;
 1103         }
 1104 
 1105         /* Allocate send buffers array */
 1106         sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
 1107             (M_NOWAIT | M_ZERO));
 1108         if (sq->snd_buff == NULL) {
 1109                 device_printf(nic->dev,
 1110                     "Could not allocate memory for Tx buffers array\n");
 1111                 err = ENOMEM;
 1112                 goto error;
 1113         }
 1114 
 1115         /* Now populate maps */
 1116         for (i = 0; i < q_len; i++) {
 1117                 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
 1118                     &sq->snd_buff[i].dmap);
 1119                 if (err != 0) {
 1120                         device_printf(nic->dev,
 1121                             "Failed to create DMA maps for Tx buffers\n");
 1122                         goto error;
 1123                 }
 1124         }
 1125         NICVF_TX_UNLOCK(sq);
 1126 
 1127         /* Allocate taskqueue */
 1128         TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
 1129         sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
 1130             taskqueue_thread_enqueue, &sq->snd_taskq);
 1131         taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
 1132             device_get_nameunit(nic->dev), qidx);
 1133 
 1134         return (0);
 1135 error:
 1136         NICVF_TX_UNLOCK(sq);
 1137         return (err);
 1138 }
 1139 
 1140 static void
 1141 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 1142 {
 1143         struct queue_set *qs = nic->qs;
 1144         size_t i;
 1145         int err;
 1146 
 1147         if (sq == NULL)
 1148                 return;
 1149 
 1150         if (sq->snd_taskq != NULL) {
 1151                 /* Remove task */
 1152                 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
 1153                         taskqueue_drain(sq->snd_taskq, &sq->snd_task);
 1154 
 1155                 taskqueue_free(sq->snd_taskq);
 1156                 sq->snd_taskq = NULL;
 1157         }
 1158 
 1159         NICVF_TX_LOCK(sq);
 1160         if (sq->snd_buff_dmat != NULL) {
 1161                 if (sq->snd_buff != NULL) {
 1162                         for (i = 0; i < qs->sq_len; i++) {
 1163                                 m_freem(sq->snd_buff[i].mbuf);
 1164                                 sq->snd_buff[i].mbuf = NULL;
 1165 
 1166                                 bus_dmamap_unload(sq->snd_buff_dmat,
 1167                                     sq->snd_buff[i].dmap);
 1168                                 err = bus_dmamap_destroy(sq->snd_buff_dmat,
 1169                                     sq->snd_buff[i].dmap);
 1170                                 /*
 1171                                  * If bus_dmamap_destroy fails it can cause
 1172                                  * random panic later if the tag is also
 1173                                  * destroyed in the process.
 1174                                  */
 1175                                 KASSERT(err == 0,
 1176                                     ("%s: Could not destroy DMA map for SQ",
 1177                                     __func__));
 1178                         }
 1179                 }
 1180 
 1181                 free(sq->snd_buff, M_NICVF);
 1182 
 1183                 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
 1184                 KASSERT(err == 0,
 1185                     ("%s: Trying to destroy BUSY DMA tag", __func__));
 1186         }
 1187 
 1188         /* Free private driver ring for this send queue */
 1189         if (sq->br != NULL)
 1190                 drbr_free(sq->br, M_DEVBUF);
 1191 
 1192         if (sq->dmem.base != NULL)
 1193                 nicvf_free_q_desc_mem(nic, &sq->dmem);
 1194 
 1195         NICVF_TX_UNLOCK(sq);
 1196         /* Destroy Tx lock */
 1197         mtx_destroy(&sq->mtx);
 1198         memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
 1199 }
 1200 
 1201 static void
 1202 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1203 {
 1204 
 1205         /* Disable send queue */
 1206         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
 1207         /* Check if SQ is stopped */
 1208         if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
 1209                 return;
 1210         /* Reset send queue */
 1211         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1212 }
 1213 
 1214 static void
 1215 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1216 {
 1217         union nic_mbx mbx = {};
 1218 
 1219         /* Make sure all packets in the pipeline are written back into mem */
 1220         mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
 1221         nicvf_send_msg_to_pf(nic, &mbx);
 1222 }
 1223 
 1224 static void
 1225 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1226 {
 1227 
  1228         /* Disable timer threshold (doesn't get reset upon CQ reset) */
 1229         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
 1230         /* Disable completion queue */
 1231         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
 1232         /* Reset completion queue */
 1233         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1234 }
 1235 
 1236 static void
 1237 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
 1238 {
 1239         uint64_t tmp, fifo_state;
 1240         int timeout = 10;
 1241 
  1242         /* Save head and tail pointers for freeing up buffers */
 1243         rbdr->head =
 1244             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
 1245         rbdr->tail =
 1246             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
 1247 
 1248         /*
 1249          * If RBDR FIFO is in 'FAIL' state then do a reset first
  1250          * before reclaiming.
 1251          */
 1252         fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
 1253         if (((fifo_state >> 62) & 0x03) == 0x3) {
 1254                 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
 1255                     qidx, NICVF_RBDR_RESET);
 1256         }
 1257 
 1258         /* Disable RBDR */
 1259         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
 1260         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1261                 return;
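               /*
                * Poll until the upper and lower 32-bit halves of the prefetch
                * status register match, i.e. the prefetcher has caught up.
                */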
 1262         while (1) {
 1263                 tmp = nicvf_queue_reg_read(nic,
 1264                     NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
 1265                 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
 1266                         break;
 1267 
 1268                 DELAY(1000);
 1269                 timeout--;
 1270                 if (!timeout) {
 1271                         device_printf(nic->dev,
 1272                             "Failed polling on prefetch status\n");
 1273                         return;
 1274                 }
 1275         }
 1276         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1277             NICVF_RBDR_RESET);
 1278 
 1279         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
 1280                 return;
 1281         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
 1282         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1283                 return;
 1284 }
 1285 
 1286 /* Configures receive queue */
 1287 static void
 1288 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 1289     int qidx, bool enable)
 1290 {
 1291         union nic_mbx mbx = {};
 1292         struct rcv_queue *rq;
 1293         struct rq_cfg rq_cfg;
 1294         struct ifnet *ifp;
 1295         struct lro_ctrl *lro;
 1296 
 1297         ifp = nic->ifp;
 1298 
 1299         rq = &qs->rq[qidx];
 1300         rq->enable = enable;
 1301 
 1302         lro = &rq->lro;
 1303 
 1304         /* Disable receive queue */
 1305         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
 1306 
 1307         if (!rq->enable) {
 1308                 nicvf_reclaim_rcv_queue(nic, qs, qidx);
 1309                 /* Free LRO memory */
 1310                 tcp_lro_free(lro);
 1311                 rq->lro_enabled = FALSE;
 1312                 return;
 1313         }
 1314 
 1315         /* Configure LRO if enabled */
 1316         rq->lro_enabled = FALSE;
 1317         if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
 1318                 if (tcp_lro_init(lro) != 0) {
 1319                         device_printf(nic->dev,
 1320                             "Failed to initialize LRO for RXQ%d\n", qidx);
 1321                 } else {
 1322                         rq->lro_enabled = TRUE;
 1323                         lro->ifp = nic->ifp;
 1324                 }
 1325         }
 1326 
 1327         rq->cq_qs = qs->vnic_id;
 1328         rq->cq_idx = qidx;
 1329         rq->start_rbdr_qs = qs->vnic_id;
 1330         rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
 1331         rq->cont_rbdr_qs = qs->vnic_id;
 1332         rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
  1333         /* All writes of RBDR data are to be loaded into the L2 cache as well */
 1334         rq->caching = 1;
 1335 
 1336         /* Send a mailbox msg to PF to config RQ */
 1337         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
 1338         mbx.rq.qs_num = qs->vnic_id;
 1339         mbx.rq.rq_num = qidx;
 1340         mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
 1341             (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
 1342             (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
 1343             (rq->start_qs_rbdr_idx);
 1344         nicvf_send_msg_to_pf(nic, &mbx);
 1345 
 1346         mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
 1347         mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
 1348         nicvf_send_msg_to_pf(nic, &mbx);
 1349 
 1350         /*
 1351          * RQ drop config
 1352          * Enable CQ drop to reserve sufficient CQEs for all tx packets
 1353          */
 1354         mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
 1355         mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
 1356         nicvf_send_msg_to_pf(nic, &mbx);
 1357 
 1358         nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
 1359 
 1360         /* Enable Receive queue */
 1361         rq_cfg.ena = 1;
 1362         rq_cfg.tcp_ena = 0;
 1363         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
 1364             *(uint64_t *)&rq_cfg);
 1365 }
 1366 
 1367 /* Configures completion queue */
 1368 static void
 1369 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 1370     int qidx, boolean_t enable)
 1371 {
 1372         struct cmp_queue *cq;
 1373         struct cq_cfg cq_cfg;
 1374 
 1375         cq = &qs->cq[qidx];
 1376         cq->enable = enable;
 1377 
 1378         if (!cq->enable) {
 1379                 nicvf_reclaim_cmp_queue(nic, qs, qidx);
 1380                 return;
 1381         }
 1382 
 1383         /* Reset completion queue */
 1384         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1385 
 1386         /* Set completion queue base address */
 1387         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
 1388             (uint64_t)(cq->dmem.phys_base));
 1389 
 1390         /* Enable Completion queue */
 1391         cq_cfg.ena = 1;
 1392         cq_cfg.reset = 0;
 1393         cq_cfg.caching = 0;
 1394         cq_cfg.qsize = CMP_QSIZE;
 1395         cq_cfg.avg_con = 0;
 1396         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
 1397 
 1398         /* Set threshold value for interrupt generation */
 1399         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
 1400         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
 1401             nic->cq_coalesce_usecs);
 1402 }
 1403 
 1404 /* Configures transmit queue */
 1405 static void
 1406 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1407     boolean_t enable)
 1408 {
 1409         union nic_mbx mbx = {};
 1410         struct snd_queue *sq;
 1411         struct sq_cfg sq_cfg;
 1412 
 1413         sq = &qs->sq[qidx];
 1414         sq->enable = enable;
 1415 
 1416         if (!sq->enable) {
 1417                 nicvf_reclaim_snd_queue(nic, qs, qidx);
 1418                 return;
 1419         }
 1420 
 1421         /* Reset send queue */
 1422         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1423 
 1424         sq->cq_qs = qs->vnic_id;
 1425         sq->cq_idx = qidx;
 1426 
 1427         /* Send a mailbox msg to PF to config SQ */
 1428         mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
 1429         mbx.sq.qs_num = qs->vnic_id;
 1430         mbx.sq.sq_num = qidx;
 1431         mbx.sq.sqs_mode = nic->sqs_mode;
 1432         mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
 1433         nicvf_send_msg_to_pf(nic, &mbx);
 1434 
 1435         /* Set queue base address */
 1436         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
 1437             (uint64_t)(sq->dmem.phys_base));
 1438 
 1439         /* Enable send queue  & set queue size */
 1440         sq_cfg.ena = 1;
 1441         sq_cfg.reset = 0;
 1442         sq_cfg.ldwb = 0;
 1443         sq_cfg.qsize = SND_QSIZE;
 1444         sq_cfg.tstmp_bgx_intf = 0;
 1445         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
 1446 
 1447         /* Set threshold value for interrupt generation */
 1448         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
 1449 }
 1450 
 1451 /* Configures receive buffer descriptor ring */
 1452 static void
 1453 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1454     boolean_t enable)
 1455 {
 1456         struct rbdr *rbdr;
 1457         struct rbdr_cfg rbdr_cfg;
 1458 
 1459         rbdr = &qs->rbdr[qidx];
 1460         nicvf_reclaim_rbdr(nic, rbdr, qidx);
 1461         if (!enable)
 1462                 return;
 1463 
 1464         /* Set descriptor base address */
 1465         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
 1466             (uint64_t)(rbdr->dmem.phys_base));
 1467 
 1468         /* Enable RBDR  & set queue size */
 1469         /* Buffer size should be in multiples of 128 bytes */
 1470         rbdr_cfg.ena = 1;
 1471         rbdr_cfg.reset = 0;
 1472         rbdr_cfg.ldwb = 0;
 1473         rbdr_cfg.qsize = RBDR_SIZE;
 1474         rbdr_cfg.avg_con = 0;
 1475         rbdr_cfg.lines = rbdr->dma_size / 128;
 1476         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1477             *(uint64_t *)&rbdr_cfg);
 1478 
 1479         /* Notify HW */
 1480         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
 1481             qs->rbdr_len - 1);
 1482 
 1483         /* Set threshold value for interrupt generation */
 1484         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
 1485             rbdr->thresh - 1);
 1486 }
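
Two details of the RBDR setup above are easy to miss; a short annotation (the second point is an inference from the doorbell value, not stated in this file):

        /*
         * - rbdr_cfg.lines expresses the receive-buffer size in 128-byte
         *   units, which is why the buffer size must be a multiple of 128.
         * - The initial doorbell write advertises rbdr_len - 1 descriptors,
         *   i.e. the ring is handed to hardware one entry short of full,
         *   presumably so the head index never catches up with the tail.
         */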
 1487 
 1488 /* Requests PF to assign and enable Qset */
 1489 void
 1490 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
 1491 {
 1492         union nic_mbx mbx = {};
 1493         struct queue_set *qs;
 1494         struct qs_cfg *qs_cfg;
 1495 
 1496         qs = nic->qs;
 1497         if (qs == NULL) {
 1498                 device_printf(nic->dev,
 1499                     "Qset is not allocated; skipping queue initialization\n");
 1500                 return;
 1501         }
 1502 
 1503         qs->enable = enable;
 1504         qs->vnic_id = nic->vf_id;
 1505 
 1506         /* Send a mailbox msg to PF to config Qset */
 1507         mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
 1508         mbx.qs.num = qs->vnic_id;
 1509 
 1510         mbx.qs.cfg = 0;
 1511         qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
 1512         if (qs->enable) {
 1513                 qs_cfg->ena = 1;
 1514                 qs_cfg->vnic = qs->vnic_id;
 1515         }
 1516         nicvf_send_msg_to_pf(nic, &mbx);
 1517 }
 1518 
 1519 static void
 1520 nicvf_free_resources(struct nicvf *nic)
 1521 {
 1522         int qidx;
 1523         struct queue_set *qs;
 1524 
 1525         qs = nic->qs;
 1526         /*
 1527          * Remove QS error task first since it has to be dead
 1528          * to safely free completion queue tasks.
 1529          */
 1530         if (qs->qs_err_taskq != NULL) {
 1531                 /* Shut down QS error tasks */
 1532                 while (taskqueue_cancel(qs->qs_err_taskq,
 1533                     &qs->qs_err_task,  NULL) != 0) {
 1534                         taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
 1535                 }
 1536                 taskqueue_free(qs->qs_err_taskq);
 1537                 qs->qs_err_taskq = NULL;
 1538         }
 1539         /* Free receive buffer descriptor ring */
 1540         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1541                 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
 1542 
 1543         /* Free completion queue */
 1544         for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1545                 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
 1546 
 1547         /* Free send queue */
 1548         for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1549                 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
 1550 }
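
The cancel/drain loop above follows the usual taskqueue(9) shutdown pattern; restated as a comment (based on the taskqueue(9) contract rather than anything in this file):

        /*
         * taskqueue_cancel() removes a pending task and returns 0; if the
         * task is currently executing it cannot be cancelled and a non-zero
         * value is returned, so the loop drains the queue (waiting for the
         * running task to finish) and then retries until the task is gone.
         */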
 1551 
 1552 static int
 1553 nicvf_alloc_resources(struct nicvf *nic)
 1554 {
 1555         struct queue_set *qs = nic->qs;
 1556         int qidx;
 1557 
 1558         /* Alloc receive buffer descriptor ring */
 1559         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
 1560                 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
 1561                                     DMA_BUFFER_LEN, qidx))
 1562                         goto alloc_fail;
 1563         }
 1564 
 1565         /* Alloc send queue */
 1566         for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
 1567                 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
 1568                         goto alloc_fail;
 1569         }
 1570 
 1571         /* Alloc completion queue */
 1572         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 1573                 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
 1574                         goto alloc_fail;
 1575         }
 1576 
 1577         /* Allocate QS error taskqueue */
 1578         NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
 1579         qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
 1580             taskqueue_thread_enqueue, &qs->qs_err_taskq);
 1581         taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
 1582             device_get_nameunit(nic->dev));
 1583 
 1584         return (0);
 1585 alloc_fail:
 1586         nicvf_free_resources(nic);
 1587         return (ENOMEM);
 1588 }
 1589 
 1590 int
 1591 nicvf_set_qset_resources(struct nicvf *nic)
 1592 {
 1593         struct queue_set *qs;
 1594 
 1595         qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
 1596         nic->qs = qs;
 1597 
 1598         /* Set count of each queue */
 1599         qs->rbdr_cnt = RBDR_CNT;
 1600         qs->rq_cnt = RCV_QUEUE_CNT;
 1601 
 1602         qs->sq_cnt = SND_QUEUE_CNT;
 1603         qs->cq_cnt = CMP_QUEUE_CNT;
 1604 
 1605         /* Set queue lengths */
 1606         qs->rbdr_len = RCV_BUF_COUNT;
 1607         qs->sq_len = SND_QUEUE_LEN;
 1608         qs->cq_len = CMP_QUEUE_LEN;
 1609 
 1610         nic->rx_queues = qs->rq_cnt;
 1611         nic->tx_queues = qs->sq_cnt;
 1612 
 1613         return (0);
 1614 }
 1615 
 1616 int
 1617 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
 1618 {
 1619         boolean_t disable = FALSE;
 1620         struct queue_set *qs;
 1621         int qidx;
 1622 
 1623         qs = nic->qs;
 1624         if (qs == NULL)
 1625                 return (0);
 1626 
 1627         if (enable) {
 1628                 if (nicvf_alloc_resources(nic) != 0)
 1629                         return (ENOMEM);
 1630 
 1631                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1632                         nicvf_snd_queue_config(nic, qs, qidx, enable);
 1633                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1634                         nicvf_cmp_queue_config(nic, qs, qidx, enable);
 1635                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1636                         nicvf_rbdr_config(nic, qs, qidx, enable);
 1637                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1638                         nicvf_rcv_queue_config(nic, qs, qidx, enable);
 1639         } else {
 1640                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1641                         nicvf_rcv_queue_config(nic, qs, qidx, disable);
 1642                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1643                         nicvf_rbdr_config(nic, qs, qidx, disable);
 1644                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1645                         nicvf_snd_queue_config(nic, qs, qidx, disable);
 1646                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1647                         nicvf_cmp_queue_config(nic, qs, qidx, disable);
 1648 
 1649                 nicvf_free_resources(nic);
 1650         }
 1651 
 1652         return (0);
 1653 }
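
The enable and disable paths above are deliberately asymmetric; a brief annotation (the rationale is an inference, the code does not state it):

        /*
         * On enable, SQs and CQs are configured before RBDRs and RQs, so
         * completion handling is in place before receive traffic can start.
         * On disable, RQs and RBDRs are torn down first, stopping buffer
         * posting and packet reception before the send and completion
         * queues are disabled and the resources are freed.
         */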
 1654 
 1655 /*
 1656  * Reserve descriptors in the SQ.
 1657  * Returns the index of the first reserved descriptor.
 1658  */
 1659 static __inline int
 1660 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
 1661 {
 1662         int qentry;
 1663 
 1664         qentry = sq->tail;
 1665         atomic_subtract_int(&sq->free_cnt, desc_cnt);
 1666         sq->tail += desc_cnt;
 1667         sq->tail &= (sq->dmem.q_len - 1);
 1668 
 1669         return (qentry);
 1670 }
 1671 
 1672 /* Free descriptor back to SQ for future use */
 1673 static void
 1674 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 1675 {
 1676 
 1677         atomic_add_int(&sq->free_cnt, desc_cnt);
 1678         sq->head += desc_cnt;
 1679         sq->head &= (sq->dmem.q_len - 1);
 1680 }
 1681 
 1682 static __inline int
 1683 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
 1684 {
 1685         qentry++;
 1686         qentry &= (sq->dmem.q_len - 1);
 1687         return (qentry);
 1688 }
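
The head/tail updates in the three helpers above wrap with a bitwise AND, which is only equivalent to a modulo when dmem.q_len is a power of two (the queue lengths configured by this driver are expected to satisfy that). A standalone sketch of the same arithmetic:

        /*
         * Advance a ring index by cnt entries, wrapping at q_len.
         * Assumes q_len is a power of two, so (i & (q_len - 1)) == i % q_len.
         * Example: q_len = 1024, idx = 1023, cnt = 3 -> returns 2.
         */
        static inline int
        ring_advance(int idx, int cnt, int q_len)
        {

                return ((idx + cnt) & (q_len - 1));
        }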
 1689 
 1690 static void
 1691 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1692 {
 1693         uint64_t sq_cfg;
 1694 
 1695         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1696         sq_cfg |= NICVF_SQ_EN;
 1697         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1698         /* Ring doorbell so that H/W restarts processing SQEs */
 1699         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
 1700 }
 1701 
 1702 static void
 1703 nicvf_sq_disable(struct nicvf *nic, int qidx)
 1704 {
 1705         uint64_t sq_cfg;
 1706 
 1707         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1708         sq_cfg &= ~NICVF_SQ_EN;
 1709         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1710 }
 1711 
 1712 static void
 1713 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1714 {
 1715         uint64_t head;
 1716         struct snd_buff *snd_buff;
 1717         struct sq_hdr_subdesc *hdr;
 1718 
 1719         NICVF_TX_LOCK(sq);
 1720         head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
 1721         while (sq->head != head) {
 1722                 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
 1723                 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
 1724                         nicvf_put_sq_desc(sq, 1);
 1725                         continue;
 1726                 }
 1727                 snd_buff = &sq->snd_buff[sq->head];
 1728                 if (snd_buff->mbuf != NULL) {
 1729                         bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1730                         m_freem(snd_buff->mbuf);
 1731                         sq->snd_buff[sq->head].mbuf = NULL;
 1732                 }
 1733                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 1734         }
 1735         NICVF_TX_UNLOCK(sq);
 1736 }
 1737 
 1738 /*
 1739  * Add SQ HEADER subdescriptor.
 1740  * First subdescriptor for every send descriptor.
 1741  */
 1742 static __inline int
 1743 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
 1744                          int subdesc_cnt, struct mbuf *mbuf, int len)
 1745 {
 1746         struct nicvf *nic;
 1747         struct sq_hdr_subdesc *hdr;
 1748         struct ether_vlan_header *eh;
 1749 #ifdef INET
 1750         struct ip *ip;
 1751         struct tcphdr *th;
 1752 #endif
 1753         uint16_t etype;
 1754         int ehdrlen, iphlen, poff, proto;
 1755 
 1756         nic = sq->nic;
 1757 
 1758         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
 1759         sq->snd_buff[qentry].mbuf = mbuf;
 1760 
 1761         memset(hdr, 0, SND_QUEUE_DESC_SIZE);
 1762         hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
 1763         /* Enable notification via CQE after processing SQE */
 1764         hdr->post_cqe = 1;
 1765         /* No of subdescriptors following this */
 1766         hdr->subdesc_cnt = subdesc_cnt;
 1767         hdr->tot_len = len;
 1768 
 1769         eh = mtod(mbuf, struct ether_vlan_header *);
 1770         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1771                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1772                 etype = ntohs(eh->evl_proto);
 1773         } else {
 1774                 ehdrlen = ETHER_HDR_LEN;
 1775                 etype = ntohs(eh->evl_encap_proto);
 1776         }
 1777 
 1778         poff = proto = -1;
 1779         switch (etype) {
 1780 #ifdef INET6
 1781         case ETHERTYPE_IPV6:
 1782                 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
 1783                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
 1784                         sq->snd_buff[qentry].mbuf = NULL;
 1785                         if (mbuf == NULL)
 1786                                 return (ENOBUFS);
 1787                 }
 1788                 poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
 1789                 if (poff < 0)
 1790                         return (ENOBUFS);
 1791                 poff += ehdrlen;
 1792                 break;
 1793 #endif
 1794 #ifdef INET
 1795         case ETHERTYPE_IP:
 1796                 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
 1797                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
 1798                         sq->snd_buff[qentry].mbuf = mbuf;
 1799                         if (mbuf == NULL)
 1800                                 return (ENOBUFS);
 1801                 }
 1802                 if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
 1803                         hdr->csum_l3 = 1; /* Enable IP csum calculation */
 1804 
 1805                 ip = (struct ip *)(mbuf->m_data + ehdrlen);
 1806                 iphlen = ip->ip_hl << 2;
 1807                 poff = ehdrlen + iphlen;
 1808                 proto = ip->ip_p;
 1809                 break;
 1810 #endif
 1811         }
 1812 
 1813 #if defined(INET6) || defined(INET)
 1814         if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
 1815                 switch (proto) {
 1816                 case IPPROTO_TCP:
 1817                         if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
 1818                                 break;
 1819 
 1820                         if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
 1821                                 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
 1822                                 sq->snd_buff[qentry].mbuf = mbuf;
 1823                                 if (mbuf == NULL)
 1824                                         return (ENOBUFS);
 1825                         }
 1826                         hdr->csum_l4 = SEND_L4_CSUM_TCP;
 1827                         break;
 1828                 case IPPROTO_UDP:
 1829                         if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
 1830                                 break;
 1831 
 1832                         if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
 1833                                 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
 1834                                 sq->snd_buff[qentry].mbuf = mbuf;
 1835                                 if (mbuf == NULL)
 1836                                         return (ENOBUFS);
 1837                         }
 1838                         hdr->csum_l4 = SEND_L4_CSUM_UDP;
 1839                         break;
 1840                 case IPPROTO_SCTP:
 1841                         if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
 1842                                 break;
 1843 
 1844                         if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
 1845                                 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
 1846                                 sq->snd_buff[qentry].mbuf = mbuf;
 1847                                 if (mbuf == NULL)
 1848                                         return (ENOBUFS);
 1849                         }
 1850                         hdr->csum_l4 = SEND_L4_CSUM_SCTP;
 1851                         break;
 1852                 default:
 1853                         break;
 1854                 }
 1855                 hdr->l3_offset = ehdrlen;
 1856                 hdr->l4_offset = poff;
 1857         }
 1858 
 1859         if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
 1860                 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
 1861 
 1862                 hdr->tso = 1;
 1863                 hdr->tso_start = poff + (th->th_off * 4);
 1864                 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
 1865                 hdr->inner_l3_offset = ehdrlen - 2;
 1866                 nic->drv_stats.tx_tso++;
 1867         }
 1868 #endif
 1869 
 1870         return (0);
 1871 }
 1872 
 1873 /*
 1874  * SQ GATHER subdescriptor
 1875  * Must follow HDR descriptor
 1876  */
 1877 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
 1878                                                int size, uint64_t data)
 1879 {
 1880         struct sq_gather_subdesc *gather;
 1881 
 1882         qentry &= (sq->dmem.q_len - 1);
 1883         gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
 1884 
 1885         memset(gather, 0, SND_QUEUE_DESC_SIZE);
 1886         gather->subdesc_type = SQ_DESC_TYPE_GATHER;
 1887         gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
 1888         gather->size = size;
 1889         gather->addr = data;
 1890 }
 1891 
 1892 /* Put an mbuf onto an SQ for packet transmission. */
 1893 static int
 1894 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
 1895 {
 1896         bus_dma_segment_t segs[256];
 1897         struct snd_buff *snd_buff;
 1898         size_t seg;
 1899         int nsegs, qentry;
 1900         int subdesc_cnt;
 1901         int err;
 1902 
 1903         NICVF_TX_LOCK_ASSERT(sq);
 1904 
 1905         if (sq->free_cnt == 0)
 1906                 return (ENOBUFS);
 1907 
 1908         snd_buff = &sq->snd_buff[sq->tail];
 1909 
 1910         err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
 1911             *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
 1912         if (__predict_false(err != 0)) {
 1913                 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
 1914                 m_freem(*mbufp);
 1915                 *mbufp = NULL;
 1916                 return (err);
 1917         }
 1918 
 1919         /* Set how many subdescriptors are required */
 1920         subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
 1921         if (subdesc_cnt > sq->free_cnt) {
 1922                 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
 1923                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1924                 return (ENOBUFS);
 1925         }
 1926 
 1927         qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 1928 
 1929         /* Add SQ header subdesc */
 1930         err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
 1931             (*mbufp)->m_pkthdr.len);
 1932         if (err != 0) {
 1933                 nicvf_put_sq_desc(sq, subdesc_cnt);
 1934                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1935                 if (err == ENOBUFS) {
 1936                         m_freem(*mbufp);
 1937                         *mbufp = NULL;
 1938                 }
 1939                 return (err);
 1940         }
 1941 
 1942         /* Add SQ gather subdescs */
 1943         for (seg = 0; seg < nsegs; seg++) {
 1944                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
 1945                 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
 1946                     segs[seg].ds_addr);
 1947         }
 1948 
 1949         /* Make sure all memory stores are done before ringing the doorbell. */
 1950         bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
 1951 
 1952         dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
 1953             __func__, sq->idx, subdesc_cnt);
 1954         /* Inform HW to xmit new packet */
 1955         nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
 1956             sq->idx, subdesc_cnt);
 1957         return (0);
 1958 }
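
Descriptor accounting for the function above, spelled out (MIN_SQ_DESC_PER_PKT_XMIT is defined elsewhere; the value of 2 below is an assumption used only for the example):

        /*
         * Each packet occupies one HEADER subdescriptor followed by one
         * GATHER subdescriptor per DMA segment, so subdesc_cnt =
         * MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1.  Assuming the minimum is 2
         * (header plus one gather), a 3-segment mbuf consumes 4 SQ entries,
         * and the final doorbell write announces all 4 to the hardware at
         * once.
         */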
 1959 
 1960 static __inline u_int
 1961 frag_num(u_int i)
 1962 {
 1963 #if BYTE_ORDER == BIG_ENDIAN
 1964         return ((i & ~3) + 3 - (i & 3));
 1965 #else
 1966         return (i);
 1967 #endif
 1968 }
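
frag_num() compensates for the receive-buffer lengths being stored as 16-bit entries inside 64-bit CQE words; on big-endian hosts the four entries within each word are read in reverse order, so the index is mirrored within each group of four. A worked example of the mapping:

        /*
         * Big-endian case of frag_num():
         *   i:           0 1 2 3 4 5 6 7
         *   frag_num(i): 3 2 1 0 7 6 5 4
         * Little-endian hosts use the index unchanged.  The grouping of four
         * 16-bit lengths per 64-bit word is inferred from the expression and
         * from the rb_lens[] layout in nicvf_get_rcv_mbuf() below.
         */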
 1969 
 1970 /* Returns MBUF for a received packet */
 1971 struct mbuf *
 1972 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 1973 {
 1974         int frag;
 1975         int payload_len = 0;
 1976         struct mbuf *mbuf;
 1977         struct mbuf *mbuf_frag;
 1978         uint16_t *rb_lens = NULL;
 1979         uint64_t *rb_ptrs = NULL;
 1980 
 1981         mbuf = NULL;
 1982         rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
 1983         rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
 1984 
 1985         dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
 1986             __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
 1987 
 1988         for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
 1989                 payload_len = rb_lens[frag_num(frag)];
 1990                 if (frag == 0) {
 1991                         /* First fragment */
 1992                         mbuf = nicvf_rb_ptr_to_mbuf(nic,
 1993                             (*rb_ptrs - cqe_rx->align_pad));
 1994                         mbuf->m_len = payload_len;
 1995                         mbuf->m_data += cqe_rx->align_pad;
 1996                         if_setrcvif(mbuf, nic->ifp);
 1997                 } else {
 1998                         /* Add fragments */
 1999                         mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
 2000                         m_append(mbuf, payload_len, mbuf_frag->m_data);
 2001                         m_freem(mbuf_frag);
 2002                 }
 2003                 /* Next buffer pointer */
 2004                 rb_ptrs++;
 2005         }
 2006 
 2007         if (__predict_true(mbuf != NULL)) {
 2008                 m_fixhdr(mbuf);
 2009                 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
 2010                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
 2011                 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
 2012                         /*
 2013                          * HW by default verifies IP & TCP/UDP/SCTP checksums
 2014                          */
 2015                         if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
 2016                                 mbuf->m_pkthdr.csum_flags =
 2017                                     (CSUM_IP_CHECKED | CSUM_IP_VALID);
 2018                         }
 2019 
 2020                         switch (cqe_rx->l4_type) {
 2021                         case L4TYPE_UDP:
 2022                         case L4TYPE_TCP: /* fall through */
 2023                                 mbuf->m_pkthdr.csum_flags |=
 2024                                     (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 2025                                 mbuf->m_pkthdr.csum_data = 0xffff;
 2026                                 break;
 2027                         case L4TYPE_SCTP:
 2028                                 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
 2029                                 break;
 2030                         default:
 2031                                 break;
 2032                         }
 2033                 }
 2034         }
 2035 
 2036         return (mbuf);
 2037 }
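
The pointer arithmetic at the top of nicvf_get_rcv_mbuf() encodes the RX completion-entry layout used here; a short annotation:

        /*
         * rb_lens[] starts 3 x 8 = 24 bytes into the CQE and rb_ptrs[]
         * starts at byte 48, i.e. the per-fragment 16-bit lengths precede
         * the 64-bit buffer addresses.  Only the first fragment's address
         * and data pointer are adjusted by align_pad; subsequent fragments
         * are appended as-is and their temporary mbufs are freed.
         */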
 2038 
 2039 /* Enable interrupt */
 2040 void
 2041 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
 2042 {
 2043         uint64_t reg_val;
 2044 
 2045         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2046 
 2047         switch (int_type) {
 2048         case NICVF_INTR_CQ:
 2049                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2050                 break;
 2051         case NICVF_INTR_SQ:
 2052                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2053                 break;
 2054         case NICVF_INTR_RBDR:
 2055                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2056                 break;
 2057         case NICVF_INTR_PKT_DROP:
 2058                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2059                 break;
 2060         case NICVF_INTR_TCP_TIMER:
 2061                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2062                 break;
 2063         case NICVF_INTR_MBOX:
 2064                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2065                 break;
 2066         case NICVF_INTR_QS_ERR:
 2067                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2068                 break;
 2069         default:
 2070                 device_printf(nic->dev,
 2071                            "Failed to enable interrupt: unknown type\n");
 2072                 break;
 2073         }
 2074 
 2075         nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
 2076 }
 2077 
 2078 /* Disable interrupt */
 2079 void
 2080 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
 2081 {
 2082         uint64_t reg_val = 0;
 2083 
 2084         switch (int_type) {
 2085         case NICVF_INTR_CQ:
 2086                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2087                 break;
 2088         case NICVF_INTR_SQ:
 2089                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2090                 break;
 2091         case NICVF_INTR_RBDR:
 2092                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2093                 break;
 2094         case NICVF_INTR_PKT_DROP:
 2095                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2096                 break;
 2097         case NICVF_INTR_TCP_TIMER:
 2098                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2099                 break;
 2100         case NICVF_INTR_MBOX:
 2101                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2102                 break;
 2103         case NICVF_INTR_QS_ERR:
 2104                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2105                 break;
 2106         default:
 2107                 device_printf(nic->dev,
 2108                            "Failed to disable interrupt: unknown type\n");
 2109                 break;
 2110         }
 2111 
 2112         nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
 2113 }
 2114 
 2115 /* Clear interrupt */
 2116 void
 2117 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
 2118 {
 2119         uint64_t reg_val = 0;
 2120 
 2121         switch (int_type) {
 2122         case NICVF_INTR_CQ:
 2123                 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2124                 break;
 2125         case NICVF_INTR_SQ:
 2126                 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2127                 break;
 2128         case NICVF_INTR_RBDR:
 2129                 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2130                 break;
 2131         case NICVF_INTR_PKT_DROP:
 2132                 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2133                 break;
 2134         case NICVF_INTR_TCP_TIMER:
 2135                 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2136                 break;
 2137         case NICVF_INTR_MBOX:
 2138                 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
 2139                 break;
 2140         case NICVF_INTR_QS_ERR:
 2141                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2142                 break;
 2143         default:
 2144                 device_printf(nic->dev,
 2145                            "Failed to clear interrupt: unknown type\n");
 2146                 break;
 2147         }
 2148 
 2149         nicvf_reg_write(nic, NIC_VF_INT, reg_val);
 2150 }
 2151 
 2152 /* Check if interrupt is enabled */
 2153 int
 2154 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
 2155 {
 2156         uint64_t reg_val;
 2157         uint64_t mask = 0xff;
 2158 
 2159         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2160 
 2161         switch (int_type) {
 2162         case NICVF_INTR_CQ:
 2163                 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2164                 break;
 2165         case NICVF_INTR_SQ:
 2166                 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2167                 break;
 2168         case NICVF_INTR_RBDR:
 2169                 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2170                 break;
 2171         case NICVF_INTR_PKT_DROP:
 2172                 mask = NICVF_INTR_PKT_DROP_MASK;
 2173                 break;
 2174         case NICVF_INTR_TCP_TIMER:
 2175                 mask = NICVF_INTR_TCP_TIMER_MASK;
 2176                 break;
 2177         case NICVF_INTR_MBOX:
 2178                 mask = NICVF_INTR_MBOX_MASK;
 2179                 break;
 2180         case NICVF_INTR_QS_ERR:
 2181                 mask = NICVF_INTR_QS_ERR_MASK;
 2182                 break;
 2183         default:
 2184                 device_printf(nic->dev,
 2185                            "Failed to check interrupt enable: unknown type\n");
 2186                 break;
 2187         }
 2188 
 2189         return (reg_val & mask);
 2190 }
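
The interrupt enable/disable/clear/query routines above build the same per-type bit mask and differ only in which register they touch; the register-name suffixes indicate the usual set/clear semantics (stated here as an annotation, not taken from this file):

        /*
         * NIC_VF_ENA_W1S - write 1 to set:   enables the written interrupt bits
         * NIC_VF_ENA_W1C - write 1 to clear: disables the written interrupt bits
         * NIC_VF_INT     - writing 1 clears the pending interrupt status
         * nicvf_is_intr_enabled() reads ENA_W1S back and tests the same mask.
         */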
 2191 
 2192 void
 2193 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
 2194 {
 2195         struct rcv_queue *rq;
 2196 
 2197 #define GET_RQ_STATS(reg) \
 2198         nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
 2199                             (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2200 
 2201         rq = &nic->qs->rq[rq_idx];
 2202         rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
 2203         rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
 2204 }
 2205 
 2206 void
 2207 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 2208 {
 2209         struct snd_queue *sq;
 2210 
 2211 #define GET_SQ_STATS(reg) \
 2212         nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
 2213                             (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2214 
 2215         sq = &nic->qs->sq[sq_idx];
 2216         sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
 2217         sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
 2218 }
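
The GET_RQ_STATS/GET_SQ_STATS macros above compose a register offset from three parts; a short annotation:

        /*
         * Offset = per-type statistics base register
         *        | queue index << NIC_Q_NUM_SHIFT  (selects the queue's block)
         *        | statistic selector << 3         (one 64-bit counter each)
         * Only the OCTS (byte) and PKTS (packet) counters are read here.
         */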
 2219 
 2220 /* Check for errors in the receive completion-queue entry */
 2221 int
 2222 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2223     struct cqe_rx_t *cqe_rx)
 2224 {
 2225         struct nicvf_hw_stats *stats = &nic->hw_stats;
 2226         struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 2227 
 2228         if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
 2229                 drv_stats->rx_frames_ok++;
 2230                 return (0);
 2231         }
 2232 
 2233         switch (cqe_rx->err_opcode) {
 2234         case CQ_RX_ERROP_RE_PARTIAL:
 2235                 stats->rx_bgx_truncated_pkts++;
 2236                 break;
 2237         case CQ_RX_ERROP_RE_JABBER:
 2238                 stats->rx_jabber_errs++;
 2239                 break;
 2240         case CQ_RX_ERROP_RE_FCS:
 2241                 stats->rx_fcs_errs++;
 2242                 break;
 2243         case CQ_RX_ERROP_RE_RX_CTL:
 2244                 stats->rx_bgx_errs++;
 2245                 break;
 2246         case CQ_RX_ERROP_PREL2_ERR:
 2247                 stats->rx_prel2_errs++;
 2248                 break;
 2249         case CQ_RX_ERROP_L2_MAL:
 2250                 stats->rx_l2_hdr_malformed++;
 2251                 break;
 2252         case CQ_RX_ERROP_L2_OVERSIZE:
 2253                 stats->rx_oversize++;
 2254                 break;
 2255         case CQ_RX_ERROP_L2_UNDERSIZE:
 2256                 stats->rx_undersize++;
 2257                 break;
 2258         case CQ_RX_ERROP_L2_LENMISM:
 2259                 stats->rx_l2_len_mismatch++;
 2260                 break;
 2261         case CQ_RX_ERROP_L2_PCLP:
 2262                 stats->rx_l2_pclp++;
 2263                 break;
 2264         case CQ_RX_ERROP_IP_NOT:
 2265                 stats->rx_ip_ver_errs++;
 2266                 break;
 2267         case CQ_RX_ERROP_IP_CSUM_ERR:
 2268                 stats->rx_ip_csum_errs++;
 2269                 break;
 2270         case CQ_RX_ERROP_IP_MAL:
 2271                 stats->rx_ip_hdr_malformed++;
 2272                 break;
 2273         case CQ_RX_ERROP_IP_MALD:
 2274                 stats->rx_ip_payload_malformed++;
 2275                 break;
 2276         case CQ_RX_ERROP_IP_HOP:
 2277                 stats->rx_ip_ttl_errs++;
 2278                 break;
 2279         case CQ_RX_ERROP_L3_PCLP:
 2280                 stats->rx_l3_pclp++;
 2281                 break;
 2282         case CQ_RX_ERROP_L4_MAL:
 2283                 stats->rx_l4_malformed++;
 2284                 break;
 2285         case CQ_RX_ERROP_L4_CHK:
 2286                 stats->rx_l4_csum_errs++;
 2287                 break;
 2288         case CQ_RX_ERROP_UDP_LEN:
 2289                 stats->rx_udp_len_errs++;
 2290                 break;
 2291         case CQ_RX_ERROP_L4_PORT:
 2292                 stats->rx_l4_port_errs++;
 2293                 break;
 2294         case CQ_RX_ERROP_TCP_FLAG:
 2295                 stats->rx_tcp_flag_errs++;
 2296                 break;
 2297         case CQ_RX_ERROP_TCP_OFFSET:
 2298                 stats->rx_tcp_offset_errs++;
 2299                 break;
 2300         case CQ_RX_ERROP_L4_PCLP:
 2301                 stats->rx_l4_pclp++;
 2302                 break;
 2303         case CQ_RX_ERROP_RBDR_TRUNC:
 2304                 stats->rx_truncated_pkts++;
 2305                 break;
 2306         }
 2307 
 2308         return (1);
 2309 }
 2310 
 2311 /* Check for errors in the send completion-queue entry */
 2312 int
 2313 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2314     struct cqe_send_t *cqe_tx)
 2315 {
 2316         struct cmp_queue_stats *stats = &cq->stats;
 2317 
 2318         switch (cqe_tx->send_status) {
 2319         case CQ_TX_ERROP_GOOD:
 2320                 stats->tx.good++;
 2321                 return (0);
 2322         case CQ_TX_ERROP_DESC_FAULT:
 2323                 stats->tx.desc_fault++;
 2324                 break;
 2325         case CQ_TX_ERROP_HDR_CONS_ERR:
 2326                 stats->tx.hdr_cons_err++;
 2327                 break;
 2328         case CQ_TX_ERROP_SUBDC_ERR:
 2329                 stats->tx.subdesc_err++;
 2330                 break;
 2331         case CQ_TX_ERROP_IMM_SIZE_OFLOW:
 2332                 stats->tx.imm_size_oflow++;
 2333                 break;
 2334         case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
 2335                 stats->tx.data_seq_err++;
 2336                 break;
 2337         case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
 2338                 stats->tx.mem_seq_err++;
 2339                 break;
 2340         case CQ_TX_ERROP_LOCK_VIOL:
 2341                 stats->tx.lock_viol++;
 2342                 break;
 2343         case CQ_TX_ERROP_DATA_FAULT:
 2344                 stats->tx.data_fault++;
 2345                 break;
 2346         case CQ_TX_ERROP_TSTMP_CONFLICT:
 2347                 stats->tx.tstmp_conflict++;
 2348                 break;
 2349         case CQ_TX_ERROP_TSTMP_TIMEOUT:
 2350                 stats->tx.tstmp_timeout++;
 2351                 break;
 2352         case CQ_TX_ERROP_MEM_FAULT:
 2353                 stats->tx.mem_fault++;
 2354                 break;
 2355         case CQ_TX_ERROP_CK_OVERLAP:
 2356                 stats->tx.csum_overlap++;
 2357                 break;
 2358         case CQ_TX_ERROP_CK_OFLOW:
 2359                 stats->tx.csum_overflow++;
 2360                 break;
 2361         }
 2362 
 2363         return (1);
 2364 }
