FreeBSD/Linux Kernel Cross Reference
sys/dev/vnic/nicvf_queues.c

    1 /*
    2  * Copyright (C) 2015 Cavium Inc.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD$
   27  *
   28  */
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "opt_inet.h"
   33 #include "opt_inet6.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/bitset.h>
   38 #include <sys/bitstring.h>
   39 #include <sys/buf_ring.h>
   40 #include <sys/bus.h>
   41 #include <sys/endian.h>
   42 #include <sys/kernel.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/pciio.h>
   47 #include <sys/pcpu.h>
   48 #include <sys/proc.h>
   49 #include <sys/sockio.h>
   50 #include <sys/socket.h>
   51 #include <sys/stdatomic.h>
   52 #include <sys/cpuset.h>
   53 #include <sys/lock.h>
   54 #include <sys/mutex.h>
   55 #include <sys/smp.h>
   56 #include <sys/taskqueue.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/pmap.h>
   60 
   61 #include <machine/bus.h>
   62 #include <machine/vmparam.h>
   63 
   64 #include <net/if.h>
   65 #include <net/if_var.h>
   66 #include <net/if_media.h>
   67 #include <net/ifq.h>
   68 #include <net/bpf.h>
   69 #include <net/ethernet.h>
   70 
   71 #include <netinet/in_systm.h>
   72 #include <netinet/in.h>
   73 #include <netinet/if_ether.h>
   74 #include <netinet/ip.h>
   75 #include <netinet/ip6.h>
   76 #include <netinet/sctp.h>
   77 #include <netinet/tcp.h>
   78 #include <netinet/tcp_lro.h>
   79 #include <netinet/udp.h>
   80 
   81 #include <netinet6/ip6_var.h>
   82 
   83 #include <dev/pci/pcireg.h>
   84 #include <dev/pci/pcivar.h>
   85 
   86 #include "thunder_bgx.h"
   87 #include "nic_reg.h"
   88 #include "nic.h"
   89 #include "q_struct.h"
   90 #include "nicvf_queues.h"
   91 
   92 #define DEBUG
   93 #undef DEBUG
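      /*
       * DEBUG is defined and immediately undefined above, so dprintf()
       * below compiles away to nothing; drop the #undef to get debug output.
       */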
   94 
   95 #ifdef DEBUG
   96 #define dprintf(dev, fmt, ...)  device_printf(dev, fmt, ##__VA_ARGS__)
   97 #else
   98 #define dprintf(dev, fmt, ...)
   99 #endif
  100 
  101 MALLOC_DECLARE(M_NICVF);
  102 
  103 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
  104 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
  105 static void nicvf_sq_disable(struct nicvf *, int);
  106 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
  107 static void nicvf_put_sq_desc(struct snd_queue *, int);
  108 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
  109     boolean_t);
  110 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
  111 
  112 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
  113 
  114 static void nicvf_rbdr_task(void *, int);
  115 static void nicvf_rbdr_task_nowait(void *, int);
  116 
  117 struct rbuf_info {
  118         bus_dma_tag_t   dmat;
  119         bus_dmamap_t    dmap;
  120         struct mbuf *   mbuf;
  121 };
  122 
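      /*
       * Layout of a receive buffer, as built by nicvf_alloc_rcv_buffer():
       *
       *   |<- NICVF_RCV_BUF_ALIGN_BYTES ->|<---------- payload ---------->|
       *   +-------------------------------+-------------------------------+
       *   | struct rbuf_info (metadata)   | packet data (DMA'ed by HW)    |
       *   +-------------------------------+-------------------------------+
       *                                   ^
       *                                   address handed to the hardware
       *
       * The hardware reports only the payload DMA address, so GET_RBUF_INFO()
       * steps back one aligned line from that address (translated through the
       * direct map) to recover the metadata, including the owning mbuf.
       */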
  123 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
  124 
  125 /* Poll a register for a specific value; gives up after roughly 10 ms */
  126 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
  127                           uint64_t reg, int bit_pos, int bits, int val)
  128 {
  129         uint64_t bit_mask;
  130         uint64_t reg_val;
  131         int timeout = 10;
  132 
  133         bit_mask = (1UL << bits) - 1;
  134         bit_mask = (bit_mask << bit_pos);
  135 
  136         while (timeout) {
  137                 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
  138                 if (((reg_val & bit_mask) >> bit_pos) == val)
  139                         return (0);
  140 
  141                 DELAY(1000);
  142                 timeout--;
  143         }
  144         device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
  145         return (ETIMEDOUT);
  146 }
  147 
  148 /* Callback for bus_dmamap_load() */
  149 static void
  150 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  151 {
  152         bus_addr_t *paddr;
  153 
  154         KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
  155         paddr = arg;
  156         *paddr = segs->ds_addr;
  157 }
  158 
  159 /* Allocate memory for a queue's descriptors */
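      /*
       * The standard busdma sequence is used: create a tag sized for the
       * whole ring (q_len * desc_size, one segment), allocate zeroed
       * DMA-safe memory from it, and load the map to obtain the physical
       * base address.  On failure the completed steps are unwound in
       * reverse order before the error is returned.
       */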
  160 static int
  161 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
  162     int q_len, int desc_size, int align_bytes)
  163 {
  164         int err, err_dmat __diagused;
  165 
  166         /* Create DMA tag first */
  167         err = bus_dma_tag_create(
  168             bus_get_dma_tag(nic->dev),          /* parent tag */
  169             align_bytes,                        /* alignment */
  170             0,                                  /* boundary */
  171             BUS_SPACE_MAXADDR,                  /* lowaddr */
  172             BUS_SPACE_MAXADDR,                  /* highaddr */
  173             NULL, NULL,                         /* filtfunc, filtfuncarg */
  174             (q_len * desc_size),                /* maxsize */
  175             1,                                  /* nsegments */
  176             (q_len * desc_size),                /* maxsegsize */
  177             0,                                  /* flags */
  178             NULL, NULL,                         /* lockfunc, lockfuncarg */
  179             &dmem->dmat);                       /* dmat */
  180 
  181         if (err != 0) {
  182                 device_printf(nic->dev,
  183                     "Failed to create busdma tag for descriptors ring\n");
  184                 return (err);
  185         }
  186 
  187         /* Allocate a segment of contiguous DMA-safe memory */
  188         err = bus_dmamem_alloc(
  189             dmem->dmat,                         /* DMA tag */
  190             &dmem->base,                        /* virtual address */
  191             (BUS_DMA_NOWAIT | BUS_DMA_ZERO),    /* flags */
  192             &dmem->dmap);                       /* DMA map */
  193         if (err != 0) {
  194                 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
  195                     "descriptors ring\n");
  196                 goto dmamem_fail;
  197         }
  198 
  199         err = bus_dmamap_load(
  200             dmem->dmat,
  201             dmem->dmap,
  202             dmem->base,
  203             (q_len * desc_size),                /* allocation size */
  204             nicvf_dmamap_q_cb,                  /* map to DMA address cb. */
  205             &dmem->phys_base,                   /* physical address */
  206             BUS_DMA_NOWAIT);
  207         if (err != 0) {
  208                 device_printf(nic->dev,
  209                     "Cannot load DMA map of descriptors ring\n");
  210                 goto dmamap_fail;
  211         }
  212 
  213         dmem->q_len = q_len;
  214         dmem->size = (desc_size * q_len);
  215 
  216         return (0);
  217 
  218 dmamap_fail:
  219         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  220         dmem->phys_base = 0;
  221 dmamem_fail:
  222         err_dmat = bus_dma_tag_destroy(dmem->dmat);
  223         dmem->base = NULL;
  224         KASSERT(err_dmat == 0,
  225             ("%s: Trying to destroy BUSY DMA tag", __func__));
  226 
  227         return (err);
  228 }
  229 
  230 /* Free queue's descriptor memory */
  231 static void
  232 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
  233 {
  234         int err __diagused;
  235 
  236         if ((dmem == NULL) || (dmem->base == NULL))
  237                 return;
  238 
  239         /* Unload a map */
  240         bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
  241         bus_dmamap_unload(dmem->dmat, dmem->dmap);
  242         /* Free DMA memory */
  243         bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
  244         /* Destroy DMA tag */
  245         err = bus_dma_tag_destroy(dmem->dmat);
  246 
  247         KASSERT(err == 0,
  248             ("%s: Trying to destroy BUSY DMA tag", __func__));
  249 
  250         dmem->phys_base = 0;
  251         dmem->base = NULL;
  252 }
  253 
  254 /*
  255  * Allocate buffer for packet reception
  256  * HW returns memory address where packet is DMA'ed but not a pointer
  257  * into RBDR ring, so save buffer address at the start of fragment and
  258  * align the start address to a cache aligned address
  259  */
  260 static __inline int
  261 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
  262     bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
  263 {
  264         struct mbuf *mbuf;
  265         struct rbuf_info *rinfo;
  266         bus_dma_segment_t segs[1];
  267         int nsegs;
  268         int err;
  269 
  270         mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
  271         if (mbuf == NULL)
  272                 return (ENOMEM);
  273 
  274         /*
  275          * The length is equal to the actual data length plus one 128-byte
  276          * line used as room for the rbuf_info structure.
  277          */
  278         mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
  279 
  280         err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
  281             &nsegs, BUS_DMA_NOWAIT);
  282         if (err != 0) {
  283                 device_printf(nic->dev,
  284                     "Failed to map mbuf into DMA visible memory, err: %d\n",
  285                     err);
  286                 m_freem(mbuf);
  287                 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
  288                 return (err);
  289         }
  290         if (nsegs != 1)
  291                 panic("Unexpected number of DMA segments for RB: %d", nsegs);
  292         /*
  293          * Now use the room for rbuf_info structure
  294          * and adjust mbuf data and length.
  295          */
  296         rinfo = (struct rbuf_info *)mbuf->m_data;
  297         m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
  298 
  299         rinfo->dmat = rbdr->rbdr_buff_dmat;
  300         rinfo->dmap = dmap;
  301         rinfo->mbuf = mbuf;
  302 
  303         *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
  304 
  305         return (0);
  306 }
  307 
  308 /* Retrieve mbuf for received packet */
  309 static struct mbuf *
  310 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
  311 {
  312         struct mbuf *mbuf;
  313         struct rbuf_info *rinfo;
  314 
  315         /* Get buffer start address and alignment offset */
  316         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
  317 
  318         /* Now retrieve mbuf to give to stack */
  319         mbuf = rinfo->mbuf;
  320         if (__predict_false(mbuf == NULL)) {
  321                 panic("%s: Received packet fragment with NULL mbuf",
  322                     device_get_nameunit(nic->dev));
  323         }
  324         /*
  325          * Clear the mbuf in the descriptor to indicate
  326          * that this slot is processed and free to use.
  327          */
  328         rinfo->mbuf = NULL;
  329 
  330         bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
  331         bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
  332 
  333         return (mbuf);
  334 }
  335 
  336 /* Allocate RBDR ring and populate receive buffers */
  337 static int
  338 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
  339     int buf_size, int qidx)
  340 {
  341         bus_dmamap_t dmap;
  342         bus_addr_t rbuf;
  343         struct rbdr_entry_t *desc;
  344         int idx;
  345         int err;
  346 
  347         /* Allocate rbdr descriptors ring */
  348         err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
  349             sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
  350         if (err != 0) {
  351                 device_printf(nic->dev,
  352                     "Failed to create RBDR descriptors ring\n");
  353                 return (err);
  354         }
  355 
  356         rbdr->desc = rbdr->dmem.base;
  357         /*
  358          * The buffer size has to be a multiple of 128 bytes.
  359          * Make room for metadata the size of one cache line (128 bytes).
  360          */
  361         rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
  362         rbdr->enable = TRUE;
  363         rbdr->thresh = RBDR_THRESH;
  364         rbdr->nic = nic;
  365         rbdr->idx = qidx;
  366 
  367         /*
  368          * Create DMA tag for Rx buffers.
  369          * Each map created using this tag is intended to store Rx payload for
  370          * one fragment plus a header structure containing rbuf_info (hence
  371          * the additional 128-byte line, since an RB must be a multiple of the
  372          * 128-byte cache line).
  373          */
  374         if (buf_size > MCLBYTES) {
  375                 device_printf(nic->dev,
  376                     "Buffer size too large for mbuf cluster\n");
  377                 return (EINVAL);
  378         }
  379         err = bus_dma_tag_create(
  380             bus_get_dma_tag(nic->dev),          /* parent tag */
  381             NICVF_RCV_BUF_ALIGN_BYTES,          /* alignment */
  382             0,                                  /* boundary */
  383             DMAP_MAX_PHYSADDR,                  /* lowaddr */
  384             DMAP_MIN_PHYSADDR,                  /* highaddr */
  385             NULL, NULL,                         /* filtfunc, filtfuncarg */
  386             roundup2(buf_size, MCLBYTES),       /* maxsize */
  387             1,                                  /* nsegments */
  388             roundup2(buf_size, MCLBYTES),       /* maxsegsize */
  389             0,                                  /* flags */
  390             NULL, NULL,                         /* lockfunc, lockfuncarg */
  391             &rbdr->rbdr_buff_dmat);             /* dmat */
  392 
  393         if (err != 0) {
  394                 device_printf(nic->dev,
  395                     "Failed to create busdma tag for RBDR buffers\n");
  396                 return (err);
  397         }
  398 
  399         rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
  400             ring_len, M_NICVF, (M_WAITOK | M_ZERO));
  401 
  402         for (idx = 0; idx < ring_len; idx++) {
  403                 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
  404                 if (err != 0) {
  405                         device_printf(nic->dev,
  406                             "Failed to create DMA map for RB\n");
  407                         return (err);
  408                 }
  409                 rbdr->rbdr_buff_dmaps[idx] = dmap;
  410 
  411                 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
  412                     DMA_BUFFER_LEN, &rbuf);
  413                 if (err != 0)
  414                         return (err);
  415 
  416                 desc = GET_RBDR_DESC(rbdr, idx);
  417                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  418         }
  419 
  420         /* Allocate taskqueue */
  421         TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
  422         TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
  423         rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
  424             taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
  425         taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
  426             device_get_nameunit(nic->dev));
  427 
  428         return (0);
  429 }
  430 
  431 /* Free RBDR ring and its receive buffers */
  432 static void
  433 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
  434 {
  435         struct mbuf *mbuf;
  436         struct queue_set *qs;
  437         struct rbdr_entry_t *desc;
  438         struct rbuf_info *rinfo;
  439         bus_addr_t buf_addr;
  440         int head, tail, idx;
  441         int err __diagused;
  442 
  443         qs = nic->qs;
  444 
  445         if ((qs == NULL) || (rbdr == NULL))
  446                 return;
  447 
  448         rbdr->enable = FALSE;
  449         if (rbdr->rbdr_taskq != NULL) {
  450                 /* Remove tasks */
  451                 while (taskqueue_cancel(rbdr->rbdr_taskq,
  452                     &rbdr->rbdr_task_nowait, NULL) != 0) {
  453                         /* Finish the nowait task first */
  454                         taskqueue_drain(rbdr->rbdr_taskq,
  455                             &rbdr->rbdr_task_nowait);
  456                 }
  457                 taskqueue_free(rbdr->rbdr_taskq);
  458                 rbdr->rbdr_taskq = NULL;
  459 
  460                 while (taskqueue_cancel(taskqueue_thread,
  461                     &rbdr->rbdr_task, NULL) != 0) {
  462                         /* Now finish the sleepable task */
  463                         taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
  464                 }
  465         }
  466 
  467         /*
  468          * Free all of the memory under the RB descriptors.
  469          * There are assumptions here:
  470          * 1. Corresponding RBDR is disabled
  471          *    - it is safe to operate using head and tail indexes
  472          * 2. All buffers that were received are properly freed by
  473          *    the receive handler
  474          *    - there is no need to unload DMA map and free MBUF for other
  475          *      descriptors than unused ones
  476          */
  477         if (rbdr->rbdr_buff_dmat != NULL) {
  478                 head = rbdr->head;
  479                 tail = rbdr->tail;
  480                 while (head != tail) {
  481                         desc = GET_RBDR_DESC(rbdr, head);
  482                         buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  483                         rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  484                         bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  485                         mbuf = rinfo->mbuf;
  486                         /* This will destroy everything including rinfo! */
  487                         m_freem(mbuf);
  488                         head++;
  489                         head &= (rbdr->dmem.q_len - 1);
  490                 }
  491                 /* Free tail descriptor */
  492                 desc = GET_RBDR_DESC(rbdr, tail);
  493                 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
  494                 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
  495                 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
  496                 mbuf = rinfo->mbuf;
  497                 /* This will destroy everything including rinfo! */
  498                 m_freem(mbuf);
  499 
  500                 /* Destroy DMA maps */
  501                 for (idx = 0; idx < qs->rbdr_len; idx++) {
  502                         if (rbdr->rbdr_buff_dmaps[idx] == NULL)
  503                                 continue;
  504                         err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
  505                             rbdr->rbdr_buff_dmaps[idx]);
  506                         KASSERT(err == 0,
  507                             ("%s: Could not destroy DMA map for RB, desc: %d",
  508                             __func__, idx));
  509                         rbdr->rbdr_buff_dmaps[idx] = NULL;
  510                 }
  511 
  512                 /* Now destroy the tag */
  513                 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
  514                 KASSERT(err == 0,
  515                     ("%s: Trying to destroy BUSY DMA tag", __func__));
  516 
  517                 rbdr->head = 0;
  518                 rbdr->tail = 0;
  519         }
  520 
  521         /* Free RBDR ring */
  522         nicvf_free_q_desc_mem(nic, &rbdr->dmem);
  523 }
  524 
  525 /*
  526  * Refill receive buffer descriptors with new buffers.
  527  */
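      /*
       * The RBDR status register reports how many buffers are currently
       * posted (qcount).  Since the doorbell may only be rung with at most
       * ring size minus one entries, up to (rbdr_len - qcount - 1) buffers
       * are allocated, written into the descriptors starting at the current
       * tail, and then announced to the hardware with a single doorbell
       * write.
       */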
  528 static int
  529 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
  530 {
  531         struct nicvf *nic;
  532         struct queue_set *qs;
  533         int rbdr_idx;
  534         int tail, qcount;
  535         int refill_rb_cnt;
  536         struct rbdr_entry_t *desc;
  537         bus_dmamap_t dmap;
  538         bus_addr_t rbuf;
  539         boolean_t rb_alloc_fail;
  540         int new_rb;
  541 
  542         rb_alloc_fail = TRUE;
  543         new_rb = 0;
  544         nic = rbdr->nic;
  545         qs = nic->qs;
  546         rbdr_idx = rbdr->idx;
  547 
  548         /* Check if it's enabled */
  549         if (!rbdr->enable)
  550                 return (0);
  551 
  552         /* Get the number of descriptors to be refilled */
  553         qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
  554         qcount &= 0x7FFFF;
  555         /* The doorbell can be rung with at most ring size minus 1 */
  556         if (qcount >= (qs->rbdr_len - 1)) {
  557                 rb_alloc_fail = FALSE;
  558                 goto out;
  559         } else
  560                 refill_rb_cnt = qs->rbdr_len - qcount - 1;
  561 
  562         /* Start filling descs from tail */
  563         tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
  564         while (refill_rb_cnt) {
  565                 tail++;
  566                 tail &= (rbdr->dmem.q_len - 1);
  567 
  568                 dmap = rbdr->rbdr_buff_dmaps[tail];
  569                 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
  570                     DMA_BUFFER_LEN, &rbuf)) {
  571                         /* Something went wrong. Give up. */
  572                         break;
  573                 }
  574                 desc = GET_RBDR_DESC(rbdr, tail);
  575                 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
  576                 refill_rb_cnt--;
  577                 new_rb++;
  578         }
  579 
  580         /* make sure all memory stores are done before ringing doorbell */
  581         wmb();
  582 
  583         /* Check if buffer allocation failed */
  584         if (refill_rb_cnt == 0)
  585                 rb_alloc_fail = FALSE;
  586 
  587         /* Notify HW */
  588         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
  589                               rbdr_idx, new_rb);
  590 out:
  591         if (!rb_alloc_fail) {
  592                 /*
  593                  * Re-enable RBDR interrupts only
  594                  * if buffer allocation succeeded.
  595                  */
  596                 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
  597 
  598                 return (0);
  599         }
  600 
  601         return (ENOMEM);
  602 }
  603 
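      /*
       * Refilling is two-stage: nicvf_rbdr_task_nowait() attempts an
       * M_NOWAIT refill first; if the ring cannot be filled completely it
       * hands off to nicvf_rbdr_task() on taskqueue_thread, which retries
       * with M_WAITOK and may sleep until memory becomes available.
       */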
  604 /* Refill RBs even if sleep is needed to reclaim memory */
  605 static void
  606 nicvf_rbdr_task(void *arg, int pending)
  607 {
  608         struct rbdr *rbdr;
  609         int err;
  610 
  611         rbdr = (struct rbdr *)arg;
  612 
  613         err = nicvf_refill_rbdr(rbdr, M_WAITOK);
  614         if (__predict_false(err != 0)) {
  615                 panic("%s: Failed to refill RBs even when sleep enabled",
  616                     __func__);
  617         }
  618 }
  619 
  620 /* Refill RBs as soon as possible without waiting */
  621 static void
  622 nicvf_rbdr_task_nowait(void *arg, int pending)
  623 {
  624         struct rbdr *rbdr;
  625         int err;
  626 
  627         rbdr = (struct rbdr *)arg;
  628 
  629         err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
  630         if (err != 0) {
  631                 /*
  632                  * Schedule the sleepable task, which is
  633                  * guaranteed to refill the buffers.
  634                  */
  635                 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
  636         }
  637 }
  638 
  639 static int
  640 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  641     struct cqe_rx_t *cqe_rx, int cqe_type)
  642 {
  643         struct mbuf *mbuf;
  644         struct rcv_queue *rq;
  645         int rq_idx;
  646         int err = 0;
  647 
  648         rq_idx = cqe_rx->rq_idx;
  649         rq = &nic->qs->rq[rq_idx];
  650 
  651         /* Check for errors */
  652         err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
  653         if (err && !cqe_rx->rb_cnt)
  654                 return (0);
  655 
  656         mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
  657         if (mbuf == NULL) {
  658                 dprintf(nic->dev, "Packet not received\n");
  659                 return (0);
  660         }
  661 
  662         /* Drop the packet if it had errors */
  663         if (err != 0) {
  664                 m_freem(mbuf);
  665                 return (0);
  666         }
  667 
  668         if (rq->lro_enabled &&
  669             ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
  670             (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
  671             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
  672                 /*
  673                  * At this point it is known that there are no errors in the
  674                  * packet. Attempt to LRO enqueue. Send to stack if no resources
  675                  * or enqueue error.
  676                  */
  677                 if ((rq->lro.lro_cnt != 0) &&
  678                     (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
  679                         return (0);
  680         }
  681         /*
  682          * Push this packet to the stack later to avoid
  683          * unlocking the completion task in the middle of its work.
  684          */
  685         err = buf_ring_enqueue(cq->rx_br, mbuf);
  686         if (err != 0) {
  687                 /*
  688                  * Failed to enqueue this mbuf.
  689                  * We don't drop it, just schedule another task.
  690                  */
  691                 return (err);
  692         }
  693 
  694         return (0);
  695 }
  696 
  697 static void
  698 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
  699     struct cqe_send_t *cqe_tx, int cqe_type)
  700 {
  701         bus_dmamap_t dmap;
  702         struct mbuf *mbuf;
  703         struct snd_queue *sq;
  704         struct sq_hdr_subdesc *hdr;
  705 
  706         mbuf = NULL;
  707         sq = &nic->qs->sq[cqe_tx->sq_idx];
  708 
  709         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
  710         if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
  711                 return;
  712 
  713         dprintf(nic->dev,
  714             "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
  715             __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
  716             cqe_tx->sqe_ptr, hdr->subdesc_cnt);
  717 
  718         dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
  719         bus_dmamap_unload(sq->snd_buff_dmat, dmap);
  720 
  721         mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
  722         if (mbuf != NULL) {
  723                 m_freem(mbuf);
  724                 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
  725                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
  726         }
  727 
  728         nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
  729 }
  730 
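      /*
       * Drain pending completion entries for a CQ.  RX completions are
       * turned into mbufs and staged on cq->rx_br; SEND completions release
       * the corresponding SQ descriptors and mbufs.  The staged mbufs are
       * passed to if_input() only after the CMP lock has been dropped.
       * Returns non-zero if processing had to stop early (e.g. the staging
       * ring was full) so that the caller can reschedule the task.
       */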
  731 static int
  732 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
  733 {
  734         struct mbuf *mbuf;
  735         struct ifnet *ifp;
  736         int processed_cqe, tx_done = 0;
  737 #ifdef DEBUG
  738         int work_done = 0;
  739 #endif
  740         int cqe_count, cqe_head;
  741         struct queue_set *qs = nic->qs;
  742         struct cmp_queue *cq = &qs->cq[cq_idx];
  743         struct snd_queue *sq = &qs->sq[cq_idx];
  744         struct rcv_queue *rq;
  745         struct cqe_rx_t *cq_desc;
  746         struct lro_ctrl *lro;
  747         int rq_idx;
  748         int cmp_err;
  749 
  750         NICVF_CMP_LOCK(cq);
  751         cmp_err = 0;
  752         processed_cqe = 0;
  753         /* Get the number of valid CQ entries to process */
  754         cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
  755         cqe_count &= CQ_CQE_COUNT;
  756         if (cqe_count == 0)
  757                 goto out;
  758 
  759         /* Get head of the valid CQ entries */
  760         cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
  761         cqe_head &= 0xFFFF;
  762 
  763         dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
  764             __func__, cq_idx, cqe_count, cqe_head);
  765         while (processed_cqe < cqe_count) {
  766                 /* Get the CQ descriptor */
  767                 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
  768                 cqe_head++;
  769                 cqe_head &= (cq->dmem.q_len - 1);
  770                 /* Prefetch next CQ descriptor */
  771                 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
  772 
  773                 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
  774                     cq_desc->cqe_type);
  775                 switch (cq_desc->cqe_type) {
  776                 case CQE_TYPE_RX:
  777                         cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
  778                             CQE_TYPE_RX);
  779                         if (__predict_false(cmp_err != 0)) {
  780                                 /*
  781                                  * Oops. Cannot finish now.
  782                                  * Let's try again later.
  783                                  */
  784                                 goto done;
  785                         }
  786 #ifdef DEBUG
  787                         work_done++;
  788 #endif
  789                         break;
  790                 case CQE_TYPE_SEND:
  791                         nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
  792                             CQE_TYPE_SEND);
  793                         tx_done++;
  794                         break;
  795                 case CQE_TYPE_INVALID:
  796                 case CQE_TYPE_RX_SPLIT:
  797                 case CQE_TYPE_RX_TCP:
  798                 case CQE_TYPE_SEND_PTP:
  799                         /* Ignore for now */
  800                         break;
  801                 }
  802                 processed_cqe++;
  803         }
  804 done:
  805         dprintf(nic->dev,
  806             "%s CQ%d processed_cqe %d work_done %d\n",
  807             __func__, cq_idx, processed_cqe, work_done);
  808 
  809         /* Ring doorbell to inform H/W to reuse processed CQEs */
  810         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
  811 
  812         if ((tx_done > 0) &&
  813             ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
  814                 /* Re-enable TXQ if it was stopped earlier due to SQ full */
  815                 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  816                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
  817         }
  818 out:
  819         /*
  820          * Flush any outstanding LRO work
  821          */
  822         rq_idx = cq_idx;
  823         rq = &nic->qs->rq[rq_idx];
  824         lro = &rq->lro;
  825         tcp_lro_flush_all(lro);
  826 
  827         NICVF_CMP_UNLOCK(cq);
  828 
  829         ifp = nic->ifp;
  830         /* Push received MBUFs to the stack */
  831         while (!buf_ring_empty(cq->rx_br)) {
  832                 mbuf = buf_ring_dequeue_mc(cq->rx_br);
  833                 if (__predict_true(mbuf != NULL))
  834                         (*ifp->if_input)(ifp, mbuf);
  835         }
  836 
  837         return (cmp_err);
  838 }
  839 
  840 /*
  841  * Qset error interrupt handler
  842  *
  843  * As of now only CQ errors are handled
  844  */
  845 static void
  846 nicvf_qs_err_task(void *arg, int pending)
  847 {
  848         struct nicvf *nic;
  849         struct queue_set *qs;
  850         int qidx;
  851         uint64_t status;
  852         boolean_t enable = TRUE;
  853 
  854         nic = (struct nicvf *)arg;
  855         qs = nic->qs;
  856 
  857         /* Deactivate network interface */
  858         if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
  859 
  860         /* Check if it is CQ err */
  861         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
  862                 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
  863                     qidx);
  864                 if ((status & CQ_ERR_MASK) == 0)
  865                         continue;
  866                 /* Process already queued CQEs and reconfig CQ */
  867                 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
  868                 nicvf_sq_disable(nic, qidx);
  869                 (void)nicvf_cq_intr_handler(nic, qidx);
  870                 nicvf_cmp_queue_config(nic, qs, qidx, enable);
  871                 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
  872                 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
  873                 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
  874         }
  875 
  876         if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
  877         /* Re-enable Qset error interrupt */
  878         nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
  879 }
  880 
  881 static void
  882 nicvf_cmp_task(void *arg, int pending)
  883 {
  884         struct cmp_queue *cq;
  885         struct nicvf *nic;
  886         int cmp_err;
  887 
  888         cq = (struct cmp_queue *)arg;
  889         nic = cq->nic;
  890 
  891         /* Handle CQ descriptors */
  892         cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
  893         if (__predict_false(cmp_err != 0)) {
  894                 /*
  895                  * Schedule another thread here since we did not
  896                  * process the entire CQ due to Tx or Rx CQ parse error.
  897                  */
  898                 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
  899         }
  900 
  901         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
  902         /* Re-enable interrupt (previously disabled in nicvf_intr_handler()) */
  903         nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
  904 
  905 }
  906 
  907 /* Initialize completion queue */
  908 static int
  909 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
  910     int qidx)
  911 {
  912         int err;
  913 
  914         /* Initialize lock */
  915         snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
  916             device_get_nameunit(nic->dev), qidx);
  917         mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
  918 
  919         err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
  920                                      NICVF_CQ_BASE_ALIGN_BYTES);
  921 
  922         if (err != 0) {
  923                 device_printf(nic->dev,
  924                     "Could not allocate DMA memory for CQ\n");
  925                 return (err);
  926         }
  927 
  928         cq->desc = cq->dmem.base;
  929         cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
  930         cq->nic = nic;
  931         cq->idx = qidx;
  932         nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
  933 
  934         cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
  935             &cq->mtx);
  936 
  937         /* Allocate taskqueue */
  938         NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
  939         cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
  940             taskqueue_thread_enqueue, &cq->cmp_taskq);
  941         taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
  942             device_get_nameunit(nic->dev), qidx);
  943 
  944         return (0);
  945 }
  946 
  947 static void
  948 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
  949 {
  950 
  951         if (cq == NULL)
  952                 return;
  953         /*
  954          * The completion queue itself should be disabled by now
  955          * (ref. nicvf_snd_queue_config()).
  956          * Verify that it is indeed disabled before freeing; panic otherwise.
  957          */
  958         if (cq->enable)
  959                 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
  960 
  961         if (cq->cmp_taskq != NULL) {
  962                 /* Remove task */
  963                 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
  964                         taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
  965 
  966                 taskqueue_free(cq->cmp_taskq);
  967                 cq->cmp_taskq = NULL;
  968         }
  969         /*
  970          * The completion task may have re-enabled the interrupt, so
  971          * disable it now that the task has finished processing.
  972          * It is safe to do so since the corresponding CQ
  973          * was already disabled.
  974          */
  975         nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
  976         nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
  977 
  978         NICVF_CMP_LOCK(cq);
  979         nicvf_free_q_desc_mem(nic, &cq->dmem);
  980         drbr_free(cq->rx_br, M_DEVBUF);
  981         NICVF_CMP_UNLOCK(cq);
  982         mtx_destroy(&cq->mtx);
  983         memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
  984 }
  985 
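      /*
       * Drain frames from the send queue's buf_ring and hand them to the
       * hardware.  The drbr_peek()/drbr_advance() pattern is used so that,
       * on a transmit error, a frame already consumed by
       * nicvf_tx_mbuf_locked() (next == NULL) is advanced past, while an
       * unconsumed frame is put back with drbr_putback() to be retried.
       */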
  986 int
  987 nicvf_xmit_locked(struct snd_queue *sq)
  988 {
  989         struct nicvf *nic;
  990         struct ifnet *ifp;
  991         struct mbuf *next;
  992         int err;
  993 
  994         NICVF_TX_LOCK_ASSERT(sq);
  995 
  996         nic = sq->nic;
  997         ifp = nic->ifp;
  998         err = 0;
  999 
 1000         while ((next = drbr_peek(ifp, sq->br)) != NULL) {
 1001                 /* Send a copy of the frame to the BPF listener */
 1002                 ETHER_BPF_MTAP(ifp, next);
 1003 
 1004                 err = nicvf_tx_mbuf_locked(sq, &next);
 1005                 if (err != 0) {
 1006                         if (next == NULL)
 1007                                 drbr_advance(ifp, sq->br);
 1008                         else
 1009                                 drbr_putback(ifp, sq->br, next);
 1010 
 1011                         break;
 1012                 }
 1013                 drbr_advance(ifp, sq->br);
 1014         }
 1015         return (err);
 1016 }
 1017 
 1018 static void
 1019 nicvf_snd_task(void *arg, int pending)
 1020 {
 1021         struct snd_queue *sq = (struct snd_queue *)arg;
 1022         struct nicvf *nic;
 1023         struct ifnet *ifp;
 1024         int err;
 1025 
 1026         nic = sq->nic;
 1027         ifp = nic->ifp;
 1028 
 1029         /*
 1030          * Skip sending anything if the driver is not running,
 1031          * the SQ is full, or the link is down.
 1032          */
 1033         if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1034             IFF_DRV_RUNNING) || !nic->link_up)
 1035                 return;
 1036 
 1037         NICVF_TX_LOCK(sq);
 1038         err = nicvf_xmit_locked(sq);
 1039         NICVF_TX_UNLOCK(sq);
 1040         /* Try again */
 1041         if (err != 0)
 1042                 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 1043 }
 1044 
 1045 /* Initialize transmit queue */
 1046 static int
 1047 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
 1048     int qidx)
 1049 {
 1050         size_t i;
 1051         int err;
 1052 
 1053         /* Initialize TX lock for this queue */
 1054         snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
 1055             device_get_nameunit(nic->dev), qidx);
 1056         mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
 1057 
 1058         NICVF_TX_LOCK(sq);
 1059         /* Allocate buffer ring */
 1060         sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
 1061             M_NOWAIT, &sq->mtx);
 1062         if (sq->br == NULL) {
 1063                 device_printf(nic->dev,
 1064                     "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
 1065                 err = ENOMEM;
 1066                 goto error;
 1067         }
 1068 
 1069         /* Allocate DMA memory for Tx descriptors */
 1070         err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
 1071                                      NICVF_SQ_BASE_ALIGN_BYTES);
 1072         if (err != 0) {
 1073                 device_printf(nic->dev,
 1074                     "Could not allocate DMA memory for SQ\n");
 1075                 goto error;
 1076         }
 1077 
 1078         sq->desc = sq->dmem.base;
 1079         sq->head = sq->tail = 0;
 1080         atomic_store_rel_int(&sq->free_cnt, q_len - 1);
 1081         sq->thresh = SND_QUEUE_THRESH;
 1082         sq->idx = qidx;
 1083         sq->nic = nic;
 1084 
 1085         /*
 1086          * Allocate DMA maps for Tx buffers
 1087          */
 1088 
 1089         /* Create DMA tag first */
 1090         err = bus_dma_tag_create(
 1091             bus_get_dma_tag(nic->dev),          /* parent tag */
 1092             1,                                  /* alignment */
 1093             0,                                  /* boundary */
 1094             BUS_SPACE_MAXADDR,                  /* lowaddr */
 1095             BUS_SPACE_MAXADDR,                  /* highaddr */
 1096             NULL, NULL,                         /* filtfunc, filtfuncarg */
 1097             NICVF_TSO_MAXSIZE,                  /* maxsize */
 1098             NICVF_TSO_NSEGS,                    /* nsegments */
 1099             MCLBYTES,                           /* maxsegsize */
 1100             0,                                  /* flags */
 1101             NULL, NULL,                         /* lockfunc, lockfuncarg */
 1102             &sq->snd_buff_dmat);                /* dmat */
 1103 
 1104         if (err != 0) {
 1105                 device_printf(nic->dev,
 1106                     "Failed to create busdma tag for Tx buffers\n");
 1107                 goto error;
 1108         }
 1109 
 1110         /* Allocate send buffers array */
 1111         sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
 1112             (M_NOWAIT | M_ZERO));
 1113         if (sq->snd_buff == NULL) {
 1114                 device_printf(nic->dev,
 1115                     "Could not allocate memory for Tx buffers array\n");
 1116                 err = ENOMEM;
 1117                 goto error;
 1118         }
 1119 
 1120         /* Now populate maps */
 1121         for (i = 0; i < q_len; i++) {
 1122                 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
 1123                     &sq->snd_buff[i].dmap);
 1124                 if (err != 0) {
 1125                         device_printf(nic->dev,
 1126                             "Failed to create DMA maps for Tx buffers\n");
 1127                         goto error;
 1128                 }
 1129         }
 1130         NICVF_TX_UNLOCK(sq);
 1131 
 1132         /* Allocate taskqueue */
 1133         TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
 1134         sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
 1135             taskqueue_thread_enqueue, &sq->snd_taskq);
 1136         taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
 1137             device_get_nameunit(nic->dev), qidx);
 1138 
 1139         return (0);
 1140 error:
 1141         NICVF_TX_UNLOCK(sq);
 1142         return (err);
 1143 }
 1144 
 1145 static void
 1146 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 1147 {
 1148         struct queue_set *qs = nic->qs;
 1149         size_t i;
 1150         int err __diagused;
 1151 
 1152         if (sq == NULL)
 1153                 return;
 1154 
 1155         if (sq->snd_taskq != NULL) {
 1156                 /* Remove task */
 1157                 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
 1158                         taskqueue_drain(sq->snd_taskq, &sq->snd_task);
 1159 
 1160                 taskqueue_free(sq->snd_taskq);
 1161                 sq->snd_taskq = NULL;
 1162         }
 1163 
 1164         NICVF_TX_LOCK(sq);
 1165         if (sq->snd_buff_dmat != NULL) {
 1166                 if (sq->snd_buff != NULL) {
 1167                         for (i = 0; i < qs->sq_len; i++) {
 1168                                 m_freem(sq->snd_buff[i].mbuf);
 1169                                 sq->snd_buff[i].mbuf = NULL;
 1170 
 1171                                 bus_dmamap_unload(sq->snd_buff_dmat,
 1172                                     sq->snd_buff[i].dmap);
 1173                                 err = bus_dmamap_destroy(sq->snd_buff_dmat,
 1174                                     sq->snd_buff[i].dmap);
 1175                                 /*
 1176                                  * If bus_dmamap_destroy() fails, it can
 1177                                  * cause random panics later when the tag
 1178                                  * is also destroyed in the process.
 1179                                  */
 1180                                 KASSERT(err == 0,
 1181                                     ("%s: Could not destroy DMA map for SQ",
 1182                                     __func__));
 1183                         }
 1184                 }
 1185 
 1186                 free(sq->snd_buff, M_NICVF);
 1187 
 1188                 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
 1189                 KASSERT(err == 0,
 1190                     ("%s: Trying to destroy BUSY DMA tag", __func__));
 1191         }
 1192 
 1193         /* Free private driver ring for this send queue */
 1194         if (sq->br != NULL)
 1195                 drbr_free(sq->br, M_DEVBUF);
 1196 
 1197         if (sq->dmem.base != NULL)
 1198                 nicvf_free_q_desc_mem(nic, &sq->dmem);
 1199 
 1200         NICVF_TX_UNLOCK(sq);
 1201         /* Destroy Tx lock */
 1202         mtx_destroy(&sq->mtx);
 1203         memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
 1204 }
 1205 
 1206 static void
 1207 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1208 {
 1209 
 1210         /* Disable send queue */
 1211         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
 1212         /* Check if SQ is stopped */
 1213         if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
 1214                 return;
 1215         /* Reset send queue */
 1216         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1217 }
 1218 
 1219 static void
 1220 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1221 {
 1222         union nic_mbx mbx = {};
 1223 
 1224         /* Make sure all packets in the pipeline are written back into mem */
 1225         mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
 1226         nicvf_send_msg_to_pf(nic, &mbx);
 1227 }
 1228 
 1229 static void
 1230 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
 1231 {
 1232 
 1233         /* Disable timer threshold (doesn't get reset upon CQ reset) */
 1234         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
 1235         /* Disable completion queue */
 1236         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
 1237         /* Reset completion queue */
 1238         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1239 }
 1240 
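      /*
       * Bring an RBDR back to a clean reset state: save the head and tail
       * indexes so that nicvf_free_rbdr() can release the posted buffers,
       * reset the FIFO first if it is already in the FAIL state, disable
       * the ring, wait for the two halves of the prefetch status register
       * to match, then apply and finally release the reset.
       */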
 1241 static void
 1242 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
 1243 {
 1244         uint64_t tmp, fifo_state;
 1245         int timeout = 10;
 1246 
 1247         /* Save head and tail pointers for freeing up buffers */
 1248         rbdr->head =
 1249             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
 1250         rbdr->tail =
 1251             nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
 1252 
 1253         /*
 1254          * If RBDR FIFO is in 'FAIL' state then do a reset first
 1255          * before reclaiming.
 1256          */
 1257         fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
 1258         if (((fifo_state >> 62) & 0x03) == 0x3) {
 1259                 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
 1260                     qidx, NICVF_RBDR_RESET);
 1261         }
 1262 
 1263         /* Disable RBDR */
 1264         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
 1265         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1266                 return;
 1267         while (1) {
 1268                 tmp = nicvf_queue_reg_read(nic,
 1269                     NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
 1270                 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
 1271                         break;
 1272 
 1273                 DELAY(1000);
 1274                 timeout--;
 1275                 if (!timeout) {
 1276                         device_printf(nic->dev,
 1277                             "Failed polling on prefetch status\n");
 1278                         return;
 1279                 }
 1280         }
 1281         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1282             NICVF_RBDR_RESET);
 1283 
 1284         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
 1285                 return;
 1286         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
 1287         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
 1288                 return;
 1289 }
 1290 
 1291 /* Configures receive queue */
 1292 static void
 1293 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 1294     int qidx, bool enable)
 1295 {
 1296         union nic_mbx mbx = {};
 1297         struct rcv_queue *rq;
 1298         struct rq_cfg rq_cfg;
 1299         struct ifnet *ifp;
 1300         struct lro_ctrl *lro;
 1301 
 1302         ifp = nic->ifp;
 1303 
 1304         rq = &qs->rq[qidx];
 1305         rq->enable = enable;
 1306 
 1307         lro = &rq->lro;
 1308 
 1309         /* Disable receive queue */
 1310         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
 1311 
 1312         if (!rq->enable) {
 1313                 nicvf_reclaim_rcv_queue(nic, qs, qidx);
 1314                 /* Free LRO memory */
 1315                 tcp_lro_free(lro);
 1316                 rq->lro_enabled = FALSE;
 1317                 return;
 1318         }
 1319 
 1320         /* Configure LRO if enabled */
 1321         rq->lro_enabled = FALSE;
 1322         if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
 1323                 if (tcp_lro_init(lro) != 0) {
 1324                         device_printf(nic->dev,
 1325                             "Failed to initialize LRO for RXQ%d\n", qidx);
 1326                 } else {
 1327                         rq->lro_enabled = TRUE;
 1328                         lro->ifp = nic->ifp;
 1329                 }
 1330         }
 1331 
 1332         rq->cq_qs = qs->vnic_id;
 1333         rq->cq_idx = qidx;
 1334         rq->start_rbdr_qs = qs->vnic_id;
 1335         rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
 1336         rq->cont_rbdr_qs = qs->vnic_id;
 1337         rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
 1338         /* All writes of RBDR data are to be loaded into the L2 cache as well */
 1339         rq->caching = 1;
 1340 
 1341         /* Send a mailbox msg to PF to config RQ */
 1342         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
 1343         mbx.rq.qs_num = qs->vnic_id;
 1344         mbx.rq.rq_num = qidx;
 1345         mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
 1346             (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
 1347             (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
 1348             (rq->start_qs_rbdr_idx);
 1349         nicvf_send_msg_to_pf(nic, &mbx);
 1350 
 1351         mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
 1352         mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
 1353         nicvf_send_msg_to_pf(nic, &mbx);
 1354 
 1355         /*
 1356          * RQ drop config
 1357          * Enable CQ drop to reserve sufficient CQEs for all tx packets
 1358          */
 1359         mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
 1360         mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
 1361         nicvf_send_msg_to_pf(nic, &mbx);
 1362 
 1363         nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
 1364 
 1365         /* Enable Receive queue */
 1366         rq_cfg.ena = 1;
 1367         rq_cfg.tcp_ena = 0;
 1368         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
 1369             *(uint64_t *)&rq_cfg);
 1370 }
 1371 
 1372 /* Configures completion queue */
 1373 static void
 1374 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 1375     int qidx, boolean_t enable)
 1376 {
 1377         struct cmp_queue *cq;
 1378         struct cq_cfg cq_cfg;
 1379 
 1380         cq = &qs->cq[qidx];
 1381         cq->enable = enable;
 1382 
 1383         if (!cq->enable) {
 1384                 nicvf_reclaim_cmp_queue(nic, qs, qidx);
 1385                 return;
 1386         }
 1387 
 1388         /* Reset completion queue */
 1389         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 1390 
 1391         /* Set completion queue base address */
 1392         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
 1393             (uint64_t)(cq->dmem.phys_base));
 1394 
 1395         /* Enable Completion queue */
 1396         cq_cfg.ena = 1;
 1397         cq_cfg.reset = 0;
 1398         cq_cfg.caching = 0;
 1399         cq_cfg.qsize = CMP_QSIZE;
 1400         cq_cfg.avg_con = 0;
 1401         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
 1402 
 1403         /* Set threshold value for interrupt generation */
 1404         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
 1405         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
 1406             nic->cq_coalesce_usecs);
 1407 }
 1408 
 1409 /* Configures transmit queue */
 1410 static void
 1411 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1412     boolean_t enable)
 1413 {
 1414         union nic_mbx mbx = {};
 1415         struct snd_queue *sq;
 1416         struct sq_cfg sq_cfg;
 1417 
 1418         sq = &qs->sq[qidx];
 1419         sq->enable = enable;
 1420 
 1421         if (!sq->enable) {
 1422                 nicvf_reclaim_snd_queue(nic, qs, qidx);
 1423                 return;
 1424         }
 1425 
 1426         /* Reset send queue */
 1427         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
 1428 
 1429         sq->cq_qs = qs->vnic_id;
 1430         sq->cq_idx = qidx;
 1431 
 1432         /* Send a mailbox msg to PF to config SQ */
 1433         mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
 1434         mbx.sq.qs_num = qs->vnic_id;
 1435         mbx.sq.sq_num = qidx;
 1436         mbx.sq.sqs_mode = nic->sqs_mode;
 1437         mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
 1438         nicvf_send_msg_to_pf(nic, &mbx);
 1439 
 1440         /* Set queue base address */
 1441         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
 1442             (uint64_t)(sq->dmem.phys_base));
 1443 
 1444         /* Enable send queue & set queue size */
 1445         sq_cfg.ena = 1;
 1446         sq_cfg.reset = 0;
 1447         sq_cfg.ldwb = 0;
 1448         sq_cfg.qsize = SND_QSIZE;
 1449         sq_cfg.tstmp_bgx_intf = 0;
 1450         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
 1451 
 1452         /* Set threshold value for interrupt generation */
 1453         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
 1454 }
 1455 
 1456 /* Configures receive buffer descriptor ring */
 1457 static void
 1458 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
 1459     boolean_t enable)
 1460 {
 1461         struct rbdr *rbdr;
 1462         struct rbdr_cfg rbdr_cfg;
 1463 
 1464         rbdr = &qs->rbdr[qidx];
 1465         nicvf_reclaim_rbdr(nic, rbdr, qidx);
 1466         if (!enable)
 1467                 return;
 1468 
 1469         /* Set descriptor base address */
 1470         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
 1471             (uint64_t)(rbdr->dmem.phys_base));
 1472 
 1473         /* Enable RBDR & set queue size */
 1474         /* Buffer size should be in multiples of 128 bytes */
 1475         rbdr_cfg.ena = 1;
 1476         rbdr_cfg.reset = 0;
 1477         rbdr_cfg.ldwb = 0;
 1478         rbdr_cfg.qsize = RBDR_SIZE;
 1479         rbdr_cfg.avg_con = 0;
 1480         rbdr_cfg.lines = rbdr->dma_size / 128;
 1481         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
 1482             *(uint64_t *)&rbdr_cfg);
 1483 
 1484         /* Notify the HW how many descriptors are available */
 1485         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
 1486             qs->rbdr_len - 1);
 1487 
 1488         /* Set threshold value for interrupt generation */
 1489         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
 1490             rbdr->thresh - 1);
 1491 }
 1492 
 1493 /* Requests PF to assign and enable Qset */
 1494 void
 1495 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
 1496 {
 1497         union nic_mbx mbx = {};
 1498         struct queue_set *qs;
 1499         struct qs_cfg *qs_cfg;
 1500 
 1501         qs = nic->qs;
 1502         if (qs == NULL) {
 1503                 device_printf(nic->dev,
 1504                     "Qset is not allocated, skipping queue configuration\n");
 1505                 return;
 1506         }
 1507 
 1508         qs->enable = enable;
 1509         qs->vnic_id = nic->vf_id;
 1510 
 1511         /* Send a mailbox msg to PF to config Qset */
 1512         mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
 1513         mbx.qs.num = qs->vnic_id;
 1514 
 1515         mbx.qs.cfg = 0;
 1516         qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
 1517         if (qs->enable) {
 1518                 qs_cfg->ena = 1;
 1519                 qs_cfg->vnic = qs->vnic_id;
 1520         }
 1521         nicvf_send_msg_to_pf(nic, &mbx);
 1522 }
 1523 
 1524 static void
 1525 nicvf_free_resources(struct nicvf *nic)
 1526 {
 1527         int qidx;
 1528         struct queue_set *qs;
 1529 
 1530         qs = nic->qs;
 1531         /*
 1532          * Remove QS error task first since it has to be dead
 1533          * to safely free completion queue tasks.
 1534          */
 1535         if (qs->qs_err_taskq != NULL) {
 1536                 /* Shut down QS error tasks */
 1537                 while (taskqueue_cancel(qs->qs_err_taskq,
 1538                     &qs->qs_err_task, NULL) != 0) {
 1539                         taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
 1540                 }
 1541                 taskqueue_free(qs->qs_err_taskq);
 1542                 qs->qs_err_taskq = NULL;
 1543         }
 1544         /* Free receive buffer descriptor ring */
 1545         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1546                 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
 1547 
 1548         /* Free completion queue */
 1549         for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1550                 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
 1551 
 1552         /* Free send queue */
 1553         for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1554                 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
 1555 }
 1556 
 1557 static int
 1558 nicvf_alloc_resources(struct nicvf *nic)
 1559 {
 1560         struct queue_set *qs = nic->qs;
 1561         int qidx;
 1562 
 1563         /* Alloc receive buffer descriptor ring */
 1564         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
 1565                 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
 1566                                     DMA_BUFFER_LEN, qidx))
 1567                         goto alloc_fail;
 1568         }
 1569 
 1570         /* Alloc send queue */
 1571         for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
 1572                 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
 1573                         goto alloc_fail;
 1574         }
 1575 
 1576         /* Alloc completion queue */
 1577         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 1578                 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
 1579                         goto alloc_fail;
 1580         }
 1581 
 1582         /* Allocate QS error taskqueue */
 1583         NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
 1584         qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
 1585             taskqueue_thread_enqueue, &qs->qs_err_taskq);
 1586         taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
 1587             device_get_nameunit(nic->dev));
 1588 
 1589         return (0);
 1590 alloc_fail:
 1591         nicvf_free_resources(nic);
 1592         return (ENOMEM);
 1593 }
 1594 
 1595 int
 1596 nicvf_set_qset_resources(struct nicvf *nic)
 1597 {
 1598         struct queue_set *qs;
 1599 
 1600         qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
 1601         nic->qs = qs;
 1602 
 1603         /* Set count of each queue */
 1604         qs->rbdr_cnt = RBDR_CNT;
 1605         qs->rq_cnt = RCV_QUEUE_CNT;
 1606 
 1607         qs->sq_cnt = SND_QUEUE_CNT;
 1608         qs->cq_cnt = CMP_QUEUE_CNT;
 1609 
 1610         /* Set queue lengths */
 1611         qs->rbdr_len = RCV_BUF_COUNT;
 1612         qs->sq_len = SND_QUEUE_LEN;
 1613         qs->cq_len = CMP_QUEUE_LEN;
 1614 
 1615         nic->rx_queues = qs->rq_cnt;
 1616         nic->tx_queues = qs->sq_cnt;
 1617 
 1618         return (0);
 1619 }
 1620 
 1621 int
 1622 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
 1623 {
 1624         boolean_t disable = FALSE;
 1625         struct queue_set *qs;
 1626         int qidx;
 1627 
 1628         qs = nic->qs;
 1629         if (qs == NULL)
 1630                 return (0);
 1631 
 1632         if (enable) {
 1633                 if (nicvf_alloc_resources(nic) != 0)
 1634                         return (ENOMEM);
 1635 
 1636                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1637                         nicvf_snd_queue_config(nic, qs, qidx, enable);
 1638                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1639                         nicvf_cmp_queue_config(nic, qs, qidx, enable);
 1640                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1641                         nicvf_rbdr_config(nic, qs, qidx, enable);
 1642                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1643                         nicvf_rcv_queue_config(nic, qs, qidx, enable);
 1644         } else {
 1645                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
 1646                         nicvf_rcv_queue_config(nic, qs, qidx, disable);
 1647                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 1648                         nicvf_rbdr_config(nic, qs, qidx, disable);
 1649                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
 1650                         nicvf_snd_queue_config(nic, qs, qidx, disable);
 1651                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
 1652                         nicvf_cmp_queue_config(nic, qs, qidx, disable);
 1653 
 1654                 nicvf_free_resources(nic);
 1655         }
 1656 
 1657         return (0);
 1658 }
 1659 
 1660 /*
 1661  * Get free descriptors from the SQ.
 1662  * Returns the number of the first descriptor allocated.
 1663  */
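      /*
       * Note: dmem.q_len is assumed to be a power of two, so masking an index
       * with (q_len - 1) wraps it around the ring.
       */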
 1664 static __inline int
 1665 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
 1666 {
 1667         int qentry;
 1668 
 1669         qentry = sq->tail;
 1670         atomic_subtract_int(&sq->free_cnt, desc_cnt);
 1671         sq->tail += desc_cnt;
 1672         sq->tail &= (sq->dmem.q_len - 1);
 1673 
 1674         return (qentry);
 1675 }
 1676 
 1677 /* Free descriptors back to the SQ for future use */
 1678 static void
 1679 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 1680 {
 1681 
 1682         atomic_add_int(&sq->free_cnt, desc_cnt);
 1683         sq->head += desc_cnt;
 1684         sq->head &= (sq->dmem.q_len - 1);
 1685 }
 1686 
 1687 static __inline int
 1688 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
 1689 {
 1690         qentry++;
 1691         qentry &= (sq->dmem.q_len - 1);
 1692         return (qentry);
 1693 }
 1694 
 1695 static void
 1696 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1697 {
 1698         uint64_t sq_cfg;
 1699 
 1700         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1701         sq_cfg |= NICVF_SQ_EN;
 1702         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1703         /* Ring doorbell so that H/W restarts processing SQEs */
 1704         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
 1705 }
 1706 
 1707 static void
 1708 nicvf_sq_disable(struct nicvf *nic, int qidx)
 1709 {
 1710         uint64_t sq_cfg;
 1711 
 1712         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 1713         sq_cfg &= ~NICVF_SQ_EN;
 1714         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
 1715 }
 1716 
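      /*
       * Reclaim SQ descriptors already consumed by the hardware: read the HW
       * head pointer and release everything between the software head and it,
       * unloading DMA maps and freeing mbufs attached to HEADER subdescriptors.
       */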
 1717 static void
 1718 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
 1719 {
 1720         uint64_t head;
 1721         struct snd_buff *snd_buff;
 1722         struct sq_hdr_subdesc *hdr;
 1723 
 1724         NICVF_TX_LOCK(sq);
 1725         head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
 1726         while (sq->head != head) {
 1727                 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
 1728                 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
 1729                         nicvf_put_sq_desc(sq, 1);
 1730                         continue;
 1731                 }
 1732                 snd_buff = &sq->snd_buff[sq->head];
 1733                 if (snd_buff->mbuf != NULL) {
 1734                         bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1735                         m_freem(snd_buff->mbuf);
 1736                         sq->snd_buff[sq->head].mbuf = NULL;
 1737                 }
 1738                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 1739         }
 1740         NICVF_TX_UNLOCK(sq);
 1741 }
 1742 
 1743 /*
 1744  * Add SQ HEADER subdescriptor.
 1745  * First subdescriptor for every send descriptor.
 1746  */
 1747 static __inline int
 1748 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
 1749                          int subdesc_cnt, struct mbuf *mbuf, int len)
 1750 {
 1751         struct nicvf *nic;
 1752         struct sq_hdr_subdesc *hdr;
 1753         struct ether_vlan_header *eh;
 1754 #ifdef INET
 1755         struct ip *ip;
 1756         struct tcphdr *th;
 1757 #endif
 1758         uint16_t etype;
 1759         int ehdrlen, iphlen, poff, proto;
 1760 
 1761         nic = sq->nic;
 1762 
 1763         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
 1764         sq->snd_buff[qentry].mbuf = mbuf;
 1765 
 1766         memset(hdr, 0, SND_QUEUE_DESC_SIZE);
 1767         hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
 1768         /* Enable notification via CQE after processing SQE */
 1769         hdr->post_cqe = 1;
 1770         /* No of subdescriptors following this */
 1771         hdr->subdesc_cnt = subdesc_cnt;
 1772         hdr->tot_len = len;
 1773 
 1774         eh = mtod(mbuf, struct ether_vlan_header *);
 1775         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1776                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1777                 etype = ntohs(eh->evl_proto);
 1778         } else {
 1779                 ehdrlen = ETHER_HDR_LEN;
 1780                 etype = ntohs(eh->evl_encap_proto);
 1781         }
 1782 
 1783         poff = proto = -1;
 1784         switch (etype) {
 1785 #ifdef INET6
 1786         case ETHERTYPE_IPV6:
 1787                 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
 1788                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
 1789                         sq->snd_buff[qentry].mbuf = mbuf;
 1790                         if (mbuf == NULL)
 1791                                 return (ENOBUFS);
 1792                 }
 1793                 poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
 1794                 if (poff < 0)
 1795                         return (ENOBUFS);
 1796                 poff += ehdrlen;
 1797                 break;
 1798 #endif
 1799 #ifdef INET
 1800         case ETHERTYPE_IP:
 1801                 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
 1802                         mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
 1803                         sq->snd_buff[qentry].mbuf = mbuf;
 1804                         if (mbuf == NULL)
 1805                                 return (ENOBUFS);
 1806                 }
 1807                 if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
 1808                         hdr->csum_l3 = 1; /* Enable IP csum calculation */
 1809 
 1810                 ip = (struct ip *)(mbuf->m_data + ehdrlen);
 1811                 iphlen = ip->ip_hl << 2;
 1812                 poff = ehdrlen + iphlen;
 1813                 proto = ip->ip_p;
 1814                 break;
 1815 #endif
 1816         }
 1817 
 1818 #if defined(INET6) || defined(INET)
 1819         if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
 1820                 switch (proto) {
 1821                 case IPPROTO_TCP:
 1822                         if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
 1823                                 break;
 1824 
 1825                         if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
 1826                                 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
 1827                                 sq->snd_buff[qentry].mbuf = mbuf;
 1828                                 if (mbuf == NULL)
 1829                                         return (ENOBUFS);
 1830                         }
 1831                         hdr->csum_l4 = SEND_L4_CSUM_TCP;
 1832                         break;
 1833                 case IPPROTO_UDP:
 1834                         if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
 1835                                 break;
 1836 
 1837                         if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
 1838                                 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
 1839                                 sq->snd_buff[qentry].mbuf = mbuf;
 1840                                 if (mbuf == NULL)
 1841                                         return (ENOBUFS);
 1842                         }
 1843                         hdr->csum_l4 = SEND_L4_CSUM_UDP;
 1844                         break;
 1845                 case IPPROTO_SCTP:
 1846                         if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
 1847                                 break;
 1848 
 1849                         if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
 1850                                 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
 1851                                 sq->snd_buff[qentry].mbuf = mbuf;
 1852                                 if (mbuf == NULL)
 1853                                         return (ENOBUFS);
 1854                         }
 1855                         hdr->csum_l4 = SEND_L4_CSUM_SCTP;
 1856                         break;
 1857                 default:
 1858                         break;
 1859                 }
 1860                 hdr->l3_offset = ehdrlen;
 1861                 hdr->l4_offset = poff;
 1862         }
 1863 
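              /*
               * Hardware TSO: tso_start is presumably the offset just past
               * the TCP header (L4 offset plus the TCP data offset in bytes)
               * and tso_max_paysize caps each generated segment at the
               * stack's tso_segsz; inner_l3_offset is derived from the
               * Ethernet header length.
               */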
 1864         if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
 1865                 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
 1866 
 1867                 hdr->tso = 1;
 1868                 hdr->tso_start = poff + (th->th_off * 4);
 1869                 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
 1870                 hdr->inner_l3_offset = ehdrlen - 2;
 1871                 nic->drv_stats.tx_tso++;
 1872         }
 1873 #endif
 1874 
 1875         return (0);
 1876 }
 1877 
 1878 /*
 1879  * SQ GATHER subdescriptor
 1880  * Must follow HDR descriptor
 1881  */
 1882 static inline void
 1883 nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size, uint64_t data)
 1884 {
 1885         struct sq_gather_subdesc *gather;
 1886 
 1887         qentry &= (sq->dmem.q_len - 1);
 1888         gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
 1889 
 1890         memset(gather, 0, SND_QUEUE_DESC_SIZE);
 1891         gather->subdesc_type = SQ_DESC_TYPE_GATHER;
 1892         gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
 1893         gather->size = size;
 1894         gather->addr = data;
 1895 }
 1896 
 1897 /* Post an mbuf to an SQ for transmission. */
 1898 static int
 1899 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
 1900 {
 1901         bus_dma_segment_t segs[256];
 1902         struct snd_buff *snd_buff;
 1903         size_t seg;
 1904         int nsegs, qentry;
 1905         int subdesc_cnt;
 1906         int err;
 1907 
 1908         NICVF_TX_LOCK_ASSERT(sq);
 1909 
 1910         if (sq->free_cnt == 0)
 1911                 return (ENOBUFS);
 1912 
 1913         snd_buff = &sq->snd_buff[sq->tail];
 1914 
 1915         err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
 1916             *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
 1917         if (__predict_false(err != 0)) {
 1918                 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
 1919                 m_freem(*mbufp);
 1920                 *mbufp = NULL;
 1921                 return (err);
 1922         }
 1923 
 1924         /* Set how many subdescriptors are required */
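              /*
               * One HEADER subdescriptor plus one GATHER subdescriptor per
               * DMA segment; MIN_SQ_DESC_PER_PKT_XMIT is assumed to already
               * account for the header and the first gather entry.
               */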
 1925         subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
 1926         if (subdesc_cnt > sq->free_cnt) {
 1927                 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
 1928                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1929                 return (ENOBUFS);
 1930         }
 1931 
 1932         qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 1933 
 1934         /* Add SQ header subdesc */
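              /*
               * The header's subdesc_cnt field counts only the
               * subdescriptors that follow it, hence subdesc_cnt - 1.
               */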
 1935         err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
 1936             (*mbufp)->m_pkthdr.len);
 1937         if (err != 0) {
 1938                 nicvf_put_sq_desc(sq, subdesc_cnt);
 1939                 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
 1940                 if (err == ENOBUFS) {
 1941                         m_freem(*mbufp);
 1942                         *mbufp = NULL;
 1943                 }
 1944                 return (err);
 1945         }
 1946 
 1947         /* Add SQ gather subdescs */
 1948         for (seg = 0; seg < nsegs; seg++) {
 1949                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
 1950                 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
 1951                     segs[seg].ds_addr);
 1952         }
 1953 
 1954         /* Make sure all memory stores are done before ringing the doorbell */
 1955         bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
 1956 
 1957         dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
 1958             __func__, sq->idx, subdesc_cnt);
 1959         /* Inform HW to xmit new packet */
 1960         nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
 1961             sq->idx, subdesc_cnt);
 1962         return (0);
 1963 }
 1964 
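      /*
       * The per-fragment receive buffer lengths are 16-bit values packed four
       * to a 64-bit word; on big-endian hosts the order within each word is
       * reversed, which frag_num() appears to compensate for.
       */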
 1965 static __inline u_int
 1966 frag_num(u_int i)
 1967 {
 1968 #if BYTE_ORDER == BIG_ENDIAN
 1969         return ((i & ~3) + 3 - (i & 3));
 1970 #else
 1971         return (i);
 1972 #endif
 1973 }
 1974 
 1975 /* Returns an mbuf for a received packet */
 1976 struct mbuf *
 1977 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 1978 {
 1979         int frag;
 1980         int payload_len = 0;
 1981         struct mbuf *mbuf;
 1982         struct mbuf *mbuf_frag;
 1983         uint16_t *rb_lens = NULL;
 1984         uint64_t *rb_ptrs = NULL;
 1985 
 1986         mbuf = NULL;
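              /*
               * Buffer lengths start at the fourth 64-bit word of the RX CQE
               * and buffer pointers at the seventh, per the offsets used
               * below; the exact cqe_rx_t layout is defined elsewhere.
               */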
 1987         rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
 1988         rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
 1989 
 1990         dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
 1991             __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
 1992 
 1993         for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
 1994                 payload_len = rb_lens[frag_num(frag)];
 1995                 if (frag == 0) {
 1996                         /* First fragment */
 1997                         mbuf = nicvf_rb_ptr_to_mbuf(nic,
 1998                             (*rb_ptrs - cqe_rx->align_pad));
 1999                         mbuf->m_len = payload_len;
 2000                         mbuf->m_data += cqe_rx->align_pad;
 2001                         if_setrcvif(mbuf, nic->ifp);
 2002                 } else {
 2003                         /* Add fragments */
 2004                         mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
 2005                         m_append(mbuf, payload_len, mbuf_frag->m_data);
 2006                         m_freem(mbuf_frag);
 2007                 }
 2008                 /* Next buffer pointer */
 2009                 rb_ptrs++;
 2010         }
 2011 
 2012         if (__predict_true(mbuf != NULL)) {
 2013                 m_fixhdr(mbuf);
 2014                 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
 2015                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
 2016                 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
 2017                         /*
 2018                          * HW by default verifies IP & TCP/UDP/SCTP checksums
 2019                          */
 2020                         if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
 2021                                 mbuf->m_pkthdr.csum_flags =
 2022                                     (CSUM_IP_CHECKED | CSUM_IP_VALID);
 2023                         }
 2024 
 2025                         switch (cqe_rx->l4_type) {
 2026                         case L4TYPE_UDP:
 2027                         case L4TYPE_TCP: /* fall through */
 2028                                 mbuf->m_pkthdr.csum_flags |=
 2029                                     (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 2030                                 mbuf->m_pkthdr.csum_data = 0xffff;
 2031                                 break;
 2032                         case L4TYPE_SCTP:
 2033                                 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
 2034                                 break;
 2035                         default:
 2036                                 break;
 2037                         }
 2038                 }
 2039         }
 2040 
 2041         return (mbuf);
 2042 }
 2043 
 2044 /* Enable interrupt */
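      /*
       * Interrupt enables appear to be manipulated through a write-1-to-set /
       * write-1-to-clear register pair: NIC_VF_ENA_W1S sets enable bits and
       * NIC_VF_ENA_W1C (used by nicvf_disable_intr() below) clears them.
       */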
 2045 void
 2046 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
 2047 {
 2048         uint64_t reg_val;
 2049 
 2050         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2051 
 2052         switch (int_type) {
 2053         case NICVF_INTR_CQ:
 2054                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2055                 break;
 2056         case NICVF_INTR_SQ:
 2057                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2058                 break;
 2059         case NICVF_INTR_RBDR:
 2060                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2061                 break;
 2062         case NICVF_INTR_PKT_DROP:
 2063                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2064                 break;
 2065         case NICVF_INTR_TCP_TIMER:
 2066                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2067                 break;
 2068         case NICVF_INTR_MBOX:
 2069                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2070                 break;
 2071         case NICVF_INTR_QS_ERR:
 2072                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2073                 break;
 2074         default:
 2075                 device_printf(nic->dev,
 2076                            "Failed to enable interrupt: unknown type\n");
 2077                 break;
 2078         }
 2079 
 2080         nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
 2081 }
 2082 
 2083 /* Disable interrupt */
 2084 void
 2085 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
 2086 {
 2087         uint64_t reg_val = 0;
 2088 
 2089         switch (int_type) {
 2090         case NICVF_INTR_CQ:
 2091                 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2092                 break;
 2093         case NICVF_INTR_SQ:
 2094                 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2095                 break;
 2096         case NICVF_INTR_RBDR:
 2097                 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2098                 break;
 2099         case NICVF_INTR_PKT_DROP:
 2100                 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2101                 break;
 2102         case NICVF_INTR_TCP_TIMER:
 2103                 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2104                 break;
 2105         case NICVF_INTR_MBOX:
 2106                 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
 2107                 break;
 2108         case NICVF_INTR_QS_ERR:
 2109                 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2110                 break;
 2111         default:
 2112                 device_printf(nic->dev,
 2113                            "Failed to disable interrupt: unknown type\n");
 2114                 break;
 2115         }
 2116 
 2117         nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
 2118 }
 2119 
 2120 /* Clear interrupt */
 2121 void
 2122 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
 2123 {
 2124         uint64_t reg_val = 0;
 2125 
 2126         switch (int_type) {
 2127         case NICVF_INTR_CQ:
 2128                 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2129                 break;
 2130         case NICVF_INTR_SQ:
 2131                 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2132                 break;
 2133         case NICVF_INTR_RBDR:
 2134                 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2135                 break;
 2136         case NICVF_INTR_PKT_DROP:
 2137                 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
 2138                 break;
 2139         case NICVF_INTR_TCP_TIMER:
 2140                 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
 2141                 break;
 2142         case NICVF_INTR_MBOX:
 2143                 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
 2144                 break;
 2145         case NICVF_INTR_QS_ERR:
 2146                 reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
 2147                 break;
 2148         default:
 2149                 device_printf(nic->dev,
 2150                            "Failed to clear interrupt: unknown type\n");
 2151                 break;
 2152         }
 2153 
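              /*
               * Writing a set bit to NIC_VF_INT presumably acknowledges
               * (clears) the corresponding pending interrupt.
               */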
 2154         nicvf_reg_write(nic, NIC_VF_INT, reg_val);
 2155 }
 2156 
 2157 /* Check if interrupt is enabled */
 2158 int
 2159 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
 2160 {
 2161         uint64_t reg_val;
 2162         uint64_t mask = 0xff;
 2163 
 2164         reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
 2165 
 2166         switch (int_type) {
 2167         case NICVF_INTR_CQ:
 2168                 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
 2169                 break;
 2170         case NICVF_INTR_SQ:
 2171                 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
 2172                 break;
 2173         case NICVF_INTR_RBDR:
 2174                 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
 2175                 break;
 2176         case NICVF_INTR_PKT_DROP:
 2177                 mask = NICVF_INTR_PKT_DROP_MASK;
 2178                 break;
 2179         case NICVF_INTR_TCP_TIMER:
 2180                 mask = NICVF_INTR_TCP_TIMER_MASK;
 2181                 break;
 2182         case NICVF_INTR_MBOX:
 2183                 mask = NICVF_INTR_MBOX_MASK;
 2184                 break;
 2185         case NICVF_INTR_QS_ERR:
 2186                 mask = NICVF_INTR_QS_ERR_MASK;
 2187                 break;
 2188         default:
 2189                 device_printf(nic->dev,
 2190                            "Failed to check interrupt enable: unknown type\n");
 2191                 break;
 2192         }
 2193 
 2194         return (reg_val & mask);
 2195 }
 2196 
 2197 void
 2198 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
 2199 {
 2200         struct rcv_queue *rq;
 2201 
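      /*
       * The per-queue statistics registers are addressed as
       * base | (queue number << NIC_Q_NUM_SHIFT) | (statistic index * 8),
       * i.e. consecutive 64-bit counters for each queue.
       */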
 2202 #define GET_RQ_STATS(reg) \
 2203         nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
 2204                             (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2205 
 2206         rq = &nic->qs->rq[rq_idx];
 2207         rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
 2208         rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
 2209 }
 2210 
 2211 void
 2212 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 2213 {
 2214         struct snd_queue *sq;
 2215 
 2216 #define GET_SQ_STATS(reg) \
 2217         nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
 2218                             (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
 2219 
 2220         sq = &nic->qs->sq[sq_idx];
 2221         sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
 2222         sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
 2223 }
 2224 
 2225 /* Check for errors in the receive completion queue entry */
 2226 int
 2227 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2228     struct cqe_rx_t *cqe_rx)
 2229 {
 2230         struct nicvf_hw_stats *stats = &nic->hw_stats;
 2231         struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 2232 
 2233         if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
 2234                 drv_stats->rx_frames_ok++;
 2235                 return (0);
 2236         }
 2237 
 2238         switch (cqe_rx->err_opcode) {
 2239         case CQ_RX_ERROP_RE_PARTIAL:
 2240                 stats->rx_bgx_truncated_pkts++;
 2241                 break;
 2242         case CQ_RX_ERROP_RE_JABBER:
 2243                 stats->rx_jabber_errs++;
 2244                 break;
 2245         case CQ_RX_ERROP_RE_FCS:
 2246                 stats->rx_fcs_errs++;
 2247                 break;
 2248         case CQ_RX_ERROP_RE_RX_CTL:
 2249                 stats->rx_bgx_errs++;
 2250                 break;
 2251         case CQ_RX_ERROP_PREL2_ERR:
 2252                 stats->rx_prel2_errs++;
 2253                 break;
 2254         case CQ_RX_ERROP_L2_MAL:
 2255                 stats->rx_l2_hdr_malformed++;
 2256                 break;
 2257         case CQ_RX_ERROP_L2_OVERSIZE:
 2258                 stats->rx_oversize++;
 2259                 break;
 2260         case CQ_RX_ERROP_L2_UNDERSIZE:
 2261                 stats->rx_undersize++;
 2262                 break;
 2263         case CQ_RX_ERROP_L2_LENMISM:
 2264                 stats->rx_l2_len_mismatch++;
 2265                 break;
 2266         case CQ_RX_ERROP_L2_PCLP:
 2267                 stats->rx_l2_pclp++;
 2268                 break;
 2269         case CQ_RX_ERROP_IP_NOT:
 2270                 stats->rx_ip_ver_errs++;
 2271                 break;
 2272         case CQ_RX_ERROP_IP_CSUM_ERR:
 2273                 stats->rx_ip_csum_errs++;
 2274                 break;
 2275         case CQ_RX_ERROP_IP_MAL:
 2276                 stats->rx_ip_hdr_malformed++;
 2277                 break;
 2278         case CQ_RX_ERROP_IP_MALD:
 2279                 stats->rx_ip_payload_malformed++;
 2280                 break;
 2281         case CQ_RX_ERROP_IP_HOP:
 2282                 stats->rx_ip_ttl_errs++;
 2283                 break;
 2284         case CQ_RX_ERROP_L3_PCLP:
 2285                 stats->rx_l3_pclp++;
 2286                 break;
 2287         case CQ_RX_ERROP_L4_MAL:
 2288                 stats->rx_l4_malformed++;
 2289                 break;
 2290         case CQ_RX_ERROP_L4_CHK:
 2291                 stats->rx_l4_csum_errs++;
 2292                 break;
 2293         case CQ_RX_ERROP_UDP_LEN:
 2294                 stats->rx_udp_len_errs++;
 2295                 break;
 2296         case CQ_RX_ERROP_L4_PORT:
 2297                 stats->rx_l4_port_errs++;
 2298                 break;
 2299         case CQ_RX_ERROP_TCP_FLAG:
 2300                 stats->rx_tcp_flag_errs++;
 2301                 break;
 2302         case CQ_RX_ERROP_TCP_OFFSET:
 2303                 stats->rx_tcp_offset_errs++;
 2304                 break;
 2305         case CQ_RX_ERROP_L4_PCLP:
 2306                 stats->rx_l4_pclp++;
 2307                 break;
 2308         case CQ_RX_ERROP_RBDR_TRUNC:
 2309                 stats->rx_truncated_pkts++;
 2310                 break;
 2311         }
 2312 
 2313         return (1);
 2314 }
 2315 
 2316 /* Check for errors in the send completion queue entry */
 2317 int
 2318 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
 2319     struct cqe_send_t *cqe_tx)
 2320 {
 2321         struct cmp_queue_stats *stats = &cq->stats;
 2322 
 2323         switch (cqe_tx->send_status) {
 2324         case CQ_TX_ERROP_GOOD:
 2325                 stats->tx.good++;
 2326                 return (0);
 2327         case CQ_TX_ERROP_DESC_FAULT:
 2328                 stats->tx.desc_fault++;
 2329                 break;
 2330         case CQ_TX_ERROP_HDR_CONS_ERR:
 2331                 stats->tx.hdr_cons_err++;
 2332                 break;
 2333         case CQ_TX_ERROP_SUBDC_ERR:
 2334                 stats->tx.subdesc_err++;
 2335                 break;
 2336         case CQ_TX_ERROP_IMM_SIZE_OFLOW:
 2337                 stats->tx.imm_size_oflow++;
 2338                 break;
 2339         case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
 2340                 stats->tx.data_seq_err++;
 2341                 break;
 2342         case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
 2343                 stats->tx.mem_seq_err++;
 2344                 break;
 2345         case CQ_TX_ERROP_LOCK_VIOL:
 2346                 stats->tx.lock_viol++;
 2347                 break;
 2348         case CQ_TX_ERROP_DATA_FAULT:
 2349                 stats->tx.data_fault++;
 2350                 break;
 2351         case CQ_TX_ERROP_TSTMP_CONFLICT:
 2352                 stats->tx.tstmp_conflict++;
 2353                 break;
 2354         case CQ_TX_ERROP_TSTMP_TIMEOUT:
 2355                 stats->tx.tstmp_timeout++;
 2356                 break;
 2357         case CQ_TX_ERROP_MEM_FAULT:
 2358                 stats->tx.mem_fault++;
 2359                 break;
 2360         case CQ_TX_ERROP_CK_OVERLAP:
 2361                 stats->tx.csum_overlap++;
 2362                 break;
 2363         case CQ_TX_ERROP_CK_OFLOW:
 2364                 stats->tx.csum_overflow++;
 2365                 break;
 2366         }
 2367 
 2368         return (1);
 2369 }
