FreeBSD/Linux Kernel Cross Reference
sys/dev/irdma/irdma_puda.c


    1 /*-
    2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
    3  *
    4  * Copyright (c) 2015 - 2022 Intel Corporation
    5  *
    6  * This software is available to you under a choice of one of two
    7  * licenses.  You may choose to be licensed under the terms of the GNU
    8  * General Public License (GPL) Version 2, available from the file
    9  * COPYING in the main directory of this source tree, or the
   10  * OpenFabrics.org BSD license below:
   11  *
   12  *   Redistribution and use in source and binary forms, with or
   13  *   without modification, are permitted provided that the following
   14  *   conditions are met:
   15  *
   16  *    - Redistributions of source code must retain the above
   17  *      copyright notice, this list of conditions and the following
   18  *      disclaimer.
   19  *
   20  *    - Redistributions in binary form must reproduce the above
   21  *      copyright notice, this list of conditions and the following
   22  *      disclaimer in the documentation and/or other materials
   23  *      provided with the distribution.
   24  *
   25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   32  * SOFTWARE.
   33  */
   34 /*$FreeBSD$*/
   35 
   36 #include "osdep.h"
   37 #include "irdma_hmc.h"
   38 #include "irdma_defs.h"
   39 #include "irdma_type.h"
   40 #include "irdma_protos.h"
   41 #include "irdma_puda.h"
   42 #include "irdma_ws.h"
   43 
   44 static void
   45 irdma_ieq_receive(struct irdma_sc_vsi *vsi,
   46                   struct irdma_puda_buf *buf);
   47 static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
   48 static void
   49 irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
   50                          struct irdma_puda_buf *buf, u32 wqe_idx);
   51 /**
   52  * irdma_puda_get_listbuf - get buffer from puda list
   53  * @list: list to use for buffers (ILQ or IEQ)
   54  */
   55 static struct irdma_puda_buf *
   56 irdma_puda_get_listbuf(struct list_head *list)
   57 {
   58         struct irdma_puda_buf *buf = NULL;
   59 
   60         if (!list_empty(list)) {
   61                 buf = (struct irdma_puda_buf *)(list)->next;
   62                 list_del((struct list_head *)&buf->list);
   63         }
   64 
   65         return buf;
   66 }
   67 
   68 /**
   69  * irdma_puda_get_bufpool - return buffer from resource
   70  * @rsrc: resource to use for buffer
   71  */
   72 struct irdma_puda_buf *
   73 irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
   74 {
   75         struct irdma_puda_buf *buf = NULL;
   76         struct list_head *list = &rsrc->bufpool;
   77         unsigned long flags;
   78 
   79         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
   80         buf = irdma_puda_get_listbuf(list);
   81         if (buf) {
   82                 rsrc->avail_buf_count--;
   83                 buf->vsi = rsrc->vsi;
   84         } else {
   85                 rsrc->stats_buf_alloc_fail++;
   86         }
   87         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
   88 
   89         return buf;
   90 }
   91 
   92 /**
   93  * irdma_puda_ret_bufpool - return buffer to rsrc list
   94  * @rsrc: resource to use for buffer
   95  * @buf: buffer to return to resource
   96  */
   97 void
   98 irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
   99                        struct irdma_puda_buf *buf)
  100 {
  101         unsigned long flags;
  102 
  103         buf->do_lpb = false;
  104         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  105         list_add(&buf->list, &rsrc->bufpool);
  106         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  107         rsrc->avail_buf_count++;
  108 }
  109 
  110 /**
  111  * irdma_puda_post_recvbuf - set wqe for rcv buffer
  112  * @rsrc: resource ptr
  113  * @wqe_idx: wqe index to use
  114  * @buf: puda buffer for rcv q
  115  * @initial: flag if during init time
  116  */
  117 static void
  118 irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
  119                         struct irdma_puda_buf *buf, bool initial)
  120 {
  121         __le64 *wqe;
  122         struct irdma_sc_qp *qp = &rsrc->qp;
  123         u64 offset24 = 0;
  124 
  125         /* Synch buffer for use by device */
  126         dma_sync_single_for_device(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
  127         qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
  128         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
  129         if (!initial)
  130                 get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
  131 
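               /* invert the previous valid bit (initial posts always set it) so the HW sees this WQE as newly posted */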
  132         offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
  133 
  134         set_64bit_val(wqe, IRDMA_BYTE_16, 0);
  135         set_64bit_val(wqe, 0, buf->mem.pa);
  136         if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
  137                 set_64bit_val(wqe, IRDMA_BYTE_8,
  138                               FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
  139         } else {
  140                 set_64bit_val(wqe, IRDMA_BYTE_8,
  141                               FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
  142                               offset24);
  143         }
  144         irdma_wmb();            /* make sure WQE is written before valid bit is set */
  145 
  146         set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
  147 }
  148 
  149 /**
  150  * irdma_puda_replenish_rq - post rcv buffers
  151  * @rsrc: resource to use for buffer
  152  * @initial: flag if during init time
  153  */
  154 static int
  155 irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
  156 {
  157         u32 i;
  158         u32 invalid_cnt = rsrc->rxq_invalid_cnt;
  159         struct irdma_puda_buf *buf = NULL;
  160 
  161         for (i = 0; i < invalid_cnt; i++) {
  162                 buf = irdma_puda_get_bufpool(rsrc);
  163                 if (!buf)
  164                         return -ENOBUFS;
  165                 irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
  166                 rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
  167                 rsrc->rxq_invalid_cnt--;
  168         }
  169 
  170         return 0;
  171 }
  172 
  173 /**
  174  * irdma_puda_alloc_buf - allocate mem for buffer
  175  * @dev: iwarp device
  176  * @len: length of buffer
  177  */
  178 static struct irdma_puda_buf *
  179 irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
  180                      u32 len)
  181 {
  182         struct irdma_puda_buf *buf;
  183         struct irdma_virt_mem buf_mem;
  184 
  185         buf_mem.size = sizeof(struct irdma_puda_buf);
  186         buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
  187         if (!buf_mem.va)
  188                 return NULL;
  189 
  190         buf = buf_mem.va;
  191         buf->mem.size = len;
  192         buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
  193         if (!buf->mem.va)
  194                 goto free_virt;
  195         buf->mem.pa = dma_map_single(hw_to_dev(dev->hw), buf->mem.va, buf->mem.size, DMA_BIDIRECTIONAL);
  196         if (dma_mapping_error(hw_to_dev(dev->hw), buf->mem.pa)) {
  197                 kfree(buf->mem.va);
  198                 goto free_virt;
  199         }
  200 
  201         buf->buf_mem.va = buf_mem.va;
  202         buf->buf_mem.size = buf_mem.size;
  203 
  204         return buf;
  205 
  206 free_virt:
  207         kfree(buf_mem.va);
  208         return NULL;
  209 }
  210 
  211 /**
  212  * irdma_puda_dele_buf - delete buffer back to system
  213  * @dev: iwarp device
  214  * @buf: buffer to free
  215  */
  216 static void
  217 irdma_puda_dele_buf(struct irdma_sc_dev *dev,
  218                     struct irdma_puda_buf *buf)
  219 {
  220         if (!buf->virtdma) {
  221                 irdma_free_dma_mem(dev->hw, &buf->mem);
  222                 kfree(buf->buf_mem.va);
  223         }
  224 }
  225 
  226 /**
  227  * irdma_puda_get_next_send_wqe - return next wqe for processing
  228  * @qp: puda qp for wqe
  229  * @wqe_idx: wqe index for caller
  230  */
   231 static __le64 *
   232 irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
       {
  233         __le64 *wqe = NULL;
  234         int ret_code = 0;
  235 
  236         *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
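               /* head wrapped back to index 0: flip the SQ valid-bit polarity for the new pass through the ring */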
  237         if (!*wqe_idx)
  238                 qp->swqe_polarity = !qp->swqe_polarity;
  239         IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
  240         if (ret_code)
  241                 return wqe;
  242 
  243         wqe = qp->sq_base[*wqe_idx].elem;
  244 
  245         return wqe;
  246 }
  247 
  248 /**
  249  * irdma_puda_poll_info - poll cq for completion
  250  * @cq: cq for poll
  251  * @info: info return for successful completion
  252  */
  253 static int
  254 irdma_puda_poll_info(struct irdma_sc_cq *cq,
  255                      struct irdma_puda_cmpl_info *info)
  256 {
  257         struct irdma_cq_uk *cq_uk = &cq->cq_uk;
  258         u64 qword0, qword2, qword3, qword6;
  259         __le64 *cqe;
  260         __le64 *ext_cqe = NULL;
  261         u64 qword7 = 0;
  262         u64 comp_ctx;
  263         bool valid_bit;
  264         bool ext_valid = 0;
  265         u32 major_err, minor_err;
  266         u32 peek_head;
  267         bool error;
  268         u8 polarity;
  269 
  270         cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
  271         get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
  272         valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
  273         if (valid_bit != cq_uk->polarity)
  274                 return -ENOENT;
  275 
  276         if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  277                 ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
  278 
  279         if (ext_valid) {
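                       /* the extended CQE occupies the next ring entry; peek at it and check its valid bit before consuming it */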
  280                 peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
  281                 ext_cqe = cq_uk->cq_base[peek_head].buf;
  282                 get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
  283                 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
  284                 if (!peek_head)
  285                         polarity ^= 1;
  286                 if (polarity != cq_uk->polarity)
  287                         return -ENOENT;
  288 
  289                 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
  290                 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
  291                         cq_uk->polarity = !cq_uk->polarity;
  292                 /* update cq tail in cq shadow memory also */
  293                 IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
  294         }
  295 
  296         irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA CQE", cqe, 32);
  297         if (ext_valid)
  298                 irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
  299                                 ext_cqe, 32);
  300 
  301         error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
  302         if (error) {
  303                 irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
  304                 major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
  305                 minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
  306                 info->compl_error = major_err << 16 | minor_err;
  307                 return -EIO;
  308         }
  309 
  310         get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
  311         get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
  312 
  313         info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
  314         info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
  315         if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  316                 info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
  317 
  318         get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
  319         info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
  320         info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
  321 
  322         if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
  323                 if (ext_valid) {
  324                         info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
  325                         if (info->vlan_valid) {
  326                                 get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
  327                                 info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
  328                         }
  329                         info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
  330                         if (info->smac_valid) {
  331                                 get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
  332                                 info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
  333                                 info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
  334                                 info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
  335                                 info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
  336                                 info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
  337                                 info->smac[5] = (u8)(qword6 & 0xFF);
  338                         }
  339                 }
  340 
  341                 if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
  342                         info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
  343                         info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
  344                         info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
  345                 }
  346 
  347                 info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
  348         }
  349 
  350         return 0;
  351 }
  352 
  353 /**
  354  * irdma_puda_poll_cmpl - processes completion for cq
  355  * @dev: iwarp device
  356  * @cq: cq getting interrupt
  357  * @compl_err: return any completion err
  358  */
  359 int
  360 irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
  361                      u32 *compl_err)
  362 {
  363         struct irdma_qp_uk *qp;
  364         struct irdma_cq_uk *cq_uk = &cq->cq_uk;
  365         struct irdma_puda_cmpl_info info = {0};
  366         int ret = 0;
  367         struct irdma_puda_buf *buf;
  368         struct irdma_puda_rsrc *rsrc;
  369         u8 cq_type = cq->cq_type;
  370         unsigned long flags;
  371 
  372         if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
  373                 rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
  374                     cq->vsi->ieq;
  375         } else {
  376                 irdma_debug(dev, IRDMA_DEBUG_PUDA, "qp_type error\n");
  377                 return -EFAULT;
  378         }
  379 
  380         ret = irdma_puda_poll_info(cq, &info);
  381         *compl_err = info.compl_error;
  382         if (ret == -ENOENT)
  383                 return ret;
  384         if (ret)
  385                 goto done;
  386 
  387         qp = info.qp;
  388         if (!qp || !rsrc) {
  389                 ret = -EFAULT;
  390                 goto done;
  391         }
  392 
  393         if (qp->qp_id != rsrc->qp_id) {
  394                 ret = -EFAULT;
  395                 goto done;
  396         }
  397 
  398         if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
  399                 buf = (struct irdma_puda_buf *)(uintptr_t)
  400                     qp->rq_wrid_array[info.wqe_idx];
  401 
  402                 /* reusing so synch the buffer for CPU use */
  403                 dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
  404                 /* Get all the tcpip information in the buf header */
  405                 ret = irdma_puda_get_tcpip_info(&info, buf);
  406                 if (ret) {
  407                         rsrc->stats_rcvd_pkt_err++;
  408                         if (cq_type == IRDMA_CQ_TYPE_ILQ) {
  409                                 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
  410                                                          info.wqe_idx);
  411                         } else {
  412                                 irdma_puda_ret_bufpool(rsrc, buf);
  413                                 irdma_puda_replenish_rq(rsrc, false);
  414                         }
  415                         goto done;
  416                 }
  417 
  418                 rsrc->stats_pkt_rcvd++;
  419                 rsrc->compl_rxwqe_idx = info.wqe_idx;
  420                 irdma_debug(dev, IRDMA_DEBUG_PUDA, "RQ completion\n");
  421                 rsrc->receive(rsrc->vsi, buf);
  422                 if (cq_type == IRDMA_CQ_TYPE_ILQ)
  423                         irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
  424                 else
  425                         irdma_puda_replenish_rq(rsrc, false);
  426 
  427         } else {
  428                 irdma_debug(dev, IRDMA_DEBUG_PUDA, "SQ completion\n");
  429                 buf = (struct irdma_puda_buf *)(uintptr_t)
  430                     qp->sq_wrtrk_array[info.wqe_idx].wrid;
  431 
  432                 /* reusing so synch the buffer for CPU use */
  433                 dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
  434                 IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
  435                 rsrc->xmit_complete(rsrc->vsi, buf);
  436                 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  437                 rsrc->tx_wqe_avail_cnt++;
  438                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  439                 if (!list_empty(&rsrc->txpend))
  440                         irdma_puda_send_buf(rsrc, NULL);
  441         }
  442 
  443 done:
  444         IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
  445         if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
  446                 cq_uk->polarity = !cq_uk->polarity;
  447         /* update cq tail in cq shadow memory also */
  448         IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
  449         set_64bit_val(cq_uk->shadow_area, IRDMA_BYTE_0,
  450                       IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
  451 
  452         return ret;
  453 }
  454 
  455 /**
  456  * irdma_puda_send - complete send wqe for transmit
  457  * @qp: puda qp for send
  458  * @info: buffer information for transmit
  459  */
  460 int
  461 irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
  462 {
  463         __le64 *wqe;
  464         u32 iplen, l4len;
  465         u64 hdr[2];
  466         u32 wqe_idx;
  467         u8 iipt;
  468 
   469         /* TCP (L4) header length in 32-bit words */
  470         l4len = info->tcplen >> 2;
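               /* iplen is the IP header length in 32-bit words (IPv4: 5, IPv6: 10); iipt selects the IP header type */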
  471         if (info->ipv4) {
  472                 iipt = 3;
  473                 iplen = 5;
  474         } else {
  475                 iipt = 1;
  476                 iplen = 10;
  477         }
  478 
  479         wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
  480         if (!wqe)
  481                 return -ENOSPC;
  482 
  483         qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
  484         /* Third line of WQE descriptor */
  485         /* maclen is in words */
  486 
  487         if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  488                 hdr[0] = 0;     /* Dest_QPN and Dest_QKey only for UD */
  489                 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
  490                     FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
  491                     FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
  492                     FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
  493                     FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
  494                                qp->qp_uk.swqe_polarity);
  495 
   496                 /* Fourth line of WQE descriptor */
  497 
  498                 set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
  499                 set_64bit_val(wqe, IRDMA_BYTE_8,
  500                               FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
  501                               FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
  502         } else {
  503                 hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
  504                     FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
  505                     FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
  506                     FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
  507                     FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
  508 
  509                 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
  510                     FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
  511                     FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
  512                     FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
  513 
   514                 /* Fourth line of WQE descriptor */
  515 
  516                 set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
  517                 set_64bit_val(wqe, IRDMA_BYTE_8,
  518                               FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
  519         }
  520 
  521         set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
  522         irdma_wmb();            /* make sure WQE is written before valid bit is set */
  523 
  524         set_64bit_val(wqe, IRDMA_BYTE_24, hdr[1]);
  525 
  526         irdma_debug_buf(qp->dev, IRDMA_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
  527         irdma_uk_qp_post_wr(&qp->qp_uk);
  528         return 0;
  529 }
  530 
  531 /**
  532  * irdma_puda_send_buf - transmit puda buffer
  533  * @rsrc: resource to use for buffer
  534  * @buf: puda buffer to transmit
  535  */
  536 void
  537 irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
  538                     struct irdma_puda_buf *buf)
  539 {
  540         struct irdma_puda_send_info info;
  541         int ret = 0;
  542         unsigned long flags;
  543 
  544         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
   545         /*
   546          * If no WQE is available, or this is a new buffer (not from a completion) while others are already pending, queue it
   547          */
  548         if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
  549                 list_add_tail(&buf->list, &rsrc->txpend);
  550                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  551                 rsrc->stats_sent_pkt_q++;
  552                 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
  553                         irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
  554                                     "adding to txpend\n");
  555                 return;
  556         }
  557         rsrc->tx_wqe_avail_cnt--;
   558         /*
   559          * If called from a completion (buf == NULL) and buffers are pending, take one from the pending list
   560          */
  561         if (!buf) {
  562                 buf = irdma_puda_get_listbuf(&rsrc->txpend);
  563                 if (!buf)
  564                         goto done;
  565         }
  566 
  567         info.scratch = buf;
  568         info.paddr = buf->mem.pa;
  569         info.len = buf->totallen;
  570         info.tcplen = buf->tcphlen;
  571         info.ipv4 = buf->ipv4;
  572 
  573         if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  574                 info.ah_id = buf->ah_id;
  575         } else {
  576                 info.maclen = buf->maclen;
  577                 info.do_lpb = buf->do_lpb;
  578         }
  579 
   580         /* Sync the buffer before posting it for transmit */
  581         dma_sync_single_for_cpu(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
  582         ret = irdma_puda_send(&rsrc->qp, &info);
  583         if (ret) {
  584                 rsrc->tx_wqe_avail_cnt++;
  585                 rsrc->stats_sent_pkt_q++;
  586                 list_add(&buf->list, &rsrc->txpend);
  587                 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
  588                         irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
  589                                     "adding to puda_send\n");
  590         } else {
  591                 rsrc->stats_pkt_sent++;
  592         }
  593 done:
  594         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  595 }
  596 
  597 /**
  598  * irdma_puda_qp_setctx - during init, set qp's context
  599  * @rsrc: qp's resource
  600  */
  601 static void
  602 irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
  603 {
  604         struct irdma_sc_qp *qp = &rsrc->qp;
  605         __le64 *qp_ctx = qp->hw_host_ctx;
  606 
  607         set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
  608         set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
  609         set_64bit_val(qp_ctx, IRDMA_BYTE_24,
  610                       FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
  611                       FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
  612         set_64bit_val(qp_ctx, IRDMA_BYTE_48,
  613                       FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
  614         set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
  615         if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  616                 set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
  617         set_64bit_val(qp_ctx, IRDMA_BYTE_136,
  618                       FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
  619                       FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
  620         set_64bit_val(qp_ctx, IRDMA_BYTE_144,
  621                       FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
  622         set_64bit_val(qp_ctx, IRDMA_BYTE_160,
  623                       FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
  624                       FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
  625         set_64bit_val(qp_ctx, IRDMA_BYTE_168,
  626                       FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
  627         set_64bit_val(qp_ctx, IRDMA_BYTE_176,
  628                       FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
  629                       FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
  630                       FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
  631 
  632         irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
  633                         IRDMA_QP_CTX_SIZE);
  634 }
  635 
  636 /**
  637  * irdma_puda_qp_wqe - setup wqe for qp create
  638  * @dev: Device
  639  * @qp: Resource qp
  640  */
  641 static int
  642 irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
  643 {
  644         struct irdma_sc_cqp *cqp;
  645         __le64 *wqe;
  646         u64 hdr;
  647         struct irdma_ccq_cqe_info compl_info;
  648         int status = 0;
  649 
  650         cqp = dev->cqp;
  651         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
  652         if (!wqe)
  653                 return -ENOSPC;
  654 
  655         set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
  656         set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
  657 
  658         hdr = qp->qp_uk.qp_id |
  659             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
  660             FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
  661             FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
  662             FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
  663             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  664         irdma_wmb();            /* make sure WQE is written before valid bit is set */
  665 
  666         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
  667 
  668         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_PUDA, "PUDA QP CREATE", wqe, 40);
  669         irdma_sc_cqp_post_sq(cqp);
  670         status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
  671                                                &compl_info);
  672 
  673         return status;
  674 }
  675 
  676 /**
  677  * irdma_puda_qp_create - create qp for resource
  678  * @rsrc: resource to use for buffer
  679  */
  680 static int
  681 irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
  682 {
  683         struct irdma_sc_qp *qp = &rsrc->qp;
  684         struct irdma_qp_uk *ukqp = &qp->qp_uk;
  685         int ret = 0;
  686         u32 sq_size, rq_size;
  687         struct irdma_dma_mem *mem;
  688 
  689         sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
  690         rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
  691         rsrc->qpmem.size = (sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) +
  692                             IRDMA_QP_CTX_SIZE);
  693         rsrc->qpmem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem,
  694                                                 rsrc->qpmem.size, IRDMA_HW_PAGE_SIZE);
  695         if (!rsrc->qpmem.va)
  696                 return -ENOMEM;
  697 
  698         mem = &rsrc->qpmem;
  699         memset(mem->va, 0, rsrc->qpmem.size);
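               /* the single DMA region is laid out as: SQ ring, RQ ring, shadow area, then the HW host context */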
  700         qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
  701         qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
  702         qp->pd = &rsrc->sc_pd;
  703         qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
  704         qp->dev = rsrc->dev;
  705         qp->qp_uk.back_qp = rsrc;
  706         qp->sq_pa = mem->pa;
  707         qp->rq_pa = qp->sq_pa + sq_size;
  708         qp->vsi = rsrc->vsi;
  709         ukqp->sq_base = mem->va;
  710         ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
  711         ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
  712         ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
  713         qp->shadow_area_pa = qp->rq_pa + rq_size;
  714         qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
  715         qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
  716         qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
  717         ukqp->qp_id = rsrc->qp_id;
  718         ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
  719         ukqp->rq_wrid_array = rsrc->rq_wrid_array;
  720         ukqp->sq_size = rsrc->sq_size;
  721         ukqp->rq_size = rsrc->rq_size;
  722 
  723         IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
  724         IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
  725         IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
  726         ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
  727 
  728         ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
  729         if (ret) {
  730                 irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
  731                 return ret;
  732         }
  733 
  734         irdma_qp_add_qos(qp);
  735         irdma_puda_qp_setctx(rsrc);
  736 
  737         if (rsrc->dev->ceq_valid)
  738                 ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
  739         else
  740                 ret = irdma_puda_qp_wqe(rsrc->dev, qp);
  741         if (ret) {
  742                 irdma_qp_rem_qos(qp);
  743                 rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
  744                 irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
  745         }
  746 
  747         return ret;
  748 }
  749 
  750 /**
  751  * irdma_puda_cq_wqe - setup wqe for CQ create
  752  * @dev: Device
  753  * @cq: resource for cq
  754  */
  755 static int
  756 irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
  757 {
  758         __le64 *wqe;
  759         struct irdma_sc_cqp *cqp;
  760         u64 hdr;
  761         struct irdma_ccq_cqe_info compl_info;
  762         int status = 0;
  763 
  764         cqp = dev->cqp;
  765         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
  766         if (!wqe)
  767                 return -ENOSPC;
  768 
  769         set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
  770         set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
  771         set_64bit_val(wqe, IRDMA_BYTE_16,
  772                       FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
  773         set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
  774         set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
  775         set_64bit_val(wqe, IRDMA_BYTE_56,
  776                       FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
  777                       FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
  778 
  779         hdr = cq->cq_uk.cq_id |
  780             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
  781             FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
  782             FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
  783             FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
  784             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  785         irdma_wmb();            /* make sure WQE is written before valid bit is set */
  786 
  787         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
  788 
  789         irdma_debug_buf(dev, IRDMA_DEBUG_PUDA, "PUDA CREATE CQ", wqe,
  790                         IRDMA_CQP_WQE_SIZE * 8);
  791         irdma_sc_cqp_post_sq(dev->cqp);
  792         status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
  793                                                &compl_info);
  794         if (!status) {
  795                 struct irdma_sc_ceq *ceq = dev->ceq[0];
  796 
  797                 if (ceq && ceq->reg_cq)
  798                         status = irdma_sc_add_cq_ctx(ceq, cq);
  799         }
  800 
  801         return status;
  802 }
  803 
  804 /**
  805  * irdma_puda_cq_create - create cq for resource
  806  * @rsrc: resource for which cq to create
  807  */
  808 static int
  809 irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
  810 {
  811         struct irdma_sc_dev *dev = rsrc->dev;
  812         struct irdma_sc_cq *cq = &rsrc->cq;
  813         int ret = 0;
  814         u32 cqsize;
  815         struct irdma_dma_mem *mem;
  816         struct irdma_cq_init_info info = {0};
  817         struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
  818 
  819         cq->vsi = rsrc->vsi;
  820         cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
  821         rsrc->cqmem.size = cqsize + sizeof(struct irdma_cq_shadow_area);
  822         rsrc->cqmem.va = irdma_allocate_dma_mem(dev->hw, &rsrc->cqmem,
  823                                                 rsrc->cqmem.size,
  824                                                 IRDMA_CQ0_ALIGNMENT);
  825         if (!rsrc->cqmem.va)
  826                 return -ENOMEM;
  827 
  828         mem = &rsrc->cqmem;
  829         info.dev = dev;
  830         info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
  831             IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
  832         info.shadow_read_threshold = rsrc->cq_size >> 2;
  833         info.cq_base_pa = mem->pa;
  834         info.shadow_area_pa = mem->pa + cqsize;
  835         init_info->cq_base = mem->va;
  836         init_info->shadow_area = (__le64 *) ((u8 *)mem->va + cqsize);
  837         init_info->cq_size = rsrc->cq_size;
  838         init_info->cq_id = rsrc->cq_id;
  839         info.ceqe_mask = true;
  840         info.ceq_id_valid = true;
  841         info.vsi = rsrc->vsi;
  842 
  843         ret = irdma_sc_cq_init(cq, &info);
  844         if (ret)
  845                 goto error;
  846 
  847         if (rsrc->dev->ceq_valid)
  848                 ret = irdma_cqp_cq_create_cmd(dev, cq);
  849         else
  850                 ret = irdma_puda_cq_wqe(dev, cq);
  851 error:
  852         if (ret)
  853                 irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
  854 
  855         return ret;
  856 }
  857 
  858 /**
  859  * irdma_puda_free_qp - free qp for resource
  860  * @rsrc: resource for which qp to free
  861  */
  862 static void
  863 irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
  864 {
  865         int ret;
  866         struct irdma_ccq_cqe_info compl_info;
  867         struct irdma_sc_dev *dev = rsrc->dev;
  868 
  869         if (rsrc->dev->ceq_valid) {
  870                 irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
  871                 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
  872                 return;
  873         }
  874 
  875         ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
  876         if (ret)
  877                 irdma_debug(dev, IRDMA_DEBUG_PUDA,
  878                             "error puda qp destroy wqe, status = %d\n", ret);
  879         if (!ret) {
  880                 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
  881                                                     &compl_info);
  882                 if (ret)
  883                         irdma_debug(dev, IRDMA_DEBUG_PUDA,
  884                                     "error puda qp destroy failed, status = %d\n",
  885                                     ret);
  886         }
  887         rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
  888 }
  889 
  890 /**
  891  * irdma_puda_free_cq - free cq for resource
  892  * @rsrc: resource for which cq to free
  893  */
  894 static void
  895 irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
  896 {
  897         int ret;
  898         struct irdma_ccq_cqe_info compl_info;
  899         struct irdma_sc_dev *dev = rsrc->dev;
  900 
  901         if (rsrc->dev->ceq_valid) {
  902                 irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
  903                 return;
  904         }
  905 
  906         ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
  907         if (ret)
  908                 irdma_debug(dev, IRDMA_DEBUG_PUDA, "error ieq cq destroy\n");
  909         if (!ret) {
  910                 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
  911                                                     &compl_info);
  912                 if (ret)
  913                         irdma_debug(dev, IRDMA_DEBUG_PUDA,
   914                                     "error ieq cq destroy done\n");
  915         }
  916 }
  917 
  918 /**
  919  * irdma_puda_dele_rsrc - delete all resources during close
  920  * @vsi: VSI structure of device
   921  * @type: type of resource to delete
  922  * @reset: true if reset chip
  923  */
  924 void
  925 irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
  926                      bool reset)
  927 {
  928         struct irdma_sc_dev *dev = vsi->dev;
  929         struct irdma_puda_rsrc *rsrc;
  930         struct irdma_puda_buf *buf = NULL;
  931         struct irdma_puda_buf *nextbuf = NULL;
  932         struct irdma_virt_mem *vmem;
  933         struct irdma_sc_ceq *ceq;
  934 
  935         ceq = vsi->dev->ceq[0];
  936         switch (type) {
  937         case IRDMA_PUDA_RSRC_TYPE_ILQ:
  938                 rsrc = vsi->ilq;
  939                 vmem = &vsi->ilq_mem;
  940                 vsi->ilq = NULL;
  941                 if (ceq && ceq->reg_cq)
  942                         irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
  943                 break;
  944         case IRDMA_PUDA_RSRC_TYPE_IEQ:
  945                 rsrc = vsi->ieq;
  946                 vmem = &vsi->ieq_mem;
  947                 vsi->ieq = NULL;
  948                 if (ceq && ceq->reg_cq)
  949                         irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
  950                 break;
  951         default:
  952                 irdma_debug(dev, IRDMA_DEBUG_PUDA,
  953                             "error resource type = 0x%x\n", type);
  954                 return;
  955         }
  956 
  957         spin_lock_destroy(&rsrc->bufpool_lock);
  958         switch (rsrc->cmpl) {
  959         case PUDA_HASH_CRC_COMPLETE:
  960                 irdma_free_hash_desc(rsrc->hash_desc);
  961                 /* fallthrough */
  962         case PUDA_QP_CREATED:
  963                 irdma_qp_rem_qos(&rsrc->qp);
  964 
  965                 if (!reset)
  966                         irdma_puda_free_qp(rsrc);
  967 
  968                 irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
  969                 /* fallthrough */
  970         case PUDA_CQ_CREATED:
  971                 if (!reset)
  972                         irdma_puda_free_cq(rsrc);
  973 
  974                 irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
  975                 break;
  976         default:
  977                 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
  978                             "error no resources\n");
  979                 break;
  980         }
  981         /* Free all allocated puda buffers for both tx and rx */
  982         buf = rsrc->alloclist;
  983         while (buf) {
  984                 nextbuf = buf->next;
  985                 irdma_puda_dele_buf(dev, buf);
  986                 buf = nextbuf;
  987                 rsrc->alloc_buf_count--;
  988         }
  989 
  990         kfree(vmem->va);
  991 }
  992 
  993 /**
  994  * irdma_puda_allocbufs - allocate buffers for resource
  995  * @rsrc: resource for buffer allocation
  996  * @count: number of buffers to create
  997  */
  998 static int
  999 irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
 1000 {
 1001         u32 i;
 1002         struct irdma_puda_buf *buf;
 1003         struct irdma_puda_buf *nextbuf;
 1004         struct irdma_virt_mem buf_mem;
 1005         struct irdma_dma_mem *dma_mem;
 1006         bool virtdma = false;
 1007         unsigned long flags;
 1008 
 1009         buf_mem.size = count * sizeof(struct irdma_puda_buf);
 1010         buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
 1011         if (!buf_mem.va) {
 1012                 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
 1013                             "error virt_mem for buf\n");
 1014                 rsrc->stats_buf_alloc_fail++;
 1015                 goto trysmall;
 1016         }
 1017 
  1018         /*
  1019          * Allocate one large DMA chunk and store its DMA attributes in the first puda buffer; the free path relies on this
  1020          */
 1021         buf = (struct irdma_puda_buf *)buf_mem.va;
 1022         buf->mem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &buf->mem,
 1023                                              rsrc->buf_size * count, 1);
 1024         if (!buf->mem.va) {
 1025                 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
 1026                             "error dma_mem for buf\n");
 1027                 kfree(buf_mem.va);
 1028                 rsrc->stats_buf_alloc_fail++;
 1029                 goto trysmall;
 1030         }
 1031 
 1032         /*
 1033          * dma_mem points to start of the large DMA chunk
 1034          */
 1035         dma_mem = &buf->mem;
 1036 
 1037         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
 1038         for (i = 0; i < count; i++) {
 1039                 buf = ((struct irdma_puda_buf *)buf_mem.va) + i;
 1040 
 1041                 buf->mem.va = (char *)dma_mem->va + (i * rsrc->buf_size);
 1042                 buf->mem.pa = dma_mem->pa + (i * rsrc->buf_size);
 1043                 buf->mem.size = rsrc->buf_size;
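                       /* only the first buffer (virtdma == false) owns the DMA chunk and buffer array; irdma_puda_dele_buf() frees nothing for the rest */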
 1044                 buf->virtdma = virtdma;
 1045                 virtdma = true;
 1046 
 1047                 buf->buf_mem.va = buf_mem.va;
 1048                 buf->buf_mem.size = buf_mem.size;
 1049 
 1050                 list_add(&buf->list, &rsrc->bufpool);
 1051                 rsrc->alloc_buf_count++;
 1052                 if (!rsrc->alloclist) {
 1053                         rsrc->alloclist = buf;
 1054                 } else {
 1055                         nextbuf = rsrc->alloclist;
 1056                         rsrc->alloclist = buf;
 1057                         buf->next = nextbuf;
 1058                 }
 1059         }
 1060         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 1061 
 1062         rsrc->avail_buf_count = rsrc->alloc_buf_count;
 1063         return 0;
 1064 trysmall:
 1065         for (i = 0; i < count; i++) {
 1066                 buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
 1067                 if (!buf) {
 1068                         rsrc->stats_buf_alloc_fail++;
 1069                         return -ENOMEM;
 1070                 }
 1071                 irdma_puda_ret_bufpool(rsrc, buf);
 1072                 rsrc->alloc_buf_count++;
 1073                 if (!rsrc->alloclist) {
 1074                         rsrc->alloclist = buf;
 1075                 } else {
 1076                         nextbuf = rsrc->alloclist;
 1077                         rsrc->alloclist = buf;
 1078                         buf->next = nextbuf;
 1079                 }
 1080         }
 1081 
 1082         rsrc->avail_buf_count = rsrc->alloc_buf_count;
 1083 
 1084         return 0;
 1085 }
 1086 
 1087 /**
 1088  * irdma_puda_create_rsrc - create resource (ilq or ieq)
 1089  * @vsi: sc VSI struct
 1090  * @info: resource information
 1091  */
 1092 int
 1093 irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
 1094                        struct irdma_puda_rsrc_info *info)
 1095 {
 1096         struct irdma_sc_dev *dev = vsi->dev;
 1097         int ret = 0;
 1098         struct irdma_puda_rsrc *rsrc;
 1099         u32 pudasize;
 1100         u32 sqwridsize, rqwridsize;
 1101         struct irdma_virt_mem *vmem;
 1102 
 1103         info->count = 1;
 1104         pudasize = sizeof(struct irdma_puda_rsrc);
 1105         sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
 1106         rqwridsize = info->rq_size * 8;
 1107         switch (info->type) {
 1108         case IRDMA_PUDA_RSRC_TYPE_ILQ:
 1109                 vmem = &vsi->ilq_mem;
 1110                 break;
 1111         case IRDMA_PUDA_RSRC_TYPE_IEQ:
 1112                 vmem = &vsi->ieq_mem;
 1113                 break;
 1114         default:
 1115                 return -EOPNOTSUPP;
 1116         }
 1117         vmem->size = pudasize + sqwridsize + rqwridsize;
 1118         vmem->va = kzalloc(vmem->size, GFP_KERNEL);
 1119         if (!vmem->va)
 1120                 return -ENOMEM;
 1121 
 1122         rsrc = vmem->va;
 1123         spin_lock_init(&rsrc->bufpool_lock);
 1124         switch (info->type) {
 1125         case IRDMA_PUDA_RSRC_TYPE_ILQ:
 1126                 vsi->ilq = vmem->va;
 1127                 vsi->ilq_count = info->count;
 1128                 rsrc->receive = info->receive;
 1129                 rsrc->xmit_complete = info->xmit_complete;
 1130                 break;
 1131         case IRDMA_PUDA_RSRC_TYPE_IEQ:
 1132                 vsi->ieq_count = info->count;
 1133                 vsi->ieq = vmem->va;
 1134                 rsrc->receive = irdma_ieq_receive;
 1135                 rsrc->xmit_complete = irdma_ieq_tx_compl;
 1136                 break;
 1137         default:
 1138                 return -EOPNOTSUPP;
 1139         }
 1140 
 1141         rsrc->type = info->type;
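               /* the SQ/RQ work-request tracking arrays live in the same allocation, directly after the rsrc struct */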
 1142         rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
 1143             ((u8 *)vmem->va + pudasize);
 1144         rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
 1145         /* Initialize all ieq lists */
 1146         INIT_LIST_HEAD(&rsrc->bufpool);
 1147         INIT_LIST_HEAD(&rsrc->txpend);
 1148 
 1149         rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
 1150         irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
 1151         rsrc->qp_id = info->qp_id;
 1152         rsrc->cq_id = info->cq_id;
 1153         rsrc->sq_size = info->sq_size;
 1154         rsrc->rq_size = info->rq_size;
 1155         rsrc->cq_size = info->rq_size + info->sq_size;
 1156         if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1157                 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
 1158                         rsrc->cq_size += info->rq_size;
 1159         }
 1160         rsrc->buf_size = info->buf_size;
 1161         rsrc->dev = dev;
 1162         rsrc->vsi = vsi;
 1163         rsrc->stats_idx = info->stats_idx;
 1164         rsrc->stats_idx_valid = info->stats_idx_valid;
 1165 
 1166         ret = irdma_puda_cq_create(rsrc);
 1167         if (!ret) {
 1168                 rsrc->cmpl = PUDA_CQ_CREATED;
 1169                 ret = irdma_puda_qp_create(rsrc);
 1170         }
 1171         if (ret) {
 1172                 irdma_debug(dev, IRDMA_DEBUG_PUDA,
 1173                             "error qp_create type=%d, status=%d\n", rsrc->type,
 1174                             ret);
 1175                 goto error;
 1176         }
 1177         rsrc->cmpl = PUDA_QP_CREATED;
 1178 
 1179         ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
 1180         if (ret) {
 1181                 irdma_debug(dev, IRDMA_DEBUG_PUDA, "error alloc_buf\n");
 1182                 goto error;
 1183         }
 1184 
 1185         rsrc->rxq_invalid_cnt = info->rq_size;
 1186         ret = irdma_puda_replenish_rq(rsrc, true);
 1187         if (ret)
 1188                 goto error;
 1189 
 1190         if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
 1191                 if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
 1192                         rsrc->check_crc = true;
 1193                         rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
 1194                         ret = 0;
 1195                 }
 1196         }
 1197 
 1198         irdma_sc_ccq_arm(&rsrc->cq);
 1199         return ret;
 1200 
 1201 error:
 1202         irdma_puda_dele_rsrc(vsi, info->type, false);
 1203 
 1204         return ret;
 1205 }
 1206 
 1207 /**
 1208  * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq
 1209  * @qp: ilq's qp resource
 1210  * @buf: puda buffer for rcv q
 1211  * @wqe_idx:  wqe index of completed rcvbuf
 1212  */
 1213 static void
 1214 irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
 1215                          struct irdma_puda_buf *buf, u32 wqe_idx)
 1216 {
 1217         __le64 *wqe;
 1218         u64 offset8, offset24;
 1219 
 1220         /* Synch buffer for use by device */
 1221         dma_sync_single_for_device(hw_to_dev(qp->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
 1222         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
 1223         get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
 1224         if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1225                 get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
 1226                 if (offset24)
 1227                         offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
 1228                 else
 1229                         offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
 1230                 set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
 1231                 irdma_wmb();    /* make sure WQE is written before valid bit is set */
 1232         }
 1233         if (offset24)
 1234                 offset24 = 0;
 1235         else
 1236                 offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
 1237 
 1238         set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
 1239 }
 1240 
 1241 /**
 1242  * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
 1243  * @pfpdu: pointer to fpdu
 1244  * @datap: pointer to data in the buffer
 1245  * @rcv_seq: seqnum of the data buffer
 1246  */
  1247 static u16
  1248 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap, u32 rcv_seq)
       {
 1249         u32 marker_seq, end_seq, blk_start;
 1250         u8 marker_len = pfpdu->marker_len;
 1251         u16 total_len = 0;
 1252         u16 fpdu_len;
 1253 
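               /* MPA markers (when enabled) occur every IRDMA_MRK_BLK_SZ bytes of the stream; find where the next one falls relative to this buffer */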
 1254         blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
 1255         if (!blk_start) {
 1256                 total_len = marker_len;
 1257                 marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
 1258                 if (marker_len && *(u32 *)datap)
 1259                         return 0;
 1260         } else {
 1261                 marker_seq = rcv_seq + blk_start;
 1262         }
 1263 
 1264         datap += total_len;
 1265         fpdu_len = IRDMA_NTOHS(*(__be16 *) datap);
 1266         fpdu_len += IRDMA_IEQ_MPA_FRAMING;
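               /* round the FPDU length up to a 4-byte boundary to cover MPA padding */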
 1267         fpdu_len = (fpdu_len + 3) & 0xfffc;
 1268 
 1269         if (fpdu_len > pfpdu->max_fpdu_data)
 1270                 return 0;
 1271 
 1272         total_len += fpdu_len;
 1273         end_seq = rcv_seq + total_len;
 1274         while ((int)(marker_seq - end_seq) < 0) {
 1275                 total_len += marker_len;
 1276                 end_seq += marker_len;
 1277                 marker_seq += IRDMA_MRK_BLK_SZ;
 1278         }
 1279 
 1280         return total_len;
 1281 }
 1282 
 1283 /**
  1284  * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 1285  * @buf: rcv buffer with partial
 1286  * @txbuf: tx buffer for sending back
 1287  * @buf_offset: rcv buffer offset to copy from
 1288  * @txbuf_offset: at offset in tx buf to copy
 1289  * @len: length of data to copy
 1290  */
 1291 static void
 1292 irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
 1293                         struct irdma_puda_buf *txbuf,
 1294                         u16 buf_offset, u32 txbuf_offset, u32 len)
 1295 {
 1296         void *mem1 = (u8 *)buf->mem.va + buf_offset;
 1297         void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
 1298 
 1299         irdma_memcpy(mem2, mem1, len);
 1300 }
 1301 
 1302 /**
 1303  * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
  1304  * @buf: receive buffer with partial
 1305  * @txbuf: buffer to prepare
 1306  */
 1307 static void
 1308 irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
 1309                        struct irdma_puda_buf *txbuf)
 1310 {
 1311         txbuf->tcphlen = buf->tcphlen;
 1312         txbuf->ipv4 = buf->ipv4;
 1313 
 1314         if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1315                 txbuf->hdrlen = txbuf->tcphlen;
 1316                 irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
 1317                                         txbuf->hdrlen);
 1318         } else {
 1319                 txbuf->maclen = buf->maclen;
 1320                 txbuf->hdrlen = buf->hdrlen;
 1321                 irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
 1322         }
 1323 }
 1324 
 1325 /**
 1326  * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
 1327  * @buf: receive exception buffer
 1328  * @fps: first partial sequence number
 1329  */
 1330 static void
 1331 irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
 1332 {
 1333         u32 offset;
 1334 
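               /* trim any leading bytes that precede the first partial sequence number */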
 1335         if (buf->seqnum < fps) {
 1336                 offset = fps - buf->seqnum;
 1337                 if (offset > buf->datalen)
 1338                         return;
 1339                 buf->data += offset;
 1340                 buf->datalen -= (u16)offset;
 1341                 buf->seqnum = fps;
 1342         }
 1343 }
 1344 
 1345 /**
 1346  * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
 1347  * @ieq: ieq resource
 1348  * @rxlist: ieq's received buffer list
  1349  * @pbufl: temporary list for buffers for fpdu
 1350  * @txbuf: tx buffer for fpdu
 1351  * @fpdu_len: total length of fpdu
 1352  */
 1353 static void
 1354 irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
 1355                       struct list_head *rxlist,
 1356                       struct list_head *pbufl,
 1357                       struct irdma_puda_buf *txbuf, u16 fpdu_len)
 1358 {
 1359         struct irdma_puda_buf *buf;
 1360         u32 nextseqnum;
 1361         u16 txoffset, bufoffset;
 1362 
 1363         buf = irdma_puda_get_listbuf(pbufl);
 1364         if (!buf)
 1365                 return;
 1366 
 1367         nextseqnum = buf->seqnum + fpdu_len;
 1368         irdma_ieq_setup_tx_buf(buf, txbuf);
 1369         if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1370                 txoffset = txbuf->hdrlen;
 1371                 txbuf->totallen = txbuf->hdrlen + fpdu_len;
 1372                 txbuf->data = (u8 *)txbuf->mem.va + txoffset;
 1373         } else {
 1374                 txoffset = buf->hdrlen;
 1375                 txbuf->totallen = buf->hdrlen + fpdu_len;
 1376                 txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
 1377         }
 1378         bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
 1379 
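              /* copy from successive partial buffers until fpdu_len bytes have been placed in txbuf */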
 1380         do {
 1381                 if (buf->datalen >= fpdu_len) {
 1382                         /* copied full fpdu */
 1383                         irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
 1384                                                 fpdu_len);
 1385                         buf->datalen -= fpdu_len;
 1386                         buf->data += fpdu_len;
 1387                         buf->seqnum = nextseqnum;
 1388                         break;
 1389                 }
 1390                 /* copy partial fpdu */
 1391                 irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
 1392                                         buf->datalen);
 1393                 txoffset += buf->datalen;
 1394                 fpdu_len -= buf->datalen;
 1395                 irdma_puda_ret_bufpool(ieq, buf);
 1396                 buf = irdma_puda_get_listbuf(pbufl);
 1397                 if (!buf)
 1398                         return;
 1399 
 1400                 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
 1401         } while (1);
 1402 
 1403         /* last buffer on the list */
 1404         if (buf->datalen)
 1405                 list_add(&buf->list, rxlist);
 1406         else
 1407                 irdma_puda_ret_bufpool(ieq, buf);
 1408 }
 1409 
 1410 /**
 1411  * irdma_ieq_create_pbufl - create buffer list for single fpdu
 1412  * @pfpdu: pointer to fpdu
 1413  * @rxlist: resource list for receive ieq buffers
 1414  * @pbufl: temp. list for buffers for fpdu
 1415  * @buf: first receive buffer
 1416  * @fpdu_len: total length of fpdu
 1417  */
 1418 static int
 1419 irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
 1420                        struct list_head *rxlist,
 1421                        struct list_head *pbufl,
 1422                        struct irdma_puda_buf *buf, u16 fpdu_len)
 1423 {
 1424         int status = 0;
 1425         struct irdma_puda_buf *nextbuf;
 1426         u32 nextseqnum;
 1427         u16 plen = fpdu_len - buf->datalen;
 1428         bool done = false;
 1429 
 1430         nextseqnum = buf->seqnum + buf->datalen;
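              /* pull buffers off rxlist onto pbufl until enough contiguous data covers the fpdu; stop on a sequence gap */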
 1431         do {
 1432                 nextbuf = irdma_puda_get_listbuf(rxlist);
 1433                 if (!nextbuf) {
 1434                         status = -ENOBUFS;
 1435                         break;
 1436                 }
 1437                 list_add_tail(&nextbuf->list, pbufl);
 1438                 if (nextbuf->seqnum != nextseqnum) {
 1439                         pfpdu->bad_seq_num++;
 1440                         status = -ERANGE;
 1441                         break;
 1442                 }
 1443                 if (nextbuf->datalen >= plen) {
 1444                         done = true;
 1445                 } else {
 1446                         plen -= nextbuf->datalen;
 1447                         nextseqnum = nextbuf->seqnum + nextbuf->datalen;
 1448                 }
 1449 
 1450         } while (!done);
 1451 
 1452         return status;
 1453 }
 1454 
 1455 /**
 1456  * irdma_ieq_handle_partial - process partial fpdu buffer
 1457  * @ieq: ieq resource
 1458  * @pfpdu: partial management per user qp
 1459  * @buf: receive buffer
 1460  * @fpdu_len: fpdu len in the buffer
 1461  */
 1462 static int
 1463 irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
 1464                          struct irdma_pfpdu *pfpdu,
 1465                          struct irdma_puda_buf *buf, u16 fpdu_len)
 1466 {
 1467         int status = 0;
 1468         u8 *crcptr;
 1469         u32 mpacrc;
 1470         u32 seqnum = buf->seqnum;
 1471         struct list_head pbufl; /* partial buffer list */
 1472         struct irdma_puda_buf *txbuf = NULL;
 1473         struct list_head *rxlist = &pfpdu->rxlist;
 1474 
 1475         ieq->partials_handled++;
 1476 
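              /* collect the receive buffers making up this fpdu on a local list, then reassemble them into a single tx buffer */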
 1477         INIT_LIST_HEAD(&pbufl);
 1478         list_add(&buf->list, &pbufl);
 1479 
 1480         status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
 1481         if (status)
 1482                 goto error;
 1483 
 1484         txbuf = irdma_puda_get_bufpool(ieq);
 1485         if (!txbuf) {
 1486                 pfpdu->no_tx_bufs++;
 1487                 status = -ENOBUFS;
 1488                 goto error;
 1489         }
 1490 
 1491         irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
 1492         irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
 1493 
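              /* the MPA CRC occupies the last 4 bytes of the fpdu */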
 1494         crcptr = txbuf->data + fpdu_len - 4;
 1495         mpacrc = *(u32 *)crcptr;
 1496         if (ieq->check_crc) {
 1497                 status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
 1498                                                 (fpdu_len - 4), mpacrc);
 1499                 if (status) {
 1500                         irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
 1501                                     "error bad crc\n");
 1502                         pfpdu->mpa_crc_err = true;
 1503                         goto error;
 1504                 }
 1505         }
 1506 
 1507         irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
 1508                         txbuf->mem.va, txbuf->totallen);
 1509         if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 1510                 txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
 1511         txbuf->do_lpb = true;
 1512         irdma_puda_send_buf(ieq, txbuf);
 1513         pfpdu->rcv_nxt = seqnum + fpdu_len;
 1514         return status;
 1515 
 1516 error:
 1517         while (!list_empty(&pbufl)) {
 1518                 buf = (struct irdma_puda_buf *)(&pbufl)->prev;
 1519                 list_del(&buf->list);
 1520                 list_add(&buf->list, rxlist);
 1521         }
 1522         if (txbuf)
 1523                 irdma_puda_ret_bufpool(ieq, txbuf);
 1524 
 1525         return status;
 1526 }
 1527 
 1528 /**
 1529  * irdma_ieq_process_buf - process buffer rcvd for ieq
 1530  * @ieq: ieq resource
 1531  * @pfpdu: partial management per user qp
 1532  * @buf: receive buffer
 1533  */
 1534 static int
 1535 irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
 1536                       struct irdma_pfpdu *pfpdu,
 1537                       struct irdma_puda_buf *buf)
 1538 {
 1539         u16 fpdu_len = 0;
 1540         u16 datalen = buf->datalen;
 1541         u8 *datap = buf->data;
 1542         u8 *crcptr;
 1543         u16 ioffset = 0;
 1544         u32 mpacrc;
 1545         u32 seqnum = buf->seqnum;
 1546         u16 len = 0;
 1547         u16 full = 0;
 1548         bool partial = false;
 1549         struct irdma_puda_buf *txbuf;
 1550         struct list_head *rxlist = &pfpdu->rxlist;
 1551         int ret = 0;
 1552 
 1553         ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
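              /* walk the buffer one fpdu at a time, validating the length and (when CRC checking is enabled) the MPA CRC of each complete fpdu */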
 1554         while (datalen) {
 1555                 fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
 1556                 if (!fpdu_len) {
 1557                         irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
 1558                                     "error bad fpdu len\n");
 1559                         list_add(&buf->list, rxlist);
 1560                         pfpdu->mpa_crc_err = true;
 1561                         return -EINVAL;
 1562                 }
 1563 
 1564                 if (datalen < fpdu_len) {
 1565                         partial = true;
 1566                         break;
 1567                 }
 1568                 crcptr = datap + fpdu_len - 4;
 1569                 mpacrc = *(u32 *)crcptr;
 1570                 if (ieq->check_crc)
 1571                         ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
 1572                                                      fpdu_len - 4, mpacrc);
 1573                 if (ret) {
 1574                         list_add(&buf->list, rxlist);
 1575                         irdma_debug(ieq->dev, IRDMA_DEBUG_ERR,
 1576                                     "IRDMA_ERR_MPA_CRC\n");
 1577                         pfpdu->mpa_crc_err = true;
 1578                         return ret;
 1579                 }
 1580                 full++;
 1581                 pfpdu->fpdu_processed++;
 1582                 ieq->fpdu_processed++;
 1583                 datap += fpdu_len;
 1584                 len += fpdu_len;
 1585                 datalen -= fpdu_len;
 1586         }
 1587         if (full) {
 1588                 /* copy the full fpdus into the txbuf and send them out */
 1589                 txbuf = irdma_puda_get_bufpool(ieq);
 1590                 if (!txbuf) {
 1591                         pfpdu->no_tx_bufs++;
 1592                         list_add(&buf->list, rxlist);
 1593                         return -ENOBUFS;
 1594                 }
 1595                 /* modify txbuf's buffer header */
 1596                 irdma_ieq_setup_tx_buf(buf, txbuf);
 1597                 /* copy full fpdus to the new buffer */
 1598                 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1599                         irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
 1600                                                 txbuf->hdrlen, len);
 1601                         txbuf->totallen = txbuf->hdrlen + len;
 1602                         txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
 1603                 } else {
 1604                         irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
 1605                                                 buf->hdrlen, len);
 1606                         txbuf->totallen = buf->hdrlen + len;
 1607                 }
 1608                 irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
 1609                 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
 1610                                 txbuf->mem.va, txbuf->totallen);
 1611                 txbuf->do_lpb = true;
 1612                 irdma_puda_send_buf(ieq, txbuf);
 1613 
 1614                 if (!datalen) {
 1615                         pfpdu->rcv_nxt = buf->seqnum + len;
 1616                         irdma_puda_ret_bufpool(ieq, buf);
 1617                         return 0;
 1618                 }
 1619                 buf->data = datap;
 1620                 buf->seqnum = seqnum + len;
 1621                 buf->datalen = datalen;
 1622                 pfpdu->rcv_nxt = buf->seqnum;
 1623         }
 1624         if (partial)
 1625                 return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
 1626 
 1627         return 0;
 1628 }
 1629 
 1630 /**
 1631  * irdma_ieq_process_fpdus - process fpdu buffers on the qp's list
 1632  * @qp: qp for which partial fpdus are processed
 1633  * @ieq: ieq resource
 1634  */
 1635 void
 1636 irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
 1637                         struct irdma_puda_rsrc *ieq)
 1638 {
 1639         struct irdma_pfpdu *pfpdu = &qp->pfpdu;
 1640         struct list_head *rxlist = &pfpdu->rxlist;
 1641         struct irdma_puda_buf *buf;
 1642         int status;
 1643 
 1644         do {
 1645                 if (list_empty(rxlist))
 1646                         break;
 1647                 buf = irdma_puda_get_listbuf(rxlist);
 1648                 if (!buf) {
 1649                         irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
 1650                                     "error no buf\n");
 1651                         break;
 1652                 }
 1653                 if (buf->seqnum != pfpdu->rcv_nxt) {
 1654                         /* This could be an out-of-order or missing packet */
 1655                         pfpdu->out_of_order++;
 1656                         list_add(&buf->list, rxlist);
 1657                         break;
 1658                 }
 1659                 /* keep processing buffers from the head of the list */
 1660                 status = irdma_ieq_process_buf(ieq, pfpdu, buf);
 1661                 if (status && pfpdu->mpa_crc_err) {
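                              /* on an MPA CRC error, drop all queued buffers and post an asynchronous event for the qp */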
 1662                         while (!list_empty(rxlist)) {
 1663                                 buf = irdma_puda_get_listbuf(rxlist);
 1664                                 irdma_puda_ret_bufpool(ieq, buf);
 1665                                 pfpdu->crc_err++;
 1666                                 ieq->crc_err++;
 1667                         }
 1668                         /* create CQP for AE */
 1669                         irdma_ieq_mpa_crc_ae(ieq->dev, qp);
 1670                 }
 1671         } while (!status);
 1672 }
 1673 
 1674 /**
 1675  * irdma_ieq_create_ah - create an address handle for IEQ
 1676  * @qp: qp pointer
 1677  * @buf: buf received on IEQ used to create AH
 1678  */
 1679 static int
 1680 irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
 1681 {
 1682         struct irdma_ah_info ah_info = {0};
 1683 
 1684         qp->pfpdu.ah_buf = buf;
 1685         irdma_puda_ieq_get_ah_info(qp, &ah_info);
 1686         return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
 1687                                     IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
 1688                                     &qp->pfpdu.ah);
 1689 }
 1690 
 1691 /**
 1692  * irdma_ieq_handle_exception - handle qp's exception
 1693  * @ieq: ieq resource
 1694  * @qp: qp receiving exception
 1695  * @buf: receive buffer
 1696  */
 1697 static void
 1698 irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
 1699                            struct irdma_sc_qp *qp,
 1700                            struct irdma_puda_buf *buf)
 1701 {
 1702         struct irdma_pfpdu *pfpdu = &qp->pfpdu;
 1703         u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
 1704         u32 rcv_wnd = hw_host_ctx[23];
 1705         /* first partial seq # in q2 */
 1706         u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
 1707         struct list_head *rxlist = &pfpdu->rxlist;
 1708         struct list_head *plist;
 1709         struct irdma_puda_buf *tmpbuf = NULL;
 1710         unsigned long flags = 0;
 1711         u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
 1712 
 1713         irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ RX BUFFER", buf->mem.va,
 1714                         buf->totallen);
 1715 
 1716         spin_lock_irqsave(&pfpdu->lock, flags);
 1717         pfpdu->total_ieq_bufs++;
 1718         if (pfpdu->mpa_crc_err) {
 1719                 pfpdu->crc_err++;
 1720                 goto error;
 1721         }
 1722         if (pfpdu->mode && fps != pfpdu->fps) {
 1723                 /* clean up qp as this is a new partial sequence */
 1724                 irdma_ieq_cleanup_qp(ieq, qp);
 1725                 irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
 1726                             "restarting new partial\n");
 1727                 pfpdu->mode = false;
 1728         }
 1729 
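              /* first exception while not in partial-fpdu mode: latch fps and switch the qp into partial-fpdu handling */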
 1730         if (!pfpdu->mode) {
 1731                 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "Q2 BUFFER",
 1732                                 (u64 *)qp->q2_buf, 128);
 1733                 /* First_Partial_Sequence_Number check */
 1734                 pfpdu->rcv_nxt = fps;
 1735                 pfpdu->fps = fps;
 1736                 pfpdu->mode = true;
 1737                 pfpdu->max_fpdu_data = (buf->ipv4) ?
 1738                     (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
 1739                     (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
 1740                 pfpdu->pmode_count++;
 1741                 ieq->pmode_count++;
 1742                 INIT_LIST_HEAD(rxlist);
 1743                 irdma_ieq_check_first_buf(buf, fps);
 1744         }
 1745 
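              /* drop the buffer if its sequence number lies outside the receive window */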
 1746         if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
 1747                 pfpdu->bad_seq_num++;
 1748                 ieq->bad_seq_num++;
 1749                 goto error;
 1750         }
 1751 
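              /* insert the buffer into rxlist in ascending sequence-number order; duplicates are returned to the pool */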
 1752         if (!list_empty(rxlist)) {
 1753                 tmpbuf = (struct irdma_puda_buf *)(rxlist)->next;
 1754                 while ((struct list_head *)tmpbuf != rxlist) {
 1755                         if (buf->seqnum == tmpbuf->seqnum)
 1756                                 goto error;
 1757                         if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
 1758                                 break;
 1759                         plist = &tmpbuf->list;
 1760                         tmpbuf = (struct irdma_puda_buf *)(plist)->next;
 1761                 }
 1762                 /* Insert buf before tmpbuf */
 1763                 list_add_tail(&buf->list, &tmpbuf->list);
 1764         } else {
 1765                 list_add_tail(&buf->list, rxlist);
 1766         }
 1767         pfpdu->nextseqnum = buf->seqnum + buf->datalen;
 1768         pfpdu->lastrcv_buf = buf;
 1769         if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
 1770                 irdma_ieq_create_ah(qp, buf);
 1771                 if (!pfpdu->ah)
 1772                         goto error;
 1773                 goto exit;
 1774         }
 1775         if (hw_rev == IRDMA_GEN_1)
 1776                 irdma_ieq_process_fpdus(qp, ieq);
 1777         else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
 1778                 irdma_ieq_process_fpdus(qp, ieq);
 1779 exit:
 1780         spin_unlock_irqrestore(&pfpdu->lock, flags);
 1781 
 1782         return;
 1783 
 1784 error:
 1785         irdma_puda_ret_bufpool(ieq, buf);
 1786         spin_unlock_irqrestore(&pfpdu->lock, flags);
 1787 }
 1788 
 1789 /**
 1790  * irdma_ieq_receive - handle a received exception buffer
 1791  * @vsi: VSI of device
 1792  * @buf: exception buffer received
 1793  */
 1794 static void
 1795 irdma_ieq_receive(struct irdma_sc_vsi *vsi,
 1796                   struct irdma_puda_buf *buf)
 1797 {
 1798         struct irdma_puda_rsrc *ieq = vsi->ieq;
 1799         struct irdma_sc_qp *qp = NULL;
 1800         u32 wqe_idx = ieq->compl_rxwqe_idx;
 1801 
 1802         qp = irdma_ieq_get_qp(vsi->dev, buf);
 1803         if (!qp) {
 1804                 ieq->stats_bad_qp_id++;
 1805                 irdma_puda_ret_bufpool(ieq, buf);
 1806         } else {
 1807                 irdma_ieq_handle_exception(ieq, qp, buf);
 1808         }
 1809         /*
 1810          * ieq->rx_wqe_idx tells irdma_puda_replenish_rq() at which wqe_idx to start replenishing the rq
 1811          */
 1812         if (!ieq->rxq_invalid_cnt)
 1813                 ieq->rx_wqe_idx = wqe_idx;
 1814         ieq->rxq_invalid_cnt++;
 1815 }
 1816 
 1817 /**
 1818  * irdma_ieq_tx_compl - put back after sending completed exception buffer
 1819  * @vsi: sc VSI struct
 1820  * @sqwrid: pointer to puda buffer
 1821  */
 1822 static void
 1823 irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
 1824 {
 1825         struct irdma_puda_rsrc *ieq = vsi->ieq;
 1826         struct irdma_puda_buf *buf = sqwrid;
 1827 
 1828         irdma_puda_ret_bufpool(ieq, buf);
 1829 }
 1830 
 1831 /**
 1832  * irdma_ieq_cleanup_qp - qp is being destroyed
 1833  * @ieq: ieq resource
 1834  * @qp: qp whose pending fpdu buffers are freed
 1835  */
 1836 void
 1837 irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
 1838 {
 1839         struct irdma_puda_buf *buf;
 1840         struct irdma_pfpdu *pfpdu = &qp->pfpdu;
 1841         struct list_head *rxlist = &pfpdu->rxlist;
 1842 
 1843         if (qp->pfpdu.ah) {
 1844                 irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
 1845                 qp->pfpdu.ah = NULL;
 1846                 qp->pfpdu.ah_buf = NULL;
 1847         }
 1848 
 1849         if (!pfpdu->mode)
 1850                 return;
 1851 
 1852         while (!list_empty(rxlist)) {
 1853                 buf = irdma_puda_get_listbuf(rxlist);
 1854                 irdma_puda_ret_bufpool(ieq, buf);
 1855         }
 1856 }
