FreeBSD/Linux Kernel Cross Reference
sys/dev/irdma/irdma_ctrl.c

/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_ws.h"
#include "irdma_protos.h"

/**
 * irdma_qp_from_entry - given a list entry, get the containing qp structure
 * @entry: list entry embedded in the qp structure
 */
static struct irdma_sc_qp *
irdma_qp_from_entry(struct list_head *entry)
{
	if (!entry)
		return NULL;

	return (struct irdma_sc_qp *)((char *)entry -
				      offsetof(struct irdma_sc_qp, list));
}
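
/*
 * Note (illustrative, not part of the driver flow): this is the classic
 * container_of()/offsetof() idiom. Assuming struct irdma_sc_qp embeds its
 * list linkage as a member named "list", rewinding the entry pointer by
 * offsetof() recovers the parent structure:
 *
 *	struct irdma_sc_qp *qp = ...;
 *	struct list_head *entry = &qp->list;
 *
 *	assert(irdma_qp_from_entry(entry) == qp);
 */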

/**
 * irdma_get_qp_from_list - get next qp from a list
 * @head: list head of qps
 * @qp: current qp, or NULL to start at the head
 */
struct irdma_sc_qp *
irdma_get_qp_from_list(struct list_head *head,
		       struct irdma_sc_qp *qp)
{
	struct list_head *lastentry;
	struct list_head *entry = NULL;

	if (list_empty(head))
		return NULL;

	if (!qp) {
		entry = head->next;
	} else {
		lastentry = &qp->list;
		entry = lastentry->next;
		if (entry == head)
			return NULL;
	}

	return irdma_qp_from_entry(entry);
}
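
/*
 * Sketch of the intended iteration idiom (it mirrors the use in
 * irdma_sc_suspend_resume_qps() below); the list must be stabilized by
 * the per-priority qos_mutex while walking:
 *
 *	struct irdma_sc_qp *qp = NULL;
 *
 *	mutex_lock(&vsi->qos[i].qos_mutex);
 *	while ((qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp))) {
 *		... operate on qp ...
 *	}
 *	mutex_unlock(&vsi->qos[i].qos_mutex);
 */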

/**
 * irdma_sc_suspend_resume_qps - suspend/resume all qps on VSI
 * @vsi: the VSI struct pointer
 * @op: IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
 */
void
irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
{
	struct irdma_sc_qp *qp = NULL;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		mutex_lock(&vsi->qos[i].qos_mutex);
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (op == IRDMA_OP_RESUME) {
				if (!qp->dev->ws_add(vsi, i)) {
					qp->qs_handle =
					    vsi->qos[qp->user_pri].qs_handle;
					irdma_cqp_qp_suspend_resume(qp, op);
				} else {
					irdma_cqp_qp_suspend_resume(qp, op);
					irdma_modify_qp_to_err(qp);
				}
			} else if (op == IRDMA_OP_SUSPEND) {
				/* issue cqp suspend command */
				if (!irdma_cqp_qp_suspend_resume(qp, op))
					atomic_inc(&vsi->qp_suspend_reqs);
			}
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		}
		mutex_unlock(&vsi->qos[i].qos_mutex);
	}
}

static void
irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p)
{
	u8 i;

	vsi->qos_rel_bw = l2p->vsi_rel_bw;
	vsi->qos_prio_type = l2p->vsi_prio_type;
	vsi->dscp_mode = l2p->dscp_mode;
	if (l2p->dscp_mode) {
		irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
			l2p->up2tc[i] = i;
	}
	for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
		vsi->tc_print_warning[i] = true;
	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
			irdma_init_config_check(&vsi->cfg_check[i],
						l2p->up2tc[i],
						l2p->qs_handle_list[i]);
		vsi->qos[i].traffic_class = l2p->up2tc[i];
		vsi->qos[i].rel_bw =
		    l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
		vsi->qos[i].prio_type =
		    l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
		vsi->qos[i].valid = false;
	}
}
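
/*
 * Example (illustrative, values hypothetical): with l2p->up2tc =
 * {0, 0, 1, 1, ...} and l2p->tc_info[1].rel_bw = 50, user priority 2
 * maps to traffic class 1 and vsi->qos[2] inherits a relative
 * bandwidth share of 50. In DSCP mode the up2tc table is first forced
 * to the identity mapping, so priority i always lands in traffic
 * class i.
 */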

/**
 * irdma_change_l2params - given the new l2 parameters, change all qps
 * @vsi: RDMA VSI pointer
 * @l2params: New parameters from l2
 */
void
irdma_change_l2params(struct irdma_sc_vsi *vsi,
		      struct irdma_l2params *l2params)
{
	if (l2params->tc_changed) {
		vsi->tc_change_pending = false;
		irdma_set_qos_info(vsi, l2params);
		irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
	}
	if (l2params->mtu_changed) {
		vsi->mtu = l2params->mtu;
		if (vsi->ieq)
			irdma_reinitialize_ieq(vsi);
	}
}

/**
 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
void
irdma_qp_rem_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
		    "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
		    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (qp->on_qoslist) {
		qp->on_qoslist = false;
		list_del(&qp->list);
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void
irdma_qp_add_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
		    "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
		    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (!qp->on_qoslist) {
		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
		qp->on_qoslist = true;
		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: User/Kernel ABI version
 */
void
irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		 int abi_ver)
{
	pd->pd_id = pd_id;
	pd->abi_ver = abi_ver;
	pd->dev = dev;
}

/**
 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_add_arp_cache_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);

	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
	set_64bit_val(wqe, IRDMA_BYTE_16, temp);

	hdr = info->arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent) |
	    FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, true) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
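
/*
 * Example (illustrative): the LS_64_1() packing above places
 * mac_addr[0] in bits 47:40 down through mac_addr[5] in bits 7:0, so a
 * MAC of 00:11:22:33:44:55 yields temp = 0x001122334455.
 */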

/**
 * irdma_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 arp_index, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = arp_index |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
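
/*
 * Note: the two functions above show the CQP command pattern repeated
 * throughout this file: reserve a send WQE with
 * irdma_sc_cqp_get_next_send_wqe(), fill in the data words with
 * set_64bit_val(), issue irdma_wmb(), and only then write the header
 * word carrying the valid (polarity) bit, so the hardware never sees a
 * half-written WQE. The opaque scratch value is returned in the CQP
 * completion so the caller can match request to response.
 */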

/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
			    struct irdma_apbvt_info *info,
			    u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, info->port);

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
	    FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.
 * For passive connections, when a listener is created, it is
 * called with an entry type of IRDMA_QHASH_TYPE_TCP_SYN and the
 * local ip address and tcp port. When a SYN is received (passive
 * connections) or sent (active connections), this routine is
 * called with an entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
 * and the quad is passed in info.
 *
 * When the iwarp connection is done and its state moves to RTS, the
 * quad hash entry in the hardware will point to the iwarp qp
 * number and requires no calls from the driver.
 */
static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
				  struct irdma_qhash_table_info *info,
				  u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	struct irdma_sc_vsi *vsi = info->vsi;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
	set_64bit_val(wqe, IRDMA_BYTE_0, temp);

	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
	if (info->ipv4_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_56,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));

		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
	}
	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
			 vsi->qos[info->user_pri].qs_handle);
	if (info->vlan_valid)
		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
	set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe, IRDMA_BYTE_40,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
			set_64bit_val(wqe, IRDMA_BYTE_32,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
		} else {
			set_64bit_val(wqe, IRDMA_BYTE_32,
				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
		}
	}

	set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
		       IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, temp);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
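
/*
 * Sketch (hypothetical values, per the comment above): a passive
 * listener is added with only the local half of the quad, roughly:
 *
 *	struct irdma_qhash_table_info info = {0};
 *
 *	info.vsi = vsi;
 *	info.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;
 *	info.ipv4_valid = true;
 *	info.dest_ip[0] = local_ipv4;	(listener address)
 *	info.dest_port = listen_port;
 *	info.manage = ...;		(add/delete selector, see headers)
 *
 * while an IRDMA_QHASH_TYPE_TCP_ESTABLISHED entry also fills
 * src_ip[]/src_port to complete the quad.
 */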

/**
 * irdma_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 */
int
irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
	int ret_code;
	u32 pble_obj_cnt;
	u16 wqe_size;

	if (info->qp_uk_init_info.max_sq_frag_cnt >
	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
	    info->qp_uk_init_info.max_rq_frag_cnt >
	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
		return -EINVAL;

	qp->dev = info->pd->dev;
	qp->vsi = info->vsi;
	qp->ieq_qp = info->vsi->exception_lan_q;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;
	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;

	qp->virtual_map = info->virtual_map;
	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
		return -EINVAL;

	qp->llp_stream_handle = (void *)(-1);
	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    IRDMA_QUEUE_TYPE_SQ_RQ);
	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
		    "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size,
		    qp->qp_uk.sq_ring.size);
	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
		wqe_size = IRDMA_WQE_SIZE_128;
	else
		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
						       &wqe_size);
	if (ret_code)
		return ret_code;

	qp->hw_rq_size =
	    irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
				       (wqe_size / IRDMA_QP_WQE_MIN_SIZE),
				       IRDMA_QUEUE_TYPE_SQ_RQ);
	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
		    "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);

	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;

	return 0;
}
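
/*
 * Example (illustrative, assuming IRDMA_QP_WQE_MIN_SIZE is 32, its
 * usual value): an RQ of 128 WQEs with wqe_size 64 spans
 * 128 * (64 / 32) = 256 minimum-size quanta, and it is that quantum
 * count that irdma_get_encoded_wqe_size() turns into the hw_rq_size
 * encoding.
 */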

/**
 * irdma_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
		   u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = qp->dev->cqp;
	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
	    qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
		return -EINVAL;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
		       info->arp_cache_idx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
		   u64 scratch, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
		if (info->dont_send_fin)
			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
			term_len = info->termlen;
	}

	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
		       info->cached_var_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
		       info->remove_hash_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
		       info->arp_cache_idx_valid) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
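
/*
 * Example (illustrative, assuming the usual encoding where
 * IRDMAQP_TERM_SEND_TERM_AND_FIN is 0): with neither dont_send_* flag
 * set, term_actions stays at "send TERM and FIN" and term_len carries
 * the TERM payload length; setting dont_send_fin alone selects
 * IRDMAQP_TERM_SEND_TERM_ONLY (still with a TERM payload), while
 * setting dont_send_term suppresses the TERM and term_len stays 0.
 */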

/**
 * irdma_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag indicating whether to remove the hash entry
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
int
irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
		    bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;

	cqp = qp->dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_get_encoded_ird_size - get the hw encoding for the IRD size
 * @ird_size: IRD size
 * The ird from the connection is rounded to a supported HW setting and then
 * encoded for the ird_size field of qp_ctx. Consumers are expected to provide
 * a valid ird size based on hardware attributes. IRD size defaults to a value
 * of 4 in case of invalid input.
 */
static u8
irdma_sc_get_encoded_ird_size(u16 ird_size)
{
	switch (ird_size ?
		roundup_pow_of_two(2 * ird_size) : 4) {
	case 256:
		return IRDMA_IRD_HW_SIZE_256;
	case 128:
		return IRDMA_IRD_HW_SIZE_128;
	case 64:
	case 32:
		return IRDMA_IRD_HW_SIZE_64;
	case 16:
	case 8:
		return IRDMA_IRD_HW_SIZE_16;
	case 4:
	default:
		break;
	}

	return IRDMA_IRD_HW_SIZE_4;
}
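
/*
 * Example (illustrative): ird_size = 10 rounds up as
 * roundup_pow_of_two(2 * 10) = 32 and encodes as IRDMA_IRD_HW_SIZE_64;
 * ird_size = 0 takes the fallback value 4 and encodes as
 * IRDMA_IRD_HW_SIZE_4.
 */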

/**
 * irdma_sc_qp_setctx_roce - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
void
irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info)
{
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp;
	u8 push_mode_en;
	u32 push_idx;
	u64 mac;

	roce_info = info->roce_info;
	udp = info->udp_info;

	mac = LS_64_1(roce_info->mac_addr[5], 16) |
	    LS_64_1(roce_info->mac_addr[4], 24) |
	    LS_64_1(roce_info->mac_addr[3], 32) |
	    LS_64_1(roce_info->mac_addr[2], 40) |
	    LS_64_1(roce_info->mac_addr[1], 48) |
	    LS_64_1(roce_info->mac_addr[0], 56);

	qp->user_pri = info->user_pri;
	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
		push_mode_en = 0;
		push_idx = 0;
	} else {
		push_mode_en = 1;
		push_idx = qp->push_idx;
	}
	set_64bit_val(qp_ctx, IRDMA_BYTE_0,
		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
	if (roce_info->dcqcn_en || roce_info->dctcp_en) {
		udp->tos &= ~ECN_CODE_PT_MASK;
		udp->tos |= ECN_CODE_PT_VAL;
	}

	set_64bit_val(qp_ctx, IRDMA_BYTE_24,
		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
	set_64bit_val(qp_ctx, IRDMA_BYTE_32,
		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_40,
		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_48,
		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_56,
		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
	set_64bit_val(qp_ctx, IRDMA_BYTE_64,
		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
	set_64bit_val(qp_ctx, IRDMA_BYTE_80,
		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
	set_64bit_val(qp_ctx, IRDMA_BYTE_88,
		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
	set_64bit_val(qp_ctx, IRDMA_BYTE_96,
		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
	set_64bit_val(qp_ctx, IRDMA_BYTE_112,
		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
	set_64bit_val(qp_ctx, IRDMA_BYTE_128,
		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
	set_64bit_val(qp_ctx, IRDMA_BYTE_144,
		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac);
	set_64bit_val(qp_ctx, IRDMA_BYTE_160,
		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
		      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
	set_64bit_val(qp_ctx, IRDMA_BYTE_184,
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_192,
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
	set_64bit_val(qp_ctx, IRDMA_BYTE_200,
		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
	set_64bit_val(qp_ctx, IRDMA_BYTE_208,
		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}
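
/*
 * Note: when DCQCN or DCTCP is enabled above, the low bits of the ToS
 * byte are rewritten, (tos & ~ECN_CODE_PT_MASK) | ECN_CODE_PT_VAL, so
 * the flow is marked ECN-capable before the context is written; the
 * ECN_CODE_PT_* values are defined elsewhere in the driver headers.
 */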

/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			       bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_add_local_mac_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_local_mac_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, IRDMA_BYTE_32, temp);

	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac entry delete
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 entry_idx, u8 ignore_ref_count,
			     bool post_sq)
{
	__le64 *wqe;
	u64 header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	    FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
	    FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);

	irdma_wmb();		/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
void
irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
		   struct irdma_qp_host_ctx_info *info)
{
	struct irdma_iwarp_offload_info *iw;
	struct irdma_tcp_offload_info *tcp;
	struct irdma_sc_dev *dev;
	u8 push_mode_en;
	u32 push_idx;
	u64 qw0, qw3, qw7 = 0, qw16 = 0;
	u64 mac = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	dev = qp->dev;
	if (iw->rcv_mark_en) {
		qp->pfpdu.marker_len = 4;
		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
	}
	qp->user_pri = info->user_pri;
	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
		push_mode_en = 0;
		push_idx = 0;
	} else {
		push_mode_en = 1;
		push_idx = qp->push_idx;
	}
	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
	    FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
	    FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
	    FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
	    FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
	    FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
	    FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);

	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);

	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
	    FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
				  qp->src_mac_addr_idx);
	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
	if (info->iwarp_info_valid) {
		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
		    FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
		    FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
		    FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
		    FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
		    FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
		    FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
			       iw->err_rq_idx_valid);
		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
		    FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
		set_64bit_val(qp_ctx, IRDMA_BYTE_144,
			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));

		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
			mac = LS_64_1(iw->mac_addr[5], 16) |
			    LS_64_1(iw->mac_addr[4], 24) |
			    LS_64_1(iw->mac_addr[3], 32) |
			    LS_64_1(iw->mac_addr[2], 40) |
			    LS_64_1(iw->mac_addr[1], 48) |
			    LS_64_1(iw->mac_addr[0], 56);
		}

		set_64bit_val(qp_ctx, IRDMA_BYTE_152,
			      mac | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
		set_64bit_val(qp_ctx, IRDMA_BYTE_160,
			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
			      FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset) |
			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset) |
			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
	}
	if (info->tcp_info_valid) {
		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
		    FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
		    FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
			       tcp->insert_vlan_tag) |
		    FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
		    FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
		    FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
		    FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);

		if (iw->ecn_en || iw->dctcp_en) {
			tcp->tos &= ~ECN_CODE_PT_MASK;
			tcp->tos |= ECN_CODE_PT_VAL;
		}

		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
		    FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
		    FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
		    FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
		    FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);

			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		}
		set_64bit_val(qp_ctx, IRDMA_BYTE_32,
			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_40,
			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
		    FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
			       tcp->ignore_tcp_opt) |
		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
			       tcp->ignore_tcp_uns_opt) |
		    FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
		    FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
		    FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
		set_64bit_val(qp_ctx, IRDMA_BYTE_72,
			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
		set_64bit_val(qp_ctx, IRDMA_BYTE_80,
			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_88,
			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_96,
			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
		set_64bit_val(qp_ctx, IRDMA_BYTE_104,
			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
		set_64bit_val(qp_ctx, IRDMA_BYTE_112,
			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
		set_64bit_val(qp_ctx, IRDMA_BYTE_120,
			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
		    FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
		set_64bit_val(qp_ctx, IRDMA_BYTE_184,
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_192,
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
		set_64bit_val(qp_ctx, IRDMA_BYTE_200,
			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
		set_64bit_val(qp_ctx, IRDMA_BYTE_208,
			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
	}

	set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0);
	set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3);
	set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7);
	set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}
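
/*
 * Note: unlike irdma_sc_qp_setctx_roce(), this function accumulates
 * qw0, qw3, qw7 and qw16 across the iwarp_info_valid and
 * tcp_info_valid branches and writes each of those context words
 * exactly once at the end, since both offload blocks contribute fields
 * to the same 64-bit words.
 */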
 1140 
 1141 /**
 1142  * irdma_sc_alloc_stag - mr stag alloc
 1143  * @dev: sc device struct
 1144  * @info: stag info
 1145  * @scratch: u64 saved to be used during cqp completion
 1146  * @post_sq: flag for cqp db to ring
 1147  */
 1148 static int
 1149 irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
 1150                     struct irdma_allocate_stag_info *info,
 1151                     u64 scratch, bool post_sq)
 1152 {
 1153         __le64 *wqe;
 1154         struct irdma_sc_cqp *cqp;
 1155         u64 hdr;
 1156         enum irdma_page_size page_size;
 1157 
 1158         if (!info->total_len && !info->all_memory)
 1159                 return -EINVAL;
 1160 
 1161         if (info->page_size == 0x40000000)
 1162                 page_size = IRDMA_PAGE_SIZE_1G;
 1163         else if (info->page_size == 0x200000)
 1164                 page_size = IRDMA_PAGE_SIZE_2M;
 1165         else
 1166                 page_size = IRDMA_PAGE_SIZE_4K;
 1167 
 1168         cqp = dev->cqp;
 1169         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 1170         if (!wqe)
 1171                 return -ENOSPC;
 1172 
 1173         set_64bit_val(wqe, IRDMA_BYTE_8,
 1174                       FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
 1175                       FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
 1176         set_64bit_val(wqe, IRDMA_BYTE_16,
 1177                       FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
 1178         set_64bit_val(wqe, IRDMA_BYTE_40,
 1179                       FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
 1180 
 1181         if (info->chunk_size)
 1182                 set_64bit_val(wqe, IRDMA_BYTE_48,
 1183                               FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
 1184 
 1185         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
 1186             FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
 1187             FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
 1188             FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
 1189             FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
 1190             FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
 1191             FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
 1192             FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
 1193             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 1194         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1195 
 1196         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1197 
 1198         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe,
 1199                         IRDMA_CQP_WQE_SIZE * 8);
 1200         if (post_sq)
 1201                 irdma_sc_cqp_post_sq(cqp);
 1202 
 1203         return 0;
 1204 }
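      #if 0	/* Editorial sketch, not part of the driver source. */
      /*
       * A minimal, hypothetical caller of irdma_sc_alloc_stag() to
       * illustrate the flow above: fill an irdma_allocate_stag_info, pick
       * a page size the encoder understands (0x40000000 = 1 GiB,
       * 0x200000 = 2 MiB, anything else falls back to 4 KiB) and post the
       * CQP WQE. All field values are illustrative only.
       */
      static int
      example_alloc_stag(struct irdma_sc_dev *dev, u32 stag_idx, u32 pd_id)
      {
      	struct irdma_allocate_stag_info info = {0};

      	info.stag_idx = stag_idx;	/* index part of the new STag */
      	info.pd_id = pd_id;		/* owning protection domain */
      	info.total_len = 0x200000;	/* 2 MiB region */
      	info.page_size = 0x200000;	/* encoded as IRDMA_PAGE_SIZE_2M */

      	/* scratch (0 here) is returned in the CQP completion event */
      	return irdma_sc_alloc_stag(dev, &info, 0, true);
      }
      #endif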
 1205 
 1206 /**
 1207  * irdma_sc_mr_reg_non_shared - non-shared mr registration
 1208  * @dev: sc device struct
 1209  * @info: mr info
 1210  * @scratch: u64 saved to be used during cqp completion
 1211  * @post_sq: flag for cqp db to ring
 1212  */
 1213 static int
 1214 irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
 1215                            struct irdma_reg_ns_stag_info *info,
 1216                            u64 scratch, bool post_sq)
 1217 {
 1218         __le64 *wqe;
 1219         u64 fbo;
 1220         struct irdma_sc_cqp *cqp;
 1221         u64 hdr;
 1222         u32 pble_obj_cnt;
 1223         bool remote_access;
 1224         u8 addr_type;
 1225         enum irdma_page_size page_size;
 1226 
 1227         if (!info->total_len && !info->all_memory)
 1228                 return -EINVAL;
 1229 
 1230         if (info->page_size == 0x40000000)
 1231                 page_size = IRDMA_PAGE_SIZE_1G;
 1232         else if (info->page_size == 0x200000)
 1233                 page_size = IRDMA_PAGE_SIZE_2M;
 1234         else if (info->page_size == 0x1000)
 1235                 page_size = IRDMA_PAGE_SIZE_4K;
 1236         else
 1237                 return -EINVAL;
 1238 
 1239         if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
 1240                                    IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
 1241                 remote_access = true;
 1242         else
 1243                 remote_access = false;
 1244 
 1245         pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 1246         if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
 1247                 return -EINVAL;
 1248 
 1249         cqp = dev->cqp;
 1250         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 1251         if (!wqe)
 1252                 return -ENOSPC;
 1253         fbo = info->va & (info->page_size - 1);
 1254 
 1255         set_64bit_val(wqe, IRDMA_BYTE_0,
 1256                       (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
 1257                        info->va : fbo));
 1258         set_64bit_val(wqe, IRDMA_BYTE_8,
 1259                       FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
 1260                       FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
 1261         set_64bit_val(wqe, IRDMA_BYTE_16,
 1262                       FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
 1263                       FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
 1264         if (!info->chunk_size)
 1265                 set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa);
 1266         else
 1267                 set_64bit_val(wqe, IRDMA_BYTE_48,
 1268                               FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
 1269 
 1270         set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index);
 1271 
 1272         addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
 1273         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
 1274             FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
 1275             FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
 1276             FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
 1277             FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
 1278             FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
 1279             FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
 1280             FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
 1281             FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
 1282             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 1283         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1284 
 1285         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1286 
 1287         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe,
 1288                         IRDMA_CQP_WQE_SIZE * 8);
 1289         if (post_sq)
 1290                 irdma_sc_cqp_post_sq(cqp);
 1291 
 1292         return 0;
 1293 }
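      /*
       * Editorial worked example of the fbo computed above, with assumed
       * values: for page_size = 0x200000 (2 MiB) and va = 0x7f3200123456,
       * fbo = va & (page_size - 1) = 0x123456, the byte offset of the
       * region within its first huge page. A VA-based registration puts
       * the VA itself in qword 0; a zero-based one puts this offset there.
       */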
 1294 
 1295 /**
 1296  * irdma_sc_dealloc_stag - deallocate stag
 1297  * @dev: sc device struct
 1298  * @info: dealloc stag info
 1299  * @scratch: u64 saved to be used during cqp completion
 1300  * @post_sq: flag for cqp db to ring
 1301  */
 1302 static int
 1303 irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
 1304                       struct irdma_dealloc_stag_info *info,
 1305                       u64 scratch, bool post_sq)
 1306 {
 1307         u64 hdr;
 1308         __le64 *wqe;
 1309         struct irdma_sc_cqp *cqp;
 1310 
 1311         cqp = dev->cqp;
 1312         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 1313         if (!wqe)
 1314                 return -ENOSPC;
 1315 
 1316         set_64bit_val(wqe, IRDMA_BYTE_8,
 1317                       FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
 1318         set_64bit_val(wqe, IRDMA_BYTE_16,
 1319                       FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
 1320 
 1321         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
 1322             FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
 1323             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 1324         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1325 
 1326         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1327 
 1328         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe,
 1329                         IRDMA_CQP_WQE_SIZE * 8);
 1330         if (post_sq)
 1331                 irdma_sc_cqp_post_sq(cqp);
 1332 
 1333         return 0;
 1334 }
 1335 
 1336 /**
 1337  * irdma_sc_mw_alloc - mw allocate
 1338  * @dev: sc device struct
 1339  * @info: memory window allocation information
 1340  * @scratch: u64 saved to be used during cqp completion
 1341  * @post_sq: flag for cqp db to ring
 1342  */
 1343 static int
 1344 irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
 1345                   struct irdma_mw_alloc_info *info, u64 scratch,
 1346                   bool post_sq)
 1347 {
 1348         u64 hdr;
 1349         struct irdma_sc_cqp *cqp;
 1350         __le64 *wqe;
 1351 
 1352         cqp = dev->cqp;
 1353         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 1354         if (!wqe)
 1355                 return -ENOSPC;
 1356 
 1357         set_64bit_val(wqe, IRDMA_BYTE_8,
 1358                       FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
 1359         set_64bit_val(wqe, IRDMA_BYTE_16,
 1360                       FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
 1361 
 1362         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
 1363             FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
 1364             FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
 1365                        info->mw1_bind_dont_vldt_key) |
 1366             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 1367         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1368 
 1369         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1370 
 1371         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe,
 1372                         IRDMA_CQP_WQE_SIZE * 8);
 1373         if (post_sq)
 1374                 irdma_sc_cqp_post_sq(cqp);
 1375 
 1376         return 0;
 1377 }
 1378 
 1379 /**
 1380  * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
 1381  * @qp: sc qp struct
 1382  * @info: fast mr info
 1383  * @post_sq: flag for cqp db to ring
 1384  */
 1385 int
 1386 irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
 1387                           struct irdma_fast_reg_stag_info *info,
 1388                           bool post_sq)
 1389 {
 1390         u64 temp, hdr;
 1391         __le64 *wqe;
 1392         u32 wqe_idx;
 1393         u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
 1394         enum irdma_page_size page_size;
 1395         struct irdma_post_sq_info sq_info = {0};
 1396 
 1397         if (info->page_size == 0x40000000)
 1398                 page_size = IRDMA_PAGE_SIZE_1G;
 1399         else if (info->page_size == 0x200000)
 1400                 page_size = IRDMA_PAGE_SIZE_2M;
 1401         else
 1402                 page_size = IRDMA_PAGE_SIZE_4K;
 1403 
 1404         sq_info.wr_id = info->wr_id;
 1405         sq_info.signaled = info->signaled;
 1406         sq_info.push_wqe = info->push_wqe;
 1407 
 1408         wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info);
 1409         if (!wqe)
 1410                 return -ENOSPC;
 1411 
 1412         qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
 1413         irdma_debug(qp->dev, IRDMA_DEBUG_MR,
 1414                     "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id,
 1415                     wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
 1416 
 1417         temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
 1418             (uintptr_t)info->va : info->fbo;
 1419         set_64bit_val(wqe, IRDMA_BYTE_0, temp);
 1420 
 1421         temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
 1422                          info->first_pm_pbl_index >> 16);
 1423         set_64bit_val(wqe, IRDMA_BYTE_8,
 1424                       FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
 1425                       FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT));
 1426         set_64bit_val(wqe, IRDMA_BYTE_16,
 1427                       info->total_len |
 1428                       FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
 1429 
 1430         hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
 1431             FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
 1432             FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
 1433             FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
 1434             FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
 1435             FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
 1436             FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
 1437             FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
 1438             FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
 1439             FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
 1440             FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
 1441             FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1442         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1443 
 1444         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1445 
 1446         irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
 1447                         IRDMA_QP_WQE_MIN_SIZE);
 1448         if (sq_info.push_wqe)
 1449                 irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
 1450         else if (post_sq)
 1451                 irdma_uk_qp_post_wr(&qp->qp_uk);
 1452 
 1453         return 0;
 1454 }
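      #if 0	/* Editorial sketch, not part of the driver source. */
      /*
       * A hypothetical fast-register posting to illustrate the flow above;
       * unlike the STag CQP ops, irdma_sc_mr_fast_register() builds its
       * WQE on the QP's own send queue. All field values here are
       * illustrative only.
       */
      static int
      example_fast_register(struct irdma_sc_qp *qp, void *buf_va, u64 pbl_pa)
      {
      	struct irdma_fast_reg_stag_info info = {0};

      	info.wr_id = 1;			/* echoed back in the CQE */
      	info.signaled = true;		/* request a completion */
      	info.page_size = 0x1000;	/* 4 KiB pages */
      	info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
      	info.va = buf_va;		/* VA being registered */
      	info.reg_addr_pa = pbl_pa;	/* physical PBL address */
      	info.total_len = 0x10000;	/* 64 KiB region */
      	info.stag_idx = 0x100;		/* previously allocated STag */
      	info.stag_key = 0xaa;		/* consumer key byte */

      	return irdma_sc_mr_fast_register(qp, &info, true);
      }
      #endif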
 1455 
 1456 /**
 1457  * irdma_sc_gen_rts_ae - request AE generated after RTS
 1458  * @qp: sc qp struct
 1459  */
 1460 static void
 1461 irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
 1462 {
 1463         __le64 *wqe;
 1464         u64 hdr;
 1465         struct irdma_qp_uk *qp_uk;
 1466 
 1467         qp_uk = &qp->qp_uk;
 1468 
 1469         wqe = qp_uk->sq_base[1].elem;
 1470 
 1471         hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
 1472             FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
 1473             FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1474         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1475 
 1476         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1477         irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe,
 1478                         IRDMA_QP_WQE_MIN_SIZE);
 1479 
 1480         wqe = qp_uk->sq_base[2].elem;
 1481         hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
 1482             FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1483         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1484 
 1485         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1486         irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
 1487                         IRDMA_QP_WQE_MIN_SIZE);
 1488 }
 1489 
 1490 /**
 1491  * irdma_sc_send_lsmm - send last streaming mode message
 1492  * @qp: sc qp struct
 1493  * @lsmm_buf: buffer with lsmm message
 1494  * @size: size of lsmm buffer
 1495  * @stag: stag of lsmm buffer
 1496  */
 1497 void
 1498 irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
 1499                    irdma_stag stag)
 1500 {
 1501         __le64 *wqe;
 1502         u64 hdr;
 1503         struct irdma_qp_uk *qp_uk;
 1504 
 1505         qp_uk = &qp->qp_uk;
 1506         wqe = qp_uk->sq_base->elem;
 1507 
 1508         set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
 1509         if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
 1510                 set_64bit_val(wqe, IRDMA_BYTE_8,
 1511                               FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
 1512                               FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
 1513         } else {
 1514                 set_64bit_val(wqe, IRDMA_BYTE_8,
 1515                               FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
 1516                               FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
 1517                               FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
 1518         }
 1519         set_64bit_val(wqe, IRDMA_BYTE_16, 0);
 1520 
 1521         hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
 1522             FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
 1523             FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
 1524             FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1525         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1526 
 1527         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1528 
 1529         irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe,
 1530                         IRDMA_QP_WQE_MIN_SIZE);
 1531 
 1532         if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
 1533                 irdma_sc_gen_rts_ae(qp);
 1534 }
 1535 
 1536 /**
 1537  * irdma_sc_send_lsmm_nostag - send lsmm without a stag, for a privileged qp
 1538  * @qp: sc qp struct
 1539  * @lsmm_buf: buffer with lsmm message
 1540  * @size: size of lsmm buffer
 1541  */
 1542 void
 1543 irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
 1544 {
 1545         __le64 *wqe;
 1546         u64 hdr;
 1547         struct irdma_qp_uk *qp_uk;
 1548 
 1549         qp_uk = &qp->qp_uk;
 1550         wqe = qp_uk->sq_base->elem;
 1551 
 1552         set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
 1553 
 1554         if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
 1555                 set_64bit_val(wqe, IRDMA_BYTE_8,
 1556                               FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
 1557         else
 1558                 set_64bit_val(wqe, IRDMA_BYTE_8,
 1559                               FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
 1560                               FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
 1561         set_64bit_val(wqe, IRDMA_BYTE_16, 0);
 1562 
 1563         hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
 1564             FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
 1565             FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
 1566             FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1567         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1568 
 1569         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1570 
 1571         irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe,
 1572                         IRDMA_QP_WQE_MIN_SIZE);
 1573 }
 1574 
 1575 /**
 1576  * irdma_sc_send_rtt - send a zero-length read (read0) or write (write0)
 1577  * @qp: sc qp struct
 1578  * @read: true to post a read0, false to post a write0
 1579  */
 1580 void
 1581 irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
 1582 {
 1583         __le64 *wqe;
 1584         u64 hdr;
 1585         struct irdma_qp_uk *qp_uk;
 1586 
 1587         qp_uk = &qp->qp_uk;
 1588         wqe = qp_uk->sq_base->elem;
 1589 
 1590         set_64bit_val(wqe, IRDMA_BYTE_0, 0);
 1591         set_64bit_val(wqe, IRDMA_BYTE_16, 0);
 1592         if (read) {
 1593                 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
 1594                         set_64bit_val(wqe, IRDMA_BYTE_8,
 1595                                       FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
 1596                 } else {
 1597                         set_64bit_val(wqe, IRDMA_BYTE_8,
 1598                                       (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID,
 1599                                                                 qp->qp_uk.swqe_polarity));
 1600                 }
 1601                 hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
 1602                     FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
 1603                     FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1604 
 1605         } else {
 1606                 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
 1607                         set_64bit_val(wqe, IRDMA_BYTE_8, 0);
 1608                 } else {
 1609                         set_64bit_val(wqe, IRDMA_BYTE_8,
 1610                                       FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
 1611                 }
 1612                 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
 1613                     FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
 1614         }
 1615 
 1616         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 1617 
 1618         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 1619 
 1620         irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe,
 1621                         IRDMA_QP_WQE_MIN_SIZE);
 1622 
 1623         if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
 1624                 irdma_sc_gen_rts_ae(qp);
 1625 }
 1626 
 1627 /**
 1628  * irdma_iwarp_opcode - return the RDMAP opcode of an incoming packet
 1629  * @info: aeq info for the packet
 1630  * @pkt: packet for error
 1631  */
 1632 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt) {
 1633         BE16 *mpa;
 1634         u32 opcode = 0xffffffff;
 1635 
 1636         if (info->q2_data_written) {
 1637                 mpa = (BE16 *) pkt;
 1638                 opcode = IRDMA_NTOHS(mpa[1]) & 0xf;
 1639         }
 1640 
 1641         return opcode;
 1642 }
 1643 
 1644 /**
 1645  * irdma_locate_mpa - return pointer to mpa in the pkt
 1646  * @pkt: packet with data
 1647  */
 1648 static u8 *irdma_locate_mpa(u8 *pkt) {
 1649         /* skip over ethernet header */
 1650         pkt += IRDMA_MAC_HLEN;
 1651 
 1652         /* Skip over IP and TCP headers */
 1653         pkt += 4 * (pkt[0] & 0x0f);
 1654         pkt += 4 * ((pkt[12] >> 4) & 0x0f);
 1655 
 1656         return pkt;
 1657 }
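      /*
       * Editorial worked example: for a plain IPv4/TCP frame the first IP
       * byte is 0x45, so 4 * (pkt[0] & 0x0f) = 20 bytes of IP header;
       * byte 12 of the TCP header is then 0x50, so
       * 4 * ((pkt[12] >> 4) & 0x0f) = 20 more bytes, leaving pkt at the
       * MPA header that begins the TCP payload. IP or TCP options simply
       * enlarge the corresponding multiplier.
       */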
 1658 
 1659 /**
 1660  * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
 1661  * @qp: sc qp ptr for pkt
 1662  * @hdr: term hdr
 1663  * @opcode: flush opcode for termhdr
 1664  * @layer_etype: error layer + error type
 1665  * @err: error code in the header
 1666  */
 1667 static void
 1668 irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
 1669                        struct irdma_terminate_hdr *hdr,
 1670                        enum irdma_flush_opcode opcode,
 1671                        u8 layer_etype, u8 err)
 1672 {
 1673         qp->flush_code = opcode;
 1674         hdr->layer_etype = layer_etype;
 1675         hdr->error_code = err;
 1676 }
 1677 
 1678 /**
 1679  * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
 1680  * @pkt: ptr to mpa in offending pkt
 1681  * @hdr: term hdr
 1682  * @copy_len: offending pkt length to be copied to term hdr
 1683  * @is_tagged: DDP tagged or untagged
 1684  */
 1685 static void
 1686 irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
 1687                            int *copy_len, u8 *is_tagged)
 1688 {
 1689         u16 ddp_seg_len;
 1690 
 1691         ddp_seg_len = IRDMA_NTOHS(*(BE16 *) pkt);
 1692         if (ddp_seg_len) {
 1693                 *copy_len = 2;
 1694                 hdr->hdrct = DDP_LEN_FLAG;
 1695                 if (pkt[2] & 0x80) {
 1696                         *is_tagged = 1;
 1697                         if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
 1698                                 *copy_len += TERM_DDP_LEN_TAGGED;
 1699                                 hdr->hdrct |= DDP_HDR_FLAG;
 1700                         }
 1701                 } else {
 1702                         if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
 1703                                 *copy_len += TERM_DDP_LEN_UNTAGGED;
 1704                                 hdr->hdrct |= DDP_HDR_FLAG;
 1705                         }
 1706                         if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
 1707                             ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
 1708                                 *copy_len += TERM_RDMA_LEN;
 1709                                 hdr->hdrct |= RDMA_HDR_FLAG;
 1710                         }
 1711                 }
 1712         }
 1713 }
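      /*
       * Editorial note: copy_len accumulates the 2-byte MPA length field
       * plus whichever DDP header (tagged or untagged) and, for an
       * untagged RDMA read request, RDMAP header were present, so the
       * terminate message can echo the offending headers back to the
       * peer; the hdrct flags record which pieces were captured.
       */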
 1714 
 1715 /**
 1716  * irdma_bld_terminate_hdr - build terminate message header
 1717  * @qp: qp associated with received terminate AE
 1718  * @info: the struct containing AE information
 1719  */
 1720 static int
 1721 irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
 1722                         struct irdma_aeqe_info *info)
 1723 {
 1724         u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
 1725         int copy_len = 0;
 1726         u8 is_tagged = 0;
 1727         u32 opcode;
 1728         struct irdma_terminate_hdr *termhdr;
 1729 
 1730         termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
 1731         memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
 1732 
 1733         if (info->q2_data_written) {
 1734                 pkt = irdma_locate_mpa(pkt);
 1735                 irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
 1736         }
 1737 
 1738         opcode = irdma_iwarp_opcode(info, pkt);
 1739         qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 1740         qp->sq_flush_code = info->sq;
 1741         qp->rq_flush_code = info->rq;
 1742 
 1743         switch (info->ae_id) {
 1744         case IRDMA_AE_AMP_UNALLOCATED_STAG:
 1745                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1746                 if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
 1747                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
 1748                                                (LAYER_DDP << 4) | DDP_TAGGED_BUF,
 1749                                                DDP_TAGGED_INV_STAG);
 1750                 else
 1751                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1752                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1753                                                RDMAP_INV_STAG);
 1754                 break;
 1755         case IRDMA_AE_AMP_BOUNDS_VIOLATION:
 1756                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1757                 if (info->q2_data_written)
 1758                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
 1759                                                (LAYER_DDP << 4) | DDP_TAGGED_BUF,
 1760                                                DDP_TAGGED_BOUNDS);
 1761                 else
 1762                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1763                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1764                                                RDMAP_INV_BOUNDS);
 1765                 break;
 1766         case IRDMA_AE_AMP_BAD_PD:
 1767                 switch (opcode) {
 1768                 case IRDMA_OP_TYPE_RDMA_WRITE:
 1769                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
 1770                                                (LAYER_DDP << 4) | DDP_TAGGED_BUF,
 1771                                                DDP_TAGGED_UNASSOC_STAG);
 1772                         break;
 1773                 case IRDMA_OP_TYPE_SEND_INV:
 1774                 case IRDMA_OP_TYPE_SEND_SOL_INV:
 1775                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1776                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1777                                                RDMAP_CANT_INV_STAG);
 1778                         break;
 1779                 default:
 1780                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1781                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1782                                                RDMAP_UNASSOC_STAG);
 1783                 }
 1784                 break;
 1785         case IRDMA_AE_AMP_INVALID_STAG:
 1786                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1787                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1788                                        (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1789                                        RDMAP_INV_STAG);
 1790                 break;
 1791         case IRDMA_AE_AMP_BAD_QP:
 1792                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
 1793                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1794                                        DDP_UNTAGGED_INV_QN);
 1795                 break;
 1796         case IRDMA_AE_AMP_BAD_STAG_KEY:
 1797         case IRDMA_AE_AMP_BAD_STAG_INDEX:
 1798                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1799                 switch (opcode) {
 1800                 case IRDMA_OP_TYPE_SEND_INV:
 1801                 case IRDMA_OP_TYPE_SEND_SOL_INV:
 1802                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
 1803                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
 1804                                                RDMAP_CANT_INV_STAG);
 1805                         break;
 1806                 default:
 1807                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1808                                                (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
 1809                                                RDMAP_INV_STAG);
 1810                 }
 1811                 break;
 1812         case IRDMA_AE_AMP_RIGHTS_VIOLATION:
 1813         case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
 1814         case IRDMA_AE_PRIV_OPERATION_DENIED:
 1815                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1816                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1817                                        (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1818                                        RDMAP_ACCESS);
 1819                 break;
 1820         case IRDMA_AE_AMP_TO_WRAP:
 1821                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1822                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 1823                                        (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
 1824                                        RDMAP_TO_WRAP);
 1825                 break;
 1826         case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 1827                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1828                                        (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
 1829                 break;
 1830         case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
 1831                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
 1832                                        (LAYER_DDP << 4) | DDP_CATASTROPHIC,
 1833                                        DDP_CATASTROPHIC_LOCAL);
 1834                 break;
 1835         case IRDMA_AE_LCE_QP_CATASTROPHIC:
 1836         case IRDMA_AE_DDP_NO_L_BIT:
 1837                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
 1838                                        (LAYER_DDP << 4) | DDP_CATASTROPHIC,
 1839                                        DDP_CATASTROPHIC_LOCAL);
 1840                 break;
 1841         case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
 1842                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1843                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1844                                        DDP_UNTAGGED_INV_MSN_RANGE);
 1845                 break;
 1846         case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
 1847                 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 1848                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
 1849                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1850                                        DDP_UNTAGGED_INV_TOO_LONG);
 1851                 break;
 1852         case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
 1853                 if (is_tagged)
 1854                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1855                                                (LAYER_DDP << 4) | DDP_TAGGED_BUF,
 1856                                                DDP_TAGGED_INV_DDP_VER);
 1857                 else
 1858                         irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1859                                                (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1860                                                DDP_UNTAGGED_INV_DDP_VER);
 1861                 break;
 1862         case IRDMA_AE_DDP_UBE_INVALID_MO:
 1863                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1864                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1865                                        DDP_UNTAGGED_INV_MO);
 1866                 break;
 1867         case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
 1868                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
 1869                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1870                                        DDP_UNTAGGED_INV_MSN_NO_BUF);
 1871                 break;
 1872         case IRDMA_AE_DDP_UBE_INVALID_QN:
 1873                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1874                                        (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
 1875                                        DDP_UNTAGGED_INV_QN);
 1876                 break;
 1877         case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
 1878                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
 1879                                        (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
 1880                                        RDMAP_INV_RDMAP_VER);
 1881                 break;
 1882         default:
 1883                 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
 1884                                        (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
 1885                                        RDMAP_UNSPECIFIED);
 1886                 break;
 1887         }
 1888 
 1889         if (copy_len)
 1890                 irdma_memcpy(termhdr + 1, pkt, copy_len);
 1891 
 1892         return sizeof(struct irdma_terminate_hdr) + copy_len;
 1893 }
 1894 
 1895 /**
 1896  * irdma_terminate_send_fin() - Send fin for terminate message
 1897  * @qp: qp associated with received terminate AE
 1898  */
 1899 void
 1900 irdma_terminate_send_fin(struct irdma_sc_qp *qp)
 1901 {
 1902         irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
 1903                              IRDMAQP_TERM_SEND_FIN_ONLY, 0);
 1904 }
 1905 
 1906 /**
 1907  * irdma_terminate_connection() - Bad AE and send terminate to remote QP
 1908  * @qp: qp associated with received terminate AE
 1909  * @info: the struct containing AE information
 1910  */
 1911 void
 1912 irdma_terminate_connection(struct irdma_sc_qp *qp,
 1913                            struct irdma_aeqe_info *info)
 1914 {
 1915         u8 termlen = 0;
 1916 
 1917         if (qp->term_flags & IRDMA_TERM_SENT)
 1918                 return;
 1919 
 1920         termlen = irdma_bld_terminate_hdr(qp, info);
 1921         irdma_terminate_start_timer(qp);
 1922         qp->term_flags |= IRDMA_TERM_SENT;
 1923         irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
 1924                              IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
 1925 }
 1926 
 1927 /**
 1928  * irdma_terminate_received - handle terminate received AE
 1929  * @qp: qp associated with received terminate AE
 1930  * @info: the struct containing AE information
 1931  */
 1932 void
 1933 irdma_terminate_received(struct irdma_sc_qp *qp,
 1934                          struct irdma_aeqe_info *info)
 1935 {
 1936         u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
 1937         BE32 *mpa;
 1938         u8 ddp_ctl;
 1939         u8 rdma_ctl;
 1940         u16 aeq_id = 0;
 1941         struct irdma_terminate_hdr *termhdr;
 1942 
 1943         mpa = (BE32 *) irdma_locate_mpa(pkt);
 1944         if (info->q2_data_written) {
 1945                 /* did not validate the frame - do it now */
 1946                 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
 1947                 rdma_ctl = ntohl(mpa[0]) & 0xff;
 1948                 if ((ddp_ctl & 0xc0) != 0x40)
 1949                         aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
 1950                 else if ((ddp_ctl & 0x03) != 1)
 1951                         aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
 1952                 else if (ntohl(mpa[2]) != 2)
 1953                         aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
 1954                 else if (ntohl(mpa[3]) != 1)
 1955                         aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
 1956                 else if (ntohl(mpa[4]) != 0)
 1957                         aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
 1958                 else if ((rdma_ctl & 0xc0) != 0x40)
 1959                         aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
 1960 
 1961                 info->ae_id = aeq_id;
 1962                 if (info->ae_id) {
 1963                         /* Bad terminate recvd - send back a terminate */
 1964                         irdma_terminate_connection(qp, info);
 1965                         return;
 1966                 }
 1967         }
 1968 
 1969         qp->term_flags |= IRDMA_TERM_RCVD;
 1970         qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 1971         termhdr = (struct irdma_terminate_hdr *)&mpa[5];
 1972         if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
 1973             termhdr->layer_etype == RDMAP_REMOTE_OP) {
 1974                 irdma_terminate_done(qp, 0);
 1975         } else {
 1976                 irdma_terminate_start_timer(qp);
 1977                 irdma_terminate_send_fin(qp);
 1978         }
 1979 }
 1980 
 1981 static int
 1982 irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 1983 {
 1984         return 0;
 1985 }
 1986 
 1987 static void
 1988 irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
 1989 {
 1990         /* do nothing */
 1991 }
 1992 
 1993 static void
 1994 irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
 1995 {
 1996         /* do nothing */
 1997 }
 1998 
 1999 /**
 2000  * irdma_sc_vsi_init - Init the vsi structure
 2001  * @vsi: pointer to vsi structure to initialize
 2002  * @info: the info used to initialize the vsi struct
 2003  */
 2004 void
 2005 irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
 2006                   struct irdma_vsi_init_info *info)
 2007 {
 2008         u8 i;
 2009 
 2010         vsi->dev = info->dev;
 2011         vsi->back_vsi = info->back_vsi;
 2012         vsi->register_qset = info->register_qset;
 2013         vsi->unregister_qset = info->unregister_qset;
 2014         vsi->mtu = info->params->mtu;
 2015         vsi->exception_lan_q = info->exception_lan_q;
 2016         vsi->vsi_idx = info->pf_data_vsi_num;
 2017 
 2018         irdma_set_qos_info(vsi, info->params);
 2019         for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
 2020                 mutex_init(&vsi->qos[i].qos_mutex);
 2021                 INIT_LIST_HEAD(&vsi->qos[i].qplist);
 2022         }
 2023         if (vsi->register_qset) {
 2024                 vsi->dev->ws_add = irdma_ws_add;
 2025                 vsi->dev->ws_remove = irdma_ws_remove;
 2026                 vsi->dev->ws_reset = irdma_ws_reset;
 2027         } else {
 2028                 vsi->dev->ws_add = irdma_null_ws_add;
 2029                 vsi->dev->ws_remove = irdma_null_ws_remove;
 2030                 vsi->dev->ws_reset = irdma_null_ws_reset;
 2031         }
 2032 }
 2033 
 2034 /**
 2035  * irdma_get_stats_idx - Return stats index
 2036  * @vsi: pointer to the vsi
 2037  */
 2038 static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi) {
 2039         struct irdma_stats_inst_info stats_info = {0};
 2040         struct irdma_sc_dev *dev = vsi->dev;
 2041 
 2042         if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 2043                 if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
 2044                                               &stats_info))
 2045                         return stats_info.stats_idx;
 2046         }
 2047 
 2048         return IRDMA_INVALID_STATS_IDX;
 2049 }
 2050 
 2051 /**
 2052  * irdma_vsi_stats_init - Initialize the vsi statistics
 2053  * @vsi: pointer to the vsi structure
 2054  * @info: The info structure used for initialization
 2055  */
 2056 int
 2057 irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
 2058                      struct irdma_vsi_stats_info *info)
 2059 {
 2060         struct irdma_dma_mem *stats_buff_mem;
 2061 
 2062         vsi->pestat = info->pestat;
 2063         vsi->pestat->hw = vsi->dev->hw;
 2064         vsi->pestat->vsi = vsi;
 2065 
 2066         stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
 2067         stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2;
 2068         stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw,
 2069                                                     stats_buff_mem,
 2070                                                     stats_buff_mem->size, 1);
 2071         if (!stats_buff_mem->va)
 2072                 return -ENOMEM;
 2073 
 2074         vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
 2075         vsi->pestat->gather_info.last_gather_stats_va =
 2076             (void *)((uintptr_t)stats_buff_mem->va +
 2077                      IRDMA_GATHER_STATS_BUF_SIZE);
 2078 
 2079         irdma_hw_stats_start_timer(vsi);
 2080 
 2081         /* When stat allocation is not required, default to fcn_id. */
 2082         vsi->stats_idx = info->fcn_id;
 2083         if (info->alloc_stats_inst) {
 2084                 u8 stats_idx = irdma_get_stats_idx(vsi);
 2085 
 2086                 if (stats_idx != IRDMA_INVALID_STATS_IDX) {
 2087                         vsi->stats_inst_alloc = true;
 2088                         vsi->stats_idx = stats_idx;
 2089                         vsi->pestat->gather_info.use_stats_inst = true;
 2090                         vsi->pestat->gather_info.stats_inst_index = stats_idx;
 2091                 }
 2092         }
 2093 
 2094         return 0;
 2095 }
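      /*
       * Editorial note: the gather buffer is sized for two snapshots; the
       * second half (last_gather_stats_va) begins IRDMA_GATHER_STATS_BUF_SIZE
       * bytes in, presumably so the previous snapshot can be kept alongside
       * the current one when statistics deltas are computed.
       */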
 2096 
 2097 /**
 2098  * irdma_vsi_stats_free - Free the vsi stats
 2099  * @vsi: pointer to the vsi structure
 2100  */
 2101 void
 2102 irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
 2103 {
 2104         struct irdma_stats_inst_info stats_info = {0};
 2105         struct irdma_sc_dev *dev = vsi->dev;
 2106 
 2107         if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 2108                 if (vsi->stats_inst_alloc) {
 2109                         stats_info.stats_idx = vsi->stats_idx;
 2110                         irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
 2111                                                  &stats_info);
 2112                 }
 2113         }
 2114 
 2115         if (!vsi->pestat)
 2116                 return;
 2117 
 2118         irdma_hw_stats_stop_timer(vsi);
 2119         irdma_free_dma_mem(vsi->pestat->hw,
 2120                            &vsi->pestat->gather_info.stats_buff_mem);
 2121 }
 2122 
 2123 /**
 2124  * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
 2125  * @wqsize: size of the wq (sq, rq) to be encoded
 2126  * @queue_type: queue type selected for the calculation algorithm
 2127  */
 2128 u8
 2129 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
 2130 {
 2131         u8 encoded_size = 0;
 2132 
 2133         /*
 2134          * cqp sq's hw coded value starts from 1 for size of 4 while it starts from 0 for qp's wqs.
 2135          */
 2136         if (queue_type == IRDMA_QUEUE_TYPE_CQP)
 2137                 encoded_size = 1;
 2138         wqsize >>= 2;
 2139         while (wqsize >>= 1)
 2140                 encoded_size++;
 2141 
 2142         return encoded_size;
 2143 }
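      /*
       * Editorial worked example: for wqsize = 256, 256 >> 2 = 64 and six
       * further halvings reach 1, so encoded_size = 6 for a QP work queue
       * (7 for the CQP SQ, whose encoding starts at 1 for a size of 4).
       * Equivalently this computes log2(wqsize) - 2, plus one for
       * IRDMA_QUEUE_TYPE_CQP.
       */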
 2144 
 2145 /**
 2146  * irdma_sc_gather_stats - collect the statistics
 2147  * @cqp: struct for cqp hw
 2148  * @info: gather stats info structure
 2149  * @scratch: u64 saved to be used during cqp completion
 2150  */
 2151 static int
 2152 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
 2153                       struct irdma_stats_gather_info *info,
 2154                       u64 scratch)
 2155 {
 2156         __le64 *wqe;
 2157         u64 temp;
 2158 
 2159         if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
 2160                 return -ENOSPC;
 2161 
 2162         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2163         if (!wqe)
 2164                 return -ENOSPC;
 2165 
 2166         set_64bit_val(wqe, IRDMA_BYTE_40,
 2167                       FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
 2168         set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa);
 2169 
 2170         temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
 2171             FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
 2172             FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
 2173                        info->stats_inst_index) |
 2174             FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
 2175                        info->use_hmc_fcn_index) |
 2176             FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
 2177         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2178 
 2179         set_64bit_val(wqe, IRDMA_BYTE_24, temp);
 2180 
 2181         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe,
 2182                         IRDMA_CQP_WQE_SIZE * 8);
 2183 
 2184         irdma_sc_cqp_post_sq(cqp);
 2185         irdma_debug(cqp->dev, IRDMA_DEBUG_STATS,
 2186                     "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
 2187                     cqp->sq_ring.tail, cqp->sq_ring.size);
 2188 
 2189         return 0;
 2190 }
 2191 
 2192 /**
 2193  * irdma_sc_manage_stats_inst - allocate or free stats instance
 2194  * @cqp: struct for cqp hw
 2195  * @info: stats info structure
 2196  * @alloc: alloc vs. delete flag
 2197  * @scratch: u64 saved to be used during cqp completion
 2198  */
 2199 static int
 2200 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
 2201                            struct irdma_stats_inst_info *info,
 2202                            bool alloc, u64 scratch)
 2203 {
 2204         __le64 *wqe;
 2205         u64 temp;
 2206 
 2207         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2208         if (!wqe)
 2209                 return -ENOSPC;
 2210 
 2211         set_64bit_val(wqe, IRDMA_BYTE_40,
 2212                       FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
 2213         temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
 2214             FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
 2215             FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
 2216                        info->use_hmc_fcn_index) |
 2217             FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
 2218             FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
 2219 
 2220         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2221 
 2222         set_64bit_val(wqe, IRDMA_BYTE_24, temp);
 2223 
 2224         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe,
 2225                         IRDMA_CQP_WQE_SIZE * 8);
 2226 
 2227         irdma_sc_cqp_post_sq(cqp);
 2228         return 0;
 2229 }
 2230 
 2231 /**
 2232  * irdma_sc_set_up_map - set the up map table
 2233  * @cqp: struct for cqp hw
 2234  * @info: User priority map info
 2235  * @scratch: u64 saved to be used during cqp completion
 2236  */
 2237 static int
 2238 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
 2239                     struct irdma_up_info *info, u64 scratch)
 2240 {
 2241         __le64 *wqe;
 2242         u64 temp;
 2243 
 2244         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2245         if (!wqe)
 2246                 return -ENOSPC;
 2247 
 2248         temp = info->map[0] | LS_64_1(info->map[1], 8) |
 2249             LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) |
 2250             LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) |
 2251             LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56);
 2252 
 2253         set_64bit_val(wqe, IRDMA_BYTE_0, temp);
 2254         set_64bit_val(wqe, IRDMA_BYTE_40,
 2255                       FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
 2256                       FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
 2257 
 2258         temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
 2259             FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
 2260             FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
 2261                        info->use_cnp_up_override) |
 2262             FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
 2263         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2264 
 2265         set_64bit_val(wqe, IRDMA_BYTE_24, temp);
 2266 
 2267         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe,
 2268                         IRDMA_CQP_WQE_SIZE * 8);
 2269         irdma_sc_cqp_post_sq(cqp);
 2270 
 2271         return 0;
 2272 }
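      /*
       * Editorial note: the LS_64_1() chain above packs the eight per-UP
       * map bytes into one 64-bit word, map[0] in bits 7:0 through map[7]
       * in bits 63:56, i.e. the layout a little-endian memcpy of the
       * 8-byte array would produce.
       */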
 2273 
 2274 /**
 2275  * irdma_sc_manage_ws_node - create/modify/destroy WS node
 2276  * @cqp: struct for cqp hw
 2277  * @info: node info structure
 2278  * @node_op: 0 for add, 1 for modify, 2 for delete
 2279  * @scratch: u64 saved to be used during cqp completion
 2280  */
 2281 static int
 2282 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
 2283                         struct irdma_ws_node_info *info,
 2284                         enum irdma_ws_node_op node_op, u64 scratch)
 2285 {
 2286         __le64 *wqe;
 2287         u64 temp = 0;
 2288 
 2289         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2290         if (!wqe)
 2291                 return -ENOSPC;
 2292 
 2293         set_64bit_val(wqe, IRDMA_BYTE_32,
 2294                       FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
 2295                       FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
 2296 
 2297         temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
 2298             FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
 2299             FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
 2300             FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
 2301             FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
 2302             FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
 2303             FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
 2304             FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
 2305             FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
 2306         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2307 
 2308         set_64bit_val(wqe, IRDMA_BYTE_24, temp);
 2309 
 2310         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe,
 2311                         IRDMA_CQP_WQE_SIZE * 8);
 2312         irdma_sc_cqp_post_sq(cqp);
 2313 
 2314         return 0;
 2315 }
 2316 
 2317 /**
 2318  * irdma_sc_qp_flush_wqes - flush qp's wqe
 2319  * @qp: sc qp
 2320  * @info: flush information
 2321  * @scratch: u64 saved to be used during cqp completion
 2322  * @post_sq: flag for cqp db to ring
 2323  */
 2324 int
 2325 irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
 2326                        struct irdma_qp_flush_info *info, u64 scratch,
 2327                        bool post_sq)
 2328 {
 2329         u64 temp = 0;
 2330         __le64 *wqe;
 2331         struct irdma_sc_cqp *cqp;
 2332         u64 hdr;
 2333         bool flush_sq = false, flush_rq = false;
 2334 
 2335         if (info->rq && !qp->flush_rq)
 2336                 flush_rq = true;
 2337         if (info->sq && !qp->flush_sq)
 2338                 flush_sq = true;
 2339         qp->flush_sq |= flush_sq;
 2340         qp->flush_rq |= flush_rq;
 2341 
 2342         if (!flush_sq && !flush_rq) {
 2343                 irdma_debug(qp->dev, IRDMA_DEBUG_CQP,
 2344                             "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id);
 2345                 return -EALREADY;
 2346         }
 2347 
 2348         cqp = qp->pd->dev->cqp;
 2349         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2350         if (!wqe)
 2351                 return -ENOSPC;
 2352 
 2353         if (info->userflushcode) {
 2354                 if (flush_rq)
 2355                         temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
 2356                                            info->rq_minor_code) |
 2357                             FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
 2358                                        info->rq_major_code);
 2359                 if (flush_sq)
 2360                         temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
 2361                                            info->sq_minor_code) |
 2362                             FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
 2363                                        info->sq_major_code);
 2364         }
 2365         set_64bit_val(wqe, IRDMA_BYTE_16, temp);
 2366 
 2367         temp = (info->generate_ae) ?
 2368             info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
 2369                                        info->ae_src) : 0;
 2370         set_64bit_val(wqe, IRDMA_BYTE_8, temp);
 2371 
 2372         hdr = qp->qp_uk.qp_id |
 2373             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
 2374             FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
 2375             FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
 2376             FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
 2377             FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
 2378             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2379         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2380 
 2381         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2382 
 2383         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe,
 2384                         IRDMA_CQP_WQE_SIZE * 8);
 2385         if (post_sq)
 2386                 irdma_sc_cqp_post_sq(cqp);
 2387 
 2388         return 0;
 2389 }
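      #if 0	/* Editorial sketch, not part of the driver source. */
      /*
       * A hypothetical caller flushing both work queues of a QP, to show
       * how the flags above combine; field names follow the function
       * above, all values are illustrative.
       */
      static int
      example_flush_qp(struct irdma_sc_qp *qp)
      {
      	struct irdma_qp_flush_info info = {0};

      	info.sq = true;			/* flush the send queue */
      	info.rq = true;			/* flush the receive queue */
      	info.userflushcode = false;	/* let HW pick completion codes */
      	info.generate_ae = false;	/* no asynchronous event */

      	/* post_sq = true rings the CQP doorbell immediately */
      	return irdma_sc_qp_flush_wqes(qp, &info, 0, true);
      }
      #endif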
 2390 
 2391 /**
 2392  * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
 2393  * @qp: sc qp
 2394  * @info: gen ae information
 2395  * @scratch: u64 saved to be used during cqp completion
 2396  * @post_sq: flag for cqp db to ring
 2397  */
 2398 static int
 2399 irdma_sc_gen_ae(struct irdma_sc_qp *qp,
 2400                 struct irdma_gen_ae_info *info, u64 scratch,
 2401                 bool post_sq)
 2402 {
 2403         u64 temp;
 2404         __le64 *wqe;
 2405         struct irdma_sc_cqp *cqp;
 2406         u64 hdr;
 2407 
 2408         cqp = qp->pd->dev->cqp;
 2409         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2410         if (!wqe)
 2411                 return -ENOSPC;
 2412 
 2413         temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
 2414                                           info->ae_src);
 2415         set_64bit_val(wqe, IRDMA_BYTE_8, temp);
 2416 
 2417         hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
 2418                                            IRDMA_CQP_OP_GEN_AE) |
 2419             FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
 2420             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2421         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2422 
 2423         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2424 
 2425         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe,
 2426                         IRDMA_CQP_WQE_SIZE * 8);
 2427         if (post_sq)
 2428                 irdma_sc_cqp_post_sq(cqp);
 2429 
 2430         return 0;
 2431 }
 2432 
 2433 /** irdma_sc_qp_upload_context - upload qp's context
 2434  * @dev: sc device struct
 2435  * @info: upload context info ptr for return
 2436  * @scratch: u64 saved to be used during cqp completion
 2437  * @post_sq: flag for cqp db to ring
 2438  */
 2439 static int
 2440 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
 2441                            struct irdma_upload_context_info *info,
 2442                            u64 scratch, bool post_sq)
 2443 {
 2444         __le64 *wqe;
 2445         struct irdma_sc_cqp *cqp;
 2446         u64 hdr;
 2447 
 2448         cqp = dev->cqp;
 2449         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2450         if (!wqe)
 2451                 return -ENOSPC;
 2452 
 2453         set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa);
 2454 
 2455         hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
 2456             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
 2457             FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
 2458             FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
 2459             FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
 2460             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2461         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2462 
 2463         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2464 
 2465         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe,
 2466                         IRDMA_CQP_WQE_SIZE * 8);
 2467         if (post_sq)
 2468                 irdma_sc_cqp_post_sq(cqp);
 2469 
 2470         return 0;
 2471 }
 2472 
 2473 /**
 2474  * irdma_sc_manage_push_page - Handle push page
 2475  * @cqp: struct for cqp hw
 2476  * @info: push page info
 2477  * @scratch: u64 saved to be used during cqp completion
 2478  * @post_sq: flag for cqp db to ring
 2479  */
 2480 static int
 2481 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
 2482                           struct irdma_cqp_manage_push_page_info *info,
 2483                           u64 scratch, bool post_sq)
 2484 {
 2485         __le64 *wqe;
 2486         u64 hdr;
 2487 
 2488         if (info->free_page &&
 2489             info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
 2490                 return -EINVAL;
 2491 
 2492         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2493         if (!wqe)
 2494                 return -ENOSPC;
 2495 
 2496         set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle);
 2497         hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
 2498             FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
 2499             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
 2500             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
 2501             FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
 2502         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2503 
 2504         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2505 
 2506         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe,
 2507                         IRDMA_CQP_WQE_SIZE * 8);
 2508         if (post_sq)
 2509                 irdma_sc_cqp_post_sq(cqp);
 2510 
 2511         return 0;
 2512 }
 2513 
 2514 /**
 2515  * irdma_sc_suspend_qp - suspend qp for param change
 2516  * @cqp: struct for cqp hw
 2517  * @qp: sc qp struct
 2518  * @scratch: u64 saved to be used during cqp completion
 2519  */
 2520 static int
 2521 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
 2522                     u64 scratch)
 2523 {
 2524         u64 hdr;
 2525         __le64 *wqe;
 2526 
 2527         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2528         if (!wqe)
 2529                 return -ENOSPC;
 2530 
 2531         hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
 2532             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
 2533             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2534         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2535 
 2536         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2537 
 2538         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe,
 2539                         IRDMA_CQP_WQE_SIZE * 8);
 2540         irdma_sc_cqp_post_sq(cqp);
 2541 
 2542         return 0;
 2543 }
 2544 
 2545 /**
 2546  * irdma_sc_resume_qp - resume qp after suspend
 2547  * @cqp: struct for cqp hw
 2548  * @qp: sc qp struct
 2549  * @scratch: u64 saved to be used during cqp completion
 2550  */
 2551 static int
 2552 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
 2553                    u64 scratch)
 2554 {
 2555         u64 hdr;
 2556         __le64 *wqe;
 2557 
 2558         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2559         if (!wqe)
 2560                 return -ENOSPC;
 2561 
 2562         set_64bit_val(wqe, IRDMA_BYTE_16,
 2563                       FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
 2564 
 2565         hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
 2566             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
 2567             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2568         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2569 
 2570         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2571 
 2572         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe,
 2573                         IRDMA_CQP_WQE_SIZE * 8);
 2574         irdma_sc_cqp_post_sq(cqp);
 2575 
 2576         return 0;
 2577 }
 2578 
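      /*
       * Editor's note: suspend and resume are intended to bracket QP
       * parameter changes.  A hedged sketch (error handling and the wait for
       * the suspend CQP completion are elided):
       */
      #if 0
              if (!irdma_sc_suspend_qp(cqp, qp, scratch)) {
                      /* ... wait for the CQP completion, change QP params ... */
                      irdma_sc_resume_qp(cqp, qp, scratch);
              }
      #endif
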
 2579 /**
 2580  * irdma_sc_cq_ack - acknowledge completion q
 2581  * @cq: cq struct
 2582  */
 2583 static inline void
 2584 irdma_sc_cq_ack(struct irdma_sc_cq *cq)
 2585 {
 2586         db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
 2587 }
 2588 
 2589 /**
 2590  * irdma_sc_cq_init - initialize completion q
 2591  * @cq: cq struct
 2592  * @info: cq initialization info
 2593  */
 2594 int
 2595 irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
 2596 {
 2597         int ret_code;
 2598         u32 pble_obj_cnt;
 2599 
 2600         pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 2601         if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
 2602                 return -EINVAL;
 2603 
 2604         cq->cq_pa = info->cq_base_pa;
 2605         cq->dev = info->dev;
 2606         cq->ceq_id = info->ceq_id;
 2607         info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
 2608         info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
 2609         ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
 2610         if (ret_code)
 2611                 return ret_code;
 2612 
 2613         cq->virtual_map = info->virtual_map;
 2614         cq->pbl_chunk_size = info->pbl_chunk_size;
 2615         cq->ceqe_mask = info->ceqe_mask;
 2616         cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
 2617         cq->shadow_area_pa = info->shadow_area_pa;
 2618         cq->shadow_read_threshold = info->shadow_read_threshold;
 2619         cq->ceq_id_valid = info->ceq_id_valid;
 2620         cq->tph_en = info->tph_en;
 2621         cq->tph_val = info->tph_val;
 2622         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
 2623         cq->vsi = info->vsi;
 2624 
 2625         return 0;
 2626 }
 2627 
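      /*
       * Editor's note: a minimal, hedged sketch of filling irdma_cq_init_info
       * before calling irdma_sc_cq_init().  Only fields consumed above are
       * shown; cq_dma, shadow_dma and my_cq are hypothetical caller-owned
       * objects.
       */
      #if 0
              struct irdma_cq_init_info info = {0};

              info.dev = dev;
              info.cq_base_pa = cq_dma.pa;            /* CQ ring physical address */
              info.shadow_area_pa = shadow_dma.pa;    /* shadow area physical address */
              info.ceq_id = 0;
              info.ceq_id_valid = true;
              info.type = IRDMA_CQ_TYPE_IWARP;
              ret = irdma_sc_cq_init(&my_cq, &info);
      #endif
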
 2628 /**
 2629  * irdma_sc_cq_create - create completion q
 2630  * @cq: cq struct
 2631  * @scratch: u64 saved to be used during cqp completion
 2632  * @check_overflow: flag for overflow check
 2633  * @post_sq: flag for cqp db to ring
 2634  */
 2635 static int
 2636 irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
 2637                    bool check_overflow, bool post_sq)
 2638 {
 2639         __le64 *wqe;
 2640         struct irdma_sc_cqp *cqp;
 2641         u64 hdr;
 2642         struct irdma_sc_ceq *ceq;
 2643         int ret_code = 0;
 2644 
 2645         cqp = cq->dev->cqp;
 2646         if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
 2647                 return -EINVAL;
 2648 
 2649         if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
 2650                 return -EINVAL;
 2651 
 2652         ceq = cq->dev->ceq[cq->ceq_id];
 2653         if (ceq && ceq->reg_cq)
 2654                 ret_code = irdma_sc_add_cq_ctx(ceq, cq);
 2655 
 2656         if (ret_code)
 2657                 return ret_code;
 2658 
 2659         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2660         if (!wqe) {
 2661                 if (ceq && ceq->reg_cq)
 2662                         irdma_sc_remove_cq_ctx(ceq, cq);
 2663                 return -ENOSPC;
 2664         }
 2665 
 2666         set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
 2667         set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
 2668         set_64bit_val(wqe, IRDMA_BYTE_16,
 2669                       FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
 2670         set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
 2671         set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
 2672         set_64bit_val(wqe, IRDMA_BYTE_48,
 2673                       FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
 2674         set_64bit_val(wqe, IRDMA_BYTE_56,
 2675                       FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
 2676                       FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
 2677 
 2678         hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
 2679             FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
 2680                       IRDMA_CQPSQ_CQ_CEQID) |
 2681             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
 2682             FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
 2683             FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
 2684             FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
 2685             FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
 2686             FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
 2687             FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
 2688             FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
 2689                        cq->cq_uk.avoid_mem_cflct) |
 2690             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2691 
 2692         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2693 
 2694         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2695 
 2696         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe,
 2697                         IRDMA_CQP_WQE_SIZE * 8);
 2698         if (post_sq)
 2699                 irdma_sc_cqp_post_sq(cqp);
 2700 
 2701         return 0;
 2702 }
 2703 
 2704 /**
 2705  * irdma_sc_cq_destroy - destroy completion q
 2706  * @cq: cq struct
 2707  * @scratch: u64 saved to be used during cqp completion
 2708  * @post_sq: flag for cqp db to ring
 2709  */
 2710 int
 2711 irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
 2712 {
 2713         struct irdma_sc_cqp *cqp;
 2714         __le64 *wqe;
 2715         u64 hdr;
 2716         struct irdma_sc_ceq *ceq;
 2717 
 2718         cqp = cq->dev->cqp;
 2719         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2720         if (!wqe)
 2721                 return -ENOSPC;
 2722 
 2723         ceq = cq->dev->ceq[cq->ceq_id];
 2724         if (ceq && ceq->reg_cq)
 2725                 irdma_sc_remove_cq_ctx(ceq, cq);
 2726 
 2727         set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
 2728         set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
 2729         set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
 2730         set_64bit_val(wqe, IRDMA_BYTE_48,
 2731                       (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
 2732 
 2733         hdr = cq->cq_uk.cq_id |
 2734             FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
 2735                       IRDMA_CQPSQ_CQ_CEQID) |
 2736             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
 2737             FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
 2738             FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
 2739             FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
 2740             FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
 2741             FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
 2742             FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
 2743             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2744         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2745 
 2746         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2747 
 2748         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe,
 2749                         IRDMA_CQP_WQE_SIZE * 8);
 2750         if (post_sq)
 2751                 irdma_sc_cqp_post_sq(cqp);
 2752 
 2753         return 0;
 2754 }
 2755 
 2756 /**
 2757  * irdma_sc_cq_resize - set resized cq buffer info
 2758  * @cq: resized cq
 2759  * @info: resized cq buffer info
 2760  */
 2761 void
 2762 irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
 2763 {
 2764         cq->virtual_map = info->virtual_map;
 2765         cq->cq_pa = info->cq_pa;
 2766         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
 2767         cq->pbl_chunk_size = info->pbl_chunk_size;
 2768         irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
 2769 }
 2770 
 2771 /**
 2772  * irdma_sc_cq_modify - modify a Completion Queue
 2773  * @cq: cq struct
 2774  * @info: modification info struct
 2775  * @scratch: u64 saved to be used during cqp completion
 2776  * @post_sq: flag to post to sq
 2777  */
 2778 static int
 2779 irdma_sc_cq_modify(struct irdma_sc_cq *cq,
 2780                    struct irdma_modify_cq_info *info, u64 scratch,
 2781                    bool post_sq)
 2782 {
 2783         struct irdma_sc_cqp *cqp;
 2784         __le64 *wqe;
 2785         u64 hdr;
 2786         u32 pble_obj_cnt;
 2787 
 2788         pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 2789         if (info->cq_resize && info->virtual_map &&
 2790             info->first_pm_pbl_idx >= pble_obj_cnt)
 2791                 return -EINVAL;
 2792 
 2793         cqp = cq->dev->cqp;
 2794         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 2795         if (!wqe)
 2796                 return -ENOSPC;
 2797 
 2798         set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size);
 2799         set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
 2800         set_64bit_val(wqe, IRDMA_BYTE_16,
 2801                       FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
 2802         set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa);
 2803         set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
 2804         set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx);
 2805         set_64bit_val(wqe, IRDMA_BYTE_56,
 2806                       FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
 2807                       FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
 2808 
 2809         hdr = cq->cq_uk.cq_id |
 2810             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
 2811             FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
 2812             FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
 2813             FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
 2814             FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
 2815             FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
 2816             FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
 2817             FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
 2818                        cq->cq_uk.avoid_mem_cflct) |
 2819             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 2820         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 2821 
 2822         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 2823 
 2824         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe,
 2825                         IRDMA_CQP_WQE_SIZE * 8);
 2826         if (post_sq)
 2827                 irdma_sc_cqp_post_sq(cqp);
 2828 
 2829         return 0;
 2830 }
 2831 
 2832 /**
 2833  * irdma_check_cqp_progress - check cqp processing progress
 2834  * @timeout: timeout info struct
 2835  * @dev: sc device struct
 2836  */
 2837 void
 2838 irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout,
 2839                          struct irdma_sc_dev *dev)
 2840 {
 2841         if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
 2842                 timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
 2843                 timeout->count = 0;
 2844         } else if (timeout->compl_cqp_cmds !=
 2845                    dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]) {
 2846                 timeout->count++;
 2847         }
 2848 }
 2849 
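      /*
       * Editor's note: irdma_check_cqp_progress() is meant to be called
       * periodically; timeout->count only grows while completed commands lag
       * behind requested ones.  A hedged watchdog sketch (the threshold and
       * loop condition are hypothetical):
       */
      #if 0
              struct irdma_cqp_timeout timeout = {0};

              while (cqp_cmds_outstanding) {          /* hypothetical condition */
                      irdma_check_cqp_progress(&timeout, dev);
                      if (timeout.count > STALL_THRESHOLD)    /* hypothetical limit */
                              break;  /* treat the CQP as hung */
                      irdma_usec_delay(dev->hw_attrs.max_sleep_count);
              }
      #endif
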
 2850 /**
 2851  * irdma_get_cqp_reg_info - get tail and error for cqp using registers
 2852  * @cqp: struct for cqp hw
 2853  * @val: cqp tail register value
 2854  * @tail: wqtail register value
 2855  * @error: cqp processing err
 2856  */
 2857 static inline void
 2858 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
 2859                        u32 *tail, u32 *error)
 2860 {
 2861         *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
 2862         *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
 2863         *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
 2864 }
 2865 
 2866 /**
 2867  * irdma_cqp_poll_registers - poll cqp registers
 2868  * @cqp: struct for cqp hw
 2869  * @tail: wqtail register value
 2870  * @count: how many times to try for completion
 2871  */
 2872 static int
 2873 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
 2874                          u32 count)
 2875 {
 2876         u32 i = 0;
 2877         u32 newtail, error, val;
 2878 
 2879         while (i++ < count) {
 2880                 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
 2881                 if (error) {
 2882                         error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
 2883                         irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
 2884                                     "CQPERRCODES error_code[x%08X]\n", error);
 2885                         return -EIO;
 2886                 }
 2887                 if (newtail != tail) {
 2888                         /* SUCCESS */
 2889                         IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
 2890                         cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
 2891                         return 0;
 2892                 }
 2893                 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
 2894         }
 2895 
 2896         return -ETIMEDOUT;
 2897 }
 2898 
 2899 /**
 2900  * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
 2901  * @dev: sc device struct
 2902  * @buf: pointer to commit buffer
 2903  * @buf_idx: buffer index
 2904  * @obj_info: object info pointer
 2905  * @rsrc_idx: index of memory resource
 2906  */
 2907 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
 2908                                       u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
 2909                                       u32 rsrc_idx)
      {
 2910         u64 temp;
 2911 
 2912         get_64bit_val(buf, buf_idx, &temp);
 2913 
 2914         switch (rsrc_idx) {
 2915         case IRDMA_HMC_IW_QP:
 2916                 obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
 2917                 break;
 2918         case IRDMA_HMC_IW_CQ:
 2919                 obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
 2920                 break;
 2921         case IRDMA_HMC_IW_APBVT_ENTRY:
 2922                 if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
 2923                         obj_info[rsrc_idx].cnt = 1;
 2924                 else
 2925                         obj_info[rsrc_idx].cnt = 0;
 2926                 break;
 2927         default:
 2928                 obj_info[rsrc_idx].cnt = (u32)temp;
 2929                 break;
 2930         }
 2931 
 2932         obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512;
 2933 
 2934         return temp;
 2935 }
 2936 
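      /*
       * Editor's note: the commit word packs an object count in its low bits
       * and a base in its high bits; RS_64_1() is a right shift, and the
       * multiply by 512 converts the base from 512-byte units to bytes.  A
       * standalone demo, assuming (for illustration) a base shift of 32:
       */
      #if 0
      #include <stdio.h>
      #include <stdint.h>

      int main(void)
      {
              uint64_t temp = 0x0000000400001000ULL;  /* example commit word */
              uint32_t cnt = (uint32_t)temp;          /* low bits: object count */
              uint64_t base = (temp >> 32) * 512;     /* high bits * 512 bytes */

              printf("cnt=%u base=%llu\n", cnt, (unsigned long long)base);
              return 0;                               /* cnt=4096 base=2048 */
      }
      #endif
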
 2937 /**
 2938  * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
 2939  * @dev: pointer to dev struct
 2940  * @buf: ptr to fpm commit buffer
 2941  * @info: ptr to irdma_hmc_obj_info struct
 2942  * @sd: number of SDs for HMC objects
 2943  *
 2944  * parses fpm commit info and copies base values
 2945  * of hmc objects into hmc_info
 2946  */
 2947 static int
 2948 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
 2949                               struct irdma_hmc_obj_info *info,
 2950                               u32 *sd)
 2951 {
 2952         u64 size;
 2953         u32 i;
 2954         u64 max_base = 0;
 2955         u32 last_hmc_obj = 0;
 2956 
 2957         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info,
 2958                                    IRDMA_HMC_IW_QP);
 2959         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info,
 2960                                    IRDMA_HMC_IW_CQ);
 2961         /* skipping RSRVD */
 2962         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info,
 2963                                    IRDMA_HMC_IW_HTE);
 2964         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info,
 2965                                    IRDMA_HMC_IW_ARP);
 2966         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info,
 2967                                    IRDMA_HMC_IW_APBVT_ENTRY);
 2968         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info,
 2969                                    IRDMA_HMC_IW_MR);
 2970         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info,
 2971                                    IRDMA_HMC_IW_XF);
 2972         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info,
 2973                                    IRDMA_HMC_IW_XFFL);
 2974         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info,
 2975                                    IRDMA_HMC_IW_Q1);
 2976         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info,
 2977                                    IRDMA_HMC_IW_Q1FL);
 2978         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info,
 2979                                    IRDMA_HMC_IW_TIMER);
 2980         irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info,
 2981                                    IRDMA_HMC_IW_PBLE);
 2982         /* skipping RSVD. */
 2983         if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
 2984                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info,
 2985                                            IRDMA_HMC_IW_FSIMC);
 2986                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info,
 2987                                            IRDMA_HMC_IW_FSIAV);
 2988                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info,
 2989                                            IRDMA_HMC_IW_RRF);
 2990                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info,
 2991                                            IRDMA_HMC_IW_RRFFL);
 2992                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info,
 2993                                            IRDMA_HMC_IW_HDR);
 2994                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
 2995                                            IRDMA_HMC_IW_MD);
 2996                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
 2997                                            IRDMA_HMC_IW_OOISC);
 2998                 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
 2999                                            IRDMA_HMC_IW_OOISCFFL);
 3000         }
 3001 
 3002         /* searching for the last object in HMC to find the size of the HMC area. */
 3003         for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
 3004                 if (info[i].base > max_base) {
 3005                         max_base = info[i].base;
 3006                         last_hmc_obj = i;
 3007                 }
 3008         }
 3009 
 3010         size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
 3011             info[last_hmc_obj].base;
 3012 
 3013         if (size & 0x1FFFFF)
 3014                 *sd = (u32)((size >> 21) + 1);  /* add 1 for remainder */
 3015         else
 3016                 *sd = (u32)(size >> 21);
 3017 
 3018         return 0;
 3019 }
 3020 
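      /*
       * Editor's note: each segment descriptor (SD) maps 2 MiB (1 << 21
       * bytes), so the *sd computation above is a divide-by-2MiB with
       * round-up, the remainder test being (size & 0x1FFFFF).  A standalone
       * demo of the arithmetic:
       */
      #if 0
      #include <stdio.h>
      #include <stdint.h>

      int main(void)
      {
              uint64_t size = (5ULL << 21) + 1;       /* 10 MiB plus one byte */
              uint32_t sd = (uint32_t)(size >> 21);

              if (size & 0x1FFFFF)
                      sd++;                           /* partial SD at the end */
              printf("sd=%u\n", sd);                  /* prints sd=6 */
              return 0;
      }
      #endif
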
 3021 /**
 3022  * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
 3023  * @buf: ptr to fpm query buffer
 3024  * @buf_idx: index into buf
 3025  * @obj_info: ptr to irdma_hmc_obj_info struct
 3026  * @rsrc_idx: resource index into info
 3027  *
 3028  * Decode a 64 bit value from fpm query buffer into max count and size
 3029  */
 3030 static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
 3031                                      struct irdma_hmc_obj_info *obj_info,
 3032                                      u32 rsrc_idx)
      {
 3033         u64 temp;
 3034         u32 size;
 3035 
 3036         get_64bit_val(buf, buf_idx, &temp);
 3037         obj_info[rsrc_idx].max_cnt = (u32)temp;
 3038         size = (u32)RS_64_1(temp, 32);
 3039         obj_info[rsrc_idx].size = LS_64_1(1, size);
 3040 
 3041         return temp;
 3042 }
 3043 
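      /*
       * Editor's note (worked example): the query word carries max_cnt in its
       * low 32 bits and a log2 object size in its high 32 bits, so
       * obj_info[rsrc_idx].size = 1ULL << (temp >> 32).  For instance, an
       * illustrative temp of 0x0000000800000100 decodes to max_cnt = 256
       * objects of size 1 << 8 = 256 bytes.
       */
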
 3044 /**
 3045  * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
 3046  * @dev: ptr to shared code device
 3047  * @buf: ptr to fpm query buffer
 3048  * @hmc_info: ptr to irdma_hmc_obj_info struct
 3049  * @hmc_fpm_misc: ptr to fpm data
 3050  *
 3051  * parses fpm query buffer and copies max_cnt and
 3052  * size values of hmc objects into hmc_info
 3053  */
 3054 static int
 3055 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 3056                              struct irdma_hmc_info *hmc_info,
 3057                              struct irdma_hmc_fpm_misc *hmc_fpm_misc)
 3058 {
 3059         struct irdma_hmc_obj_info *obj_info;
 3060         u64 temp;
 3061         u32 size;
 3062         u16 max_pe_sds;
 3063 
 3064         obj_info = hmc_info->hmc_obj;
 3065 
 3066         get_64bit_val(buf, IRDMA_BYTE_0, &temp);
 3067         hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
 3068         max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
 3069 
 3070         hmc_fpm_misc->max_sds = max_pe_sds;
 3071         hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
 3072         get_64bit_val(buf, IRDMA_BYTE_8, &temp);
 3073         obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
 3074         size = (u32)RS_64_1(temp, 32);
 3075         obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size);
 3076 
 3077         get_64bit_val(buf, IRDMA_BYTE_16, &temp);
 3078         obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
 3079         size = (u32)RS_64_1(temp, 32);
 3080         obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size);
 3081 
 3082         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_32, obj_info, IRDMA_HMC_IW_HTE);
 3083         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_40, obj_info, IRDMA_HMC_IW_ARP);
 3084 
 3085         obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
 3086         obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
 3087 
 3088         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_48, obj_info, IRDMA_HMC_IW_MR);
 3089         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_56, obj_info, IRDMA_HMC_IW_XF);
 3090 
 3091         get_64bit_val(buf, IRDMA_BYTE_64, &temp);
 3092         obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
 3093         obj_info[IRDMA_HMC_IW_XFFL].size = 4;
 3094         hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
 3095         if (!hmc_fpm_misc->xf_block_size)
 3096                 return -EINVAL;
 3097 
 3098         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_72, obj_info, IRDMA_HMC_IW_Q1);
 3099         get_64bit_val(buf, IRDMA_BYTE_80, &temp);
 3100         obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
 3101         obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
 3102 
 3103         hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
 3104         if (!hmc_fpm_misc->q1_block_size)
 3105                 return -EINVAL;
 3106 
 3107         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_88, obj_info, IRDMA_HMC_IW_TIMER);
 3108 
 3109         get_64bit_val(buf, IRDMA_BYTE_112, &temp);
 3110         obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
 3111         obj_info[IRDMA_HMC_IW_PBLE].size = 8;
 3112 
 3113         get_64bit_val(buf, IRDMA_BYTE_120, &temp);
 3114         hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
 3115         hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
 3116         hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
 3117         if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
 3118                 return 0;
 3119         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_96, obj_info, IRDMA_HMC_IW_FSIMC);
 3120         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_104, obj_info, IRDMA_HMC_IW_FSIAV);
 3121         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_128, obj_info, IRDMA_HMC_IW_RRF);
 3122 
 3123         get_64bit_val(buf, IRDMA_BYTE_136, &temp);
 3124         obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
 3125         obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
 3126         hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
 3127         if (!hmc_fpm_misc->rrf_block_size &&
 3128             obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
 3129                 return -EINVAL;
 3130 
 3131         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_144, obj_info, IRDMA_HMC_IW_HDR);
 3132         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_152, obj_info, IRDMA_HMC_IW_MD);
 3133         irdma_sc_decode_fpm_query(buf, IRDMA_BYTE_160, obj_info, IRDMA_HMC_IW_OOISC);
 3134 
 3135         get_64bit_val(buf, IRDMA_BYTE_168, &temp);
 3136         obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
 3137         obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
 3138         hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
 3139         if (!hmc_fpm_misc->ooiscf_block_size &&
 3140             obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
 3141                 return -EINVAL;
 3142 
 3143         return 0;
 3144 }
 3145 
 3146 /**
 3147  * irdma_sc_find_reg_cq - find cq ctx index
 3148  * @ceq: ceq sc structure
 3149  * @cq: cq sc structure
 3150  */
 3151 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
 3152                                 struct irdma_sc_cq *cq)
      {
 3153         u32 i;
 3154 
 3155         for (i = 0; i < ceq->reg_cq_size; i++) {
 3156                 if (cq == ceq->reg_cq[i])
 3157                         return i;
 3158         }
 3159 
 3160         return IRDMA_INVALID_CQ_IDX;
 3161 }
 3162 
 3163 /**
 3164  * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
 3165  * @ceq: ceq sc structure
 3166  * @cq: cq sc structure
 3167  */
 3168 int
 3169 irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
 3170 {
 3171         unsigned long flags;
 3172 
 3173         spin_lock_irqsave(&ceq->req_cq_lock, flags);
 3174 
 3175         if (ceq->reg_cq_size == ceq->elem_cnt) {
 3176                 spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
 3177                 return -ENOSPC;
 3178         }
 3179 
 3180         ceq->reg_cq[ceq->reg_cq_size++] = cq;
 3181 
 3182         spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
 3183 
 3184         return 0;
 3185 }
 3186 
 3187 /**
 3188  * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
 3189  * @ceq: ceq sc structure
 3190  * @cq: cq sc structure
 3191  */
 3192 void
 3193 irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
 3194 {
 3195         unsigned long flags;
 3196         u32 cq_ctx_idx;
 3197 
 3198         spin_lock_irqsave(&ceq->req_cq_lock, flags);
 3199         cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
 3200         if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
 3201                 goto exit;
 3202 
 3203         ceq->reg_cq_size--;
 3204         if (cq_ctx_idx != ceq->reg_cq_size)
 3205                 ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
 3206         ceq->reg_cq[ceq->reg_cq_size] = NULL;
 3207 
 3208 exit:
 3209         spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
 3210 }
 3211 
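      /*
       * Editor's note: irdma_sc_remove_cq_ctx() uses the unordered-array
       * removal idiom -- move the last element into the vacated slot and
       * shrink the count -- trading element order for O(1) removal.  A
       * standalone demo of the idiom:
       */
      #if 0
      #include <stdio.h>

      static int arr[8] = {10, 20, 30, 40};
      static int n = 4;

      static void swap_remove(int idx)
      {
              n--;
              if (idx != n)
                      arr[idx] = arr[n];      /* last element fills the hole */
              arr[n] = 0;
      }

      int main(void)
      {
              swap_remove(1);                 /* removes 20 */
              printf("%d %d %d\n", arr[0], arr[1], arr[2]);   /* 10 40 30 */
              return 0;
      }
      #endif
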
 3212 /**
 3213  * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
 3214  * @cqp: IWARP control queue pair pointer
 3215  * @info: IWARP control queue pair init info pointer
 3216  *
 3217  * Initializes the object and context buffers for a control Queue Pair.
 3218  */
 3219 int
 3220 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
 3221                   struct irdma_cqp_init_info *info)
 3222 {
 3223         u8 hw_sq_size;
 3224 
 3225         if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
 3226             info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
 3227             ((info->sq_size & (info->sq_size - 1))))
 3228                 return -EINVAL;
 3229 
 3230         hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
 3231                                                 IRDMA_QUEUE_TYPE_CQP);
 3232         cqp->size = sizeof(*cqp);
 3233         cqp->sq_size = info->sq_size;
 3234         cqp->hw_sq_size = hw_sq_size;
 3235         cqp->sq_base = info->sq;
 3236         cqp->host_ctx = info->host_ctx;
 3237         cqp->sq_pa = info->sq_pa;
 3238         cqp->host_ctx_pa = info->host_ctx_pa;
 3239         cqp->dev = info->dev;
 3240         cqp->struct_ver = info->struct_ver;
 3241         cqp->hw_maj_ver = info->hw_maj_ver;
 3242         cqp->hw_min_ver = info->hw_min_ver;
 3243         cqp->scratch_array = info->scratch_array;
 3244         cqp->polarity = 0;
 3245         cqp->en_datacenter_tcp = info->en_datacenter_tcp;
 3246         cqp->ena_vf_count = info->ena_vf_count;
 3247         cqp->hmc_profile = info->hmc_profile;
 3248         cqp->ceqs_per_vf = info->ceqs_per_vf;
 3249         cqp->disable_packed = info->disable_packed;
 3250         cqp->rocev2_rto_policy = info->rocev2_rto_policy;
 3251         cqp->protocol_used = info->protocol_used;
 3252         irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
 3253         cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk;
 3254         info->dev->cqp = cqp;
 3255 
 3256         IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
 3257         cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
 3258         cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
 3259         /* for the cqp commands backlog. */
 3260         INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
 3261 
 3262         writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
 3263         writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
 3264         writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
 3265 
 3266         irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
 3267                     "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n",
 3268                     cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp,
 3269                     cqp->polarity);
 3270         return 0;
 3271 }
 3272 
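      /*
       * Editor's note: the (sq_size & (sq_size - 1)) test above is the
       * standard power-of-two check -- a power of two has exactly one set
       * bit, so clearing its lowest set bit yields zero.  A standalone demo:
       */
      #if 0
      #include <stdio.h>
      #include <stdbool.h>
      #include <stdint.h>

      static bool is_pow2(uint32_t n)
      {
              return n && !(n & (n - 1));     /* zero excluded explicitly */
      }

      int main(void)
      {
              printf("%d %d %d\n", is_pow2(1024), is_pow2(1536), is_pow2(4));
              return 0;                       /* prints 1 0 1 */
      }
      #endif
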
 3273 /**
 3274  * irdma_sc_cqp_create - create cqp during bringup
 3275  * @cqp: struct for cqp hw
 3276  * @maj_err: If error, major err number
 3277  * @min_err: If error, minor err number
 3278  */
 3279 int
 3280 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
 3281 {
 3282         u64 temp;
 3283         u8 hw_rev;
 3284         u32 cnt = 0, p1, p2, val = 0, err_code;
 3285         int ret_code;
 3286 
 3287         hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
 3288         cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size;
 3289         cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf,
 3290                                                cqp->sdbuf.size,
 3291                                                IRDMA_SD_BUF_ALIGNMENT);
 3292         if (!cqp->sdbuf.va)
 3293                 return -ENOMEM;
 3294 
 3295         spin_lock_init(&cqp->dev->cqp_lock);
 3296 
 3297         temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
 3298             FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
 3299             FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
 3300             FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
 3301         if (hw_rev >= IRDMA_GEN_2) {
 3302                 temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
 3303                                    cqp->rocev2_rto_policy) |
 3304                     FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
 3305                                cqp->protocol_used);
 3306         }
 3307 
 3308         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp);
 3309         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa);
 3310 
 3311         temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
 3312             FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
 3313         if (hw_rev >= IRDMA_GEN_2)
 3314                 temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
 3315                                    cqp->en_rem_endpoint_trk);
 3316         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp);
 3317         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp);
 3318         temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
 3319             FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
 3320         if (hw_rev >= IRDMA_GEN_2) {
 3321                 temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
 3322                     FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
 3323         }
 3324         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp);
 3325         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0);
 3326         temp = 0;
 3327         if (hw_rev >= IRDMA_GEN_2) {
 3328                 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
 3329                     FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
 3330                     FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
 3331         }
 3332         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp);
 3333         temp = 0;
 3334         if (hw_rev >= IRDMA_GEN_2) {
 3335                 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
 3336                     FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
 3337                     FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
 3338                     FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
 3339         }
 3340         set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp);
 3341         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE",
 3342                         cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8);
 3343         p1 = RS_32_1(cqp->host_ctx_pa, 32);
 3344         p2 = (u32)cqp->host_ctx_pa;
 3345 
 3346         writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
 3347         writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
 3348 
 3349         do {
 3350                 if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
 3351                         ret_code = -ETIMEDOUT;
 3352                         goto err;
 3353                 }
 3354                 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
 3355                 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
 3356         } while (!val);
 3357 
 3358         if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
 3359                 ret_code = -EOPNOTSUPP;
 3360                 goto err;
 3361         }
 3362 
 3363         cqp->process_cqp_sds = irdma_update_sds_noccq;
 3364         return 0;
 3365 
 3366 err:
 3367         spin_lock_destroy(&cqp->dev->cqp_lock);
 3368         irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
 3369         err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
 3370         *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
 3371         *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
 3372         return ret_code;
 3373 }
 3374 
 3375 /**
 3376  * irdma_sc_cqp_post_sq - post to cqp's sq
 3377  * @cqp: struct for cqp hw
 3378  */
 3379 void
 3380 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
 3381 {
 3382         db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
 3383 
 3384         irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
 3385                     "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
 3386                     cqp->sq_ring.tail, cqp->sq_ring.size);
 3387 }
 3388 
 3389 /**
 3390  * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
 3391  * and pass back index
 3392  * @cqp: CQP HW structure
 3393  * @scratch: private data for CQP WQE
 3394  * @wqe_idx: WQE index of CQP SQ
 3395  */
 3396 __le64 *
 3397 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
 3398                                    u32 *wqe_idx)
 3399 {
 3400         __le64 *wqe = NULL;
 3401         int ret_code;
 3402 
 3403         if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
 3404                 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
 3405                             "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
 3406                             cqp->sq_ring.head, cqp->sq_ring.tail,
 3407                             cqp->sq_ring.size);
 3408                 return NULL;
 3409         }
 3410         IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
 3411         if (ret_code)
 3412                 return NULL;
 3413 
 3414         cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
 3415         if (!*wqe_idx)
 3416                 cqp->polarity = !cqp->polarity;
 3417         wqe = cqp->sq_base[*wqe_idx].elem;
 3418         cqp->scratch_array[*wqe_idx] = scratch;
 3419 
 3420         memset(&wqe[0], 0, 24);
 3421         memset(&wqe[4], 0, 32);
 3422 
 3423         return wqe;
 3424 }
 3425 
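      /*
       * Editor's note: the polarity flip above (whenever the head index wraps
       * to 0) lets hardware and software distinguish fresh WQEs from stale
       * ones without zeroing the ring: the expected valid bit inverts on each
       * full lap.  A standalone demo of the wrap/flip idiom:
       */
      #if 0
      #include <stdio.h>
      #include <stdbool.h>

      int main(void)
      {
              unsigned int head = 0, ring_size = 4, i;
              bool polarity = false;

              for (i = 0; i < 10; i++) {
                      head = (head + 1) % ring_size;
                      if (head == 0)
                              polarity = !polarity;   /* flips once per lap */
              }
              printf("head=%u polarity=%d\n", head, polarity);
              return 0;                       /* head=2 polarity=0 */
      }
      #endif
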
 3426 /**
 3427  * irdma_sc_cqp_destroy - destroy cqp during close
 3428  * @cqp: struct for cqp hw
 3429  * @free_hwcqp: true for regular cqp destroy; false for reset path
 3430  */
 3431 int
 3432 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp)
 3433 {
 3434         u32 cnt = 0, val;
 3435         int ret_code = 0;
 3436 
 3437         if (free_hwcqp) {
 3438                 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
 3439                 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
 3440                 do {
 3441                         if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
 3442                                 ret_code = -ETIMEDOUT;
 3443                                 break;
 3444                         }
 3445                         irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
 3446                         val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
 3447                 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
 3448         }
 3449         irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
 3450         spin_lock_destroy(&cqp->dev->cqp_lock);
 3451         return ret_code;
 3452 }
 3453 
 3454 /**
 3455  * irdma_sc_ccq_arm - enable intr for control cq
 3456  * @ccq: ccq sc struct
 3457  */
 3458 void
 3459 irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
 3460 {
 3461         u64 temp_val;
 3462         u16 sw_cq_sel;
 3463         u8 arm_next_se;
 3464         u8 arm_seq_num;
 3465 
 3466         get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
 3467         sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
 3468         arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
 3469         arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
 3470         arm_seq_num++;
 3471         temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
 3472             FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
 3473             FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
 3474             FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
 3475         set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
 3476 
 3477         irdma_wmb();            /* make sure shadow area is updated before arming */
 3478 
 3479         db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
 3480 }
 3481 
 3482 /**
 3483  * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
 3484  * @ccq: ccq sc struct
 3485  * @info: completion q entry to return
 3486  */
 3487 int
 3488 irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
 3489                           struct irdma_ccq_cqe_info *info)
 3490 {
 3491         u64 qp_ctx, temp, temp1;
 3492         __le64 *cqe;
 3493         struct irdma_sc_cqp *cqp;
 3494         u32 wqe_idx;
 3495         u32 error;
 3496         u8 polarity;
 3497         int ret_code = 0;
 3498 
 3499         if (ccq->cq_uk.avoid_mem_cflct)
 3500                 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
 3501         else
 3502                 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
 3503 
 3504         get_64bit_val(cqe, IRDMA_BYTE_24, &temp);
 3505         polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
 3506         if (polarity != ccq->cq_uk.polarity)
 3507                 return -ENOENT;
 3508 
 3509         get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx);
 3510         cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx;
 3511         info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
 3512         info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
 3513         info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
 3514         if (info->error) {
 3515                 info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
 3516                 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
 3517                 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
 3518                             "CQPERRCODES error_code[x%08X]\n", error);
 3519         }
 3520 
 3521         wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
 3522         info->scratch = cqp->scratch_array[wqe_idx];
 3523 
 3524         get_64bit_val(cqe, IRDMA_BYTE_16, &temp1);
 3525         info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
 3526 
 3527         get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1);
 3528         info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
 3529         info->cqp = cqp;
 3530 
 3531         /* move the head for cq */
 3532         IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
 3533         if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
 3534                 ccq->cq_uk.polarity ^= 1;
 3535 
 3536         /* update cq tail in cq shadow memory also */
 3537         IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
 3538         set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0,
 3539                       IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
 3540 
 3541         irdma_wmb();            /* make sure shadow area is updated before moving tail */
 3542 
 3543         IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
 3544         ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
 3545 
 3546         return ret_code;
 3547 }
 3548 
 3549 /**
 3550  * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 3551  * @cqp: struct for cqp hw
 3552  * @op_code: cqp opcode for completion
 3553  * @compl_info: completion q entry to return
 3554  */
 3555 int
 3556 irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
 3557                               struct irdma_ccq_cqe_info *compl_info)
 3558 {
 3559         struct irdma_ccq_cqe_info info = {0};
 3560         struct irdma_sc_cq *ccq;
 3561         int ret_code = 0;
 3562         u32 cnt = 0;
 3563 
 3564         ccq = cqp->dev->ccq;
 3565         while (1) {
 3566                 if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
 3567                         return -ETIMEDOUT;
 3568 
 3569                 if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
 3570                         irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
 3571                         continue;
 3572                 }
 3573                 if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
 3574                         ret_code = -EIO;
 3575                         break;
 3576                 }
 3577                 /* make sure op code matches */
 3578                 if (op_code == info.op_code)
 3579                         break;
 3580                 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
 3581                             "opcode mismatch: expected 0x%x, returned 0x%x\n",
 3582                             op_code, info.op_code);
 3583         }
 3584 
 3585         if (compl_info)
 3586                 irdma_memcpy(compl_info, &info, sizeof(*compl_info));
 3587 
 3588         return ret_code;
 3589 }
 3590 
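      /*
       * Editor's note: callers typically pair a posted CQP WQE with this
       * poll, matching on the opcode they issued.  A hedged sketch using the
       * upload-context op defined above (upload_info and scratch are
       * hypothetical caller state):
       */
      #if 0
              ret = irdma_sc_qp_upload_context(dev, &upload_info, scratch, true);
              if (!ret)
                      ret = irdma_sc_poll_for_cqp_op_done(dev->cqp,
                                                          IRDMA_CQP_OP_UPLOAD_CONTEXT,
                                                          NULL);
      #endif
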
 3591 /**
 3592  * irdma_sc_manage_hmc_pm_func_table - manage hmc pm function table
 3593  * @cqp: struct for cqp hw
 3594  * @info: info for the manage function table operation
 3595  * @scratch: u64 saved to be used during cqp completion
 3596  * @post_sq: flag for cqp db to ring
 3597  */
 3598 static int
 3599 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
 3600                                   struct irdma_hmc_fcn_info *info,
 3601                                   u64 scratch, bool post_sq)
 3602 {
 3603         __le64 *wqe;
 3604         u64 hdr;
 3605 
 3606         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 3607         if (!wqe)
 3608                 return -ENOSPC;
 3609 
 3610         hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
 3611             FIELD_PREP(IRDMA_CQPSQ_OPCODE,
 3612                        IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
 3613             FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
 3614             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 3615         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 3616 
 3617         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 3618 
 3619         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE,
 3620                         "MANAGE_HMC_PM_FUNC_TABLE WQE", wqe,
 3621                         IRDMA_CQP_WQE_SIZE * 8);
 3622         if (post_sq)
 3623                 irdma_sc_cqp_post_sq(cqp);
 3624 
 3625         return 0;
 3626 }
 3627 
 3628 /**
 3629  * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
 3630  * for fpm commit
 3631  * @cqp: struct for cqp hw
 3632  */
 3633 static int
 3634 irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
 3635 {
 3636         return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
 3637                                              NULL);
 3638 }
 3639 
 3640 /**
 3641  * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
 3642  * @cqp: struct for cqp hw
 3643  * @scratch: u64 saved to be used during cqp completion
 3644  * @hmc_fn_id: hmc function id
 3645  * @commit_fpm_mem: Memory for fpm values
 3646  * @post_sq: flag for cqp db to ring
 3647  * @wait_type: poll ccq or cqp registers for cqp completion
 3648  */
 3649 static int
 3650 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
 3651                         u16 hmc_fn_id,
 3652                         struct irdma_dma_mem *commit_fpm_mem,
 3653                         bool post_sq, u8 wait_type)
 3654 {
 3655         __le64 *wqe;
 3656         u64 hdr;
 3657         u32 tail, val, error;
 3658         int ret_code = 0;
 3659 
 3660         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 3661         if (!wqe)
 3662                 return -ENOSPC;
 3663 
 3664         set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
 3665         set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa);
 3666 
 3667         hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
 3668             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
 3669             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 3670 
 3671         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 3672 
 3673         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 3674 
 3675         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe,
 3676                         IRDMA_CQP_WQE_SIZE * 8);
 3677         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 3678 
 3679         if (post_sq) {
 3680                 irdma_sc_cqp_post_sq(cqp);
 3681                 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
 3682                         ret_code = irdma_cqp_poll_registers(cqp, tail,
 3683                                                             cqp->dev->hw_attrs.max_done_count);
 3684                 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
 3685                         ret_code = irdma_sc_commit_fpm_val_done(cqp);
 3686         }
 3687 
 3688         return ret_code;
 3689 }
 3690 
 3691 /**
 3692  * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
 3693  * query fpm
 3694  * @cqp: struct for cqp hw
 3695  */
 3696 static int
 3697 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
 3698 {
 3699         return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
 3700                                              NULL);
 3701 }
 3702 
 3703 /**
 3704  * irdma_sc_query_fpm_val - cqp wqe query fpm values
 3705  * @cqp: struct for cqp hw
 3706  * @scratch: u64 saved to be used during cqp completion
 3707  * @hmc_fn_id: hmc function id
 3708  * @query_fpm_mem: memory for return fpm values
 3709  * @post_sq: flag for cqp db to ring
 3710  * @wait_type: poll ccq or cqp registers for cqp completion
 3711  */
 3712 static int
 3713 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
 3714                        u16 hmc_fn_id,
 3715                        struct irdma_dma_mem *query_fpm_mem,
 3716                        bool post_sq, u8 wait_type)
 3717 {
 3718         __le64 *wqe;
 3719         u64 hdr;
 3720         u32 tail, val, error;
 3721         int ret_code = 0;
 3722 
 3723         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 3724         if (!wqe)
 3725                 return -ENOSPC;
 3726 
 3727         set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
 3728         set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa);
 3729 
 3730         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
 3731             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 3732         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 3733 
 3734         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 3735 
 3736         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe,
 3737                         IRDMA_CQP_WQE_SIZE * 8);
 3738         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 3739 
 3740         if (post_sq) {
 3741                 irdma_sc_cqp_post_sq(cqp);
 3742                 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
 3743                         ret_code = irdma_cqp_poll_registers(cqp, tail,
 3744                                                             cqp->dev->hw_attrs.max_done_count);
 3745                 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
 3746                         ret_code = irdma_sc_query_fpm_val_done(cqp);
 3747         }
 3748 
 3749         return ret_code;
 3750 }
 3751 
 3752 /**
 3753  * irdma_sc_ceq_init - initialize ceq
 3754  * @ceq: ceq sc structure
 3755  * @info: ceq initialization info
 3756  */
 3757 int
 3758 irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
 3759                   struct irdma_ceq_init_info *info)
 3760 {
 3761         u32 pble_obj_cnt;
 3762 
 3763         if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
 3764             info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
 3765                 return -EINVAL;
 3766 
 3767         if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
 3768                 return -EINVAL;
 3769         pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 3770 
 3771         if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
 3772                 return -EINVAL;
 3773 
 3774         ceq->size = sizeof(*ceq);
 3775         ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
 3776         ceq->ceq_id = info->ceq_id;
 3777         ceq->dev = info->dev;
 3778         ceq->elem_cnt = info->elem_cnt;
 3779         ceq->ceq_elem_pa = info->ceqe_pa;
 3780         ceq->virtual_map = info->virtual_map;
 3781         ceq->itr_no_expire = info->itr_no_expire;
 3782         ceq->reg_cq = info->reg_cq;
 3783         ceq->reg_cq_size = 0;
 3784         spin_lock_init(&ceq->req_cq_lock);
 3785         ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
 3786         ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
 3787         ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
 3788         ceq->tph_en = info->tph_en;
 3789         ceq->tph_val = info->tph_val;
 3790         ceq->vsi = info->vsi;
 3791         ceq->polarity = 1;
 3792         IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
 3793         ceq->dev->ceq[info->ceq_id] = ceq;
 3794 
 3795         return 0;
 3796 }
 3797 
 3798 /**
 3799  * irdma_sc_ceq_create - create ceq wqe
 3800  * @ceq: ceq sc structure
 3801  * @scratch: u64 saved to be used during cqp completion
 3802  * @post_sq: flag for cqp db to ring
 3803  */
 3804 static int
 3805 irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
 3806                     bool post_sq)
 3807 {
 3808         struct irdma_sc_cqp *cqp;
 3809         __le64 *wqe;
 3810         u64 hdr;
 3811 
 3812         cqp = ceq->dev->cqp;
 3813         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 3814         if (!wqe)
 3815                 return -ENOSPC;
 3816         set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
 3817         set_64bit_val(wqe, IRDMA_BYTE_32,
 3818                       (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
 3819         set_64bit_val(wqe, IRDMA_BYTE_48,
 3820                       (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
 3821         set_64bit_val(wqe, IRDMA_BYTE_56,
 3822                       FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
 3823                       FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
 3824         hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
 3825             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
 3826             FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
 3827             FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
 3828             FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
 3829             FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
 3830             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 3831         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 3832 
 3833         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 3834 
 3835         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe,
 3836                         IRDMA_CQP_WQE_SIZE * 8);
 3837         if (post_sq)
 3838                 irdma_sc_cqp_post_sq(cqp);
 3839 
 3840         return 0;
 3841 }
 3842 
 3843 /**
 3844  * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
 3845  * @ceq: ceq sc structure
 3846  */
 3847 static int
 3848 irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
 3849 {
 3850         struct irdma_sc_cqp *cqp;
 3851 
 3852         cqp = ceq->dev->cqp;
 3853         return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
 3854                                              NULL);
 3855 }
 3856 
 3857 /**
 3858  * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
 3859  * @ceq: ceq sc structure
 3860  */
 3861 int
 3862 irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
 3863 {
 3864         struct irdma_sc_cqp *cqp;
 3865 
 3866         if (ceq->reg_cq)
 3867                 irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
 3868 
 3869         cqp = ceq->dev->cqp;
 3870         cqp->process_cqp_sds = irdma_update_sds_noccq;
 3871 
 3872         return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
 3873                                              NULL);
 3874 }
 3875 
 3876 /**
 3877  * irdma_sc_cceq_create - create cceq
 3878  * @ceq: ceq sc structure
 3879  * @scratch: u64 saved to be used during cqp completion
 3880  */
 3881 int
 3882 irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
 3883 {
 3884         int ret_code;
 3885         struct irdma_sc_dev *dev = ceq->dev;
 3886 
 3887         dev->ccq->vsi = ceq->vsi;
 3888         if (ceq->reg_cq) {
 3889                 ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
 3890                 if (ret_code)
 3891                         return ret_code;
 3892         }
 3893 
 3894         ret_code = irdma_sc_ceq_create(ceq, scratch, true);
 3895         if (!ret_code)
 3896                 return irdma_sc_cceq_create_done(ceq);
 3897 
 3898         return ret_code;
 3899 }
 3900 
 3901 /**
 3902  * irdma_sc_ceq_destroy - destroy ceq
 3903  * @ceq: ceq sc structure
 3904  * @scratch: u64 saved to be used during cqp completion
 3905  * @post_sq: flag for cqp db to ring
 3906  */
 3907 int
 3908 irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
 3909 {
 3910         struct irdma_sc_cqp *cqp;
 3911         __le64 *wqe;
 3912         u64 hdr;
 3913 
 3914         cqp = ceq->dev->cqp;
 3915         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 3916         if (!wqe)
 3917                 return -ENOSPC;
 3918 
 3919         set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
 3920         set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx);
 3921         hdr = ceq->ceq_id |
 3922             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
 3923             FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
 3924             FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
 3925             FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
 3926             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 3927         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 3928 
 3929         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 3930 
 3931         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe,
 3932                         IRDMA_CQP_WQE_SIZE * 8);
 3933         ceq->dev->ceq[ceq->ceq_id] = NULL;
 3934         if (post_sq)
 3935                 irdma_sc_cqp_post_sq(cqp);
 3936 
 3937         return 0;
 3938 }
 3939 
 3940 /**
 3941  * irdma_sc_process_ceq - process ceq
 3942  * @dev: sc device struct
 3943  * @ceq: ceq sc structure
 3944  *
 3945  * It is expected that the caller serializes this function with
 3946  * cleanup_ceqes() because both functions manipulate the same ceq
 3947  */
 3948 void *
 3949 irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
 3950 {
 3951         u64 temp;
 3952         __le64 *ceqe;
 3953         struct irdma_sc_cq *cq = NULL;
 3954         struct irdma_sc_cq *temp_cq;
 3955         u8 polarity;
 3956         u32 cq_idx;
 3957         unsigned long flags;
 3958 
 3959         do {
 3960                 cq_idx = 0;
 3961                 ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
 3962                 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
 3963                 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
 3964                 if (polarity != ceq->polarity)
 3965                         return NULL;
 3966 
 3967                 temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
 3968                 if (!temp_cq) {
 3969                         cq_idx = IRDMA_INVALID_CQ_IDX;
 3970                         IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
 3971 
 3972                         if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
 3973                                 ceq->polarity ^= 1;
 3974                         continue;
 3975                 }
 3976 
 3977                 cq = temp_cq;
 3978                 if (ceq->reg_cq) {
 3979                         spin_lock_irqsave(&ceq->req_cq_lock, flags);
 3980                         cq_idx = irdma_sc_find_reg_cq(ceq, cq);
 3981                         spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
 3982                 }
 3983 
 3984                 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
 3985                 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
 3986                         ceq->polarity ^= 1;
 3987         } while (cq_idx == IRDMA_INVALID_CQ_IDX);
 3988 
 3989         if (cq) {
 3990                 cq->cq_uk.armed = false;
 3991                 irdma_sc_cq_ack(cq);
 3992         }
 3993         return cq;
 3994 }
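      /*
       * Illustrative sketch (not part of this file): irdma_sc_process_ceq()
       * returns one completed cq per call and NULL once the ceq is drained,
       * so a CEQ interrupt handler would typically loop along these lines.
       * "irdma_notify_cq_owner" is a hypothetical callback, not a driver
       * symbol.
       *
       *      struct irdma_sc_cq *cq;
       *
       *      while ((cq = irdma_sc_process_ceq(dev, ceq)) != NULL)
       *              irdma_notify_cq_owner(cq);      /* dispatch completions */
       */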
 3995 
 3996 /**
 3997  * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
 3998  * @cq: cq for which the ceqes need to be cleaned up
 3999  * @ceq: ceq ptr
 4000  *
 4001  * The function is called after the cq is destroyed to clean up
 4002  * its pending ceqe entries. It is expected that the caller serializes
 4003  * this function with process_ceq(), which runs in interrupt context.
 4004  */
 4005 void
 4006 irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
 4007 {
 4008         struct irdma_sc_cq *next_cq;
 4009         u8 ceq_polarity = ceq->polarity;
 4010         __le64 *ceqe;
 4011         u8 polarity;
 4012         u64 temp;
 4013         int next;
 4014         u32 i;
 4015 
 4016         next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
 4017 
 4018         for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
 4019                 ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
 4020 
 4021                 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
 4022                 polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
 4023                 if (polarity != ceq_polarity)
 4024                         return;
 4025 
 4026                 next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
 4027                 if (cq == next_cq)
 4028                         set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID);
 4029 
 4030                 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
 4031                 if (!next)
 4032                         ceq_polarity ^= 1;
 4033         }
 4034 }
 4035 
 4036 /**
 4037  * irdma_sc_aeq_init - initialize aeq
 4038  * @aeq: aeq structure ptr
 4039  * @info: aeq initialization info
 4040  */
 4041 int
 4042 irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
 4043                   struct irdma_aeq_init_info *info)
 4044 {
 4045         u32 pble_obj_cnt;
 4046 
 4047         if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
 4048             info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
 4049                 return -EINVAL;
 4050 
 4051         pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 4052 
 4053         if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
 4054                 return -EINVAL;
 4055 
 4056         aeq->size = sizeof(*aeq);
 4057         aeq->polarity = 1;
 4058         aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
 4059         aeq->dev = info->dev;
 4060         aeq->elem_cnt = info->elem_cnt;
 4061         aeq->aeq_elem_pa = info->aeq_elem_pa;
 4062         IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
 4063         aeq->virtual_map = info->virtual_map;
 4064         aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
 4065         aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
 4066         aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
 4067         aeq->msix_idx = info->msix_idx;
 4068         info->dev->aeq = aeq;
 4069 
 4070         return 0;
 4071 }
 4072 
 4073 /**
 4074  * irdma_sc_aeq_create - create aeq
 4075  * @aeq: aeq structure ptr
 4076  * @scratch: u64 saved to be used during cqp completion
 4077  * @post_sq: flag for cqp db to ring
 4078  */
 4079 static int
 4080 irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
 4081                     bool post_sq)
 4082 {
 4083         __le64 *wqe;
 4084         struct irdma_sc_cqp *cqp;
 4085         u64 hdr;
 4086 
 4087         cqp = aeq->dev->cqp;
 4088         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 4089         if (!wqe)
 4090                 return -ENOSPC;
 4091         set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
 4092         set_64bit_val(wqe, IRDMA_BYTE_32,
 4093                       (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
 4094         set_64bit_val(wqe, IRDMA_BYTE_48,
 4095                       (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
 4096 
 4097         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
 4098             FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
 4099             FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
 4100             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 4101         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4102 
 4103         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 4104 
 4105         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe,
 4106                         IRDMA_CQP_WQE_SIZE * 8);
 4107         if (post_sq)
 4108                 irdma_sc_cqp_post_sq(cqp);
 4109 
 4110         return 0;
 4111 }
 4112 
 4113 /**
 4114  * irdma_sc_aeq_destroy - destroy aeq during close
 4115  * @aeq: aeq structure ptr
 4116  * @scratch: u64 saved to be used during cqp completion
 4117  * @post_sq: flag for cqp db to ring
 4118  */
 4119 int
 4120 irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq)
 4121 {
 4122         __le64 *wqe;
 4123         struct irdma_sc_cqp *cqp;
 4124         struct irdma_sc_dev *dev;
 4125         u64 hdr;
 4126 
 4127         dev = aeq->dev;
 4128         writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
 4129 
 4130         cqp = dev->cqp;
 4131         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 4132         if (!wqe)
 4133                 return -ENOSPC;
 4134         set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
 4135         set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx);
 4136         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
 4137             FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
 4138             FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
 4139             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 4140         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4141 
 4142         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 4143 
 4144         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe,
 4145                         IRDMA_CQP_WQE_SIZE * 8);
 4146         if (post_sq)
 4147                 irdma_sc_cqp_post_sq(cqp);
 4148         return 0;
 4149 }
 4150 
 4151 /**
 4152  * irdma_sc_get_next_aeqe - get next aeq entry
 4153  * @aeq: aeq structure ptr
 4154  * @info: aeqe info to be returned
 4155  */
 4156 int
 4157 irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
 4158                        struct irdma_aeqe_info *info)
 4159 {
 4160         u64 temp, compl_ctx;
 4161         __le64 *aeqe;
 4162         u8 ae_src;
 4163         u8 polarity;
 4164 
 4165         aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
 4166         get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
 4167         get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
 4168         polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
 4169 
 4170         if (aeq->polarity != polarity)
 4171                 return -ENOENT;
 4172 
 4173         irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);
 4174 
 4175         ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
 4176         info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
 4177         info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
 4178             ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
 4179         info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
 4180         info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
 4181         info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
 4182         info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
 4183         info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
 4184 
 4185         info->ae_src = ae_src;
 4186         switch (info->ae_id) {
 4187         case IRDMA_AE_PRIV_OPERATION_DENIED:
 4188         case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
 4189         case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
 4190         case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
 4191         case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
 4192         case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
 4193         case IRDMA_AE_UDA_XMIT_BAD_PD:
 4194         case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
 4195         case IRDMA_AE_BAD_CLOSE:
 4196         case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
 4197         case IRDMA_AE_STAG_ZERO_INVALID:
 4198         case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
 4199         case IRDMA_AE_IB_INVALID_REQUEST:
 4200         case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
 4201         case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
 4202         case IRDMA_AE_IB_REMOTE_OP_ERROR:
 4203         case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
 4204         case IRDMA_AE_DDP_UBE_INVALID_MO:
 4205         case IRDMA_AE_DDP_UBE_INVALID_QN:
 4206         case IRDMA_AE_DDP_NO_L_BIT:
 4207         case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
 4208         case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
 4209         case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
 4210         case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
 4211         case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
 4212         case IRDMA_AE_INVALID_ARP_ENTRY:
 4213         case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
 4214         case IRDMA_AE_STALE_ARP_ENTRY:
 4215         case IRDMA_AE_INVALID_AH_ENTRY:
 4216         case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 4217         case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
 4218         case IRDMA_AE_LLP_TOO_MANY_RETRIES:
 4219         case IRDMA_AE_LLP_DOUBT_REACHABILITY:
 4220         case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
 4221         case IRDMA_AE_RESET_SENT:
 4222         case IRDMA_AE_TERMINATE_SENT:
 4223         case IRDMA_AE_RESET_NOT_SENT:
 4224         case IRDMA_AE_LCE_QP_CATASTROPHIC:
 4225         case IRDMA_AE_QP_SUSPEND_COMPLETE:
 4226         case IRDMA_AE_UDA_L4LEN_INVALID:
 4227                 info->qp = true;
 4228                 info->compl_ctx = compl_ctx;
 4229                 break;
 4230         case IRDMA_AE_LCE_CQ_CATASTROPHIC:
 4231                 info->cq = true;
 4232                 info->compl_ctx = LS_64_1(compl_ctx, 1);
 4233                 ae_src = IRDMA_AE_SOURCE_RSVD;
 4234                 break;
 4235         case IRDMA_AE_ROCE_EMPTY_MCG:
 4236         case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
 4237         case IRDMA_AE_ROCE_BAD_MC_QPID:
 4238         case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
 4239                 /* fallthrough */
 4240         case IRDMA_AE_LLP_CONNECTION_RESET:
 4241         case IRDMA_AE_LLP_SYN_RECEIVED:
 4242         case IRDMA_AE_LLP_FIN_RECEIVED:
 4243         case IRDMA_AE_LLP_CLOSE_COMPLETE:
 4244         case IRDMA_AE_LLP_TERMINATE_RECEIVED:
 4245         case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
 4246                 ae_src = IRDMA_AE_SOURCE_RSVD;
 4247                 info->qp = true;
 4248                 info->compl_ctx = compl_ctx;
 4249                 break;
 4250         default:
 4251                 break;
 4252         }
 4253 
 4254         switch (ae_src) {
 4255         case IRDMA_AE_SOURCE_RQ:
 4256         case IRDMA_AE_SOURCE_RQ_0011:
 4257                 info->qp = true;
 4258                 info->rq = true;
 4259                 info->compl_ctx = compl_ctx;
 4260                 break;
 4261         case IRDMA_AE_SOURCE_CQ:
 4262         case IRDMA_AE_SOURCE_CQ_0110:
 4263         case IRDMA_AE_SOURCE_CQ_1010:
 4264         case IRDMA_AE_SOURCE_CQ_1110:
 4265                 info->cq = true;
 4266                 info->compl_ctx = LS_64_1(compl_ctx, 1);
 4267                 break;
 4268         case IRDMA_AE_SOURCE_SQ:
 4269         case IRDMA_AE_SOURCE_SQ_0111:
 4270                 info->qp = true;
 4271                 info->sq = true;
 4272                 info->compl_ctx = compl_ctx;
 4273                 break;
 4274         case IRDMA_AE_SOURCE_IN_WR:
 4275         case IRDMA_AE_SOURCE_IN_RR:
 4276                 info->qp = true;
 4277                 info->compl_ctx = compl_ctx;
 4278                 info->in_rdrsp_wr = true;
 4279                 break;
 4280         case IRDMA_AE_SOURCE_OUT_RR:
 4281         case IRDMA_AE_SOURCE_OUT_RR_1111:
 4282                 info->qp = true;
 4283                 info->compl_ctx = compl_ctx;
 4284                 info->out_rdrsp = true;
 4285                 break;
 4286         case IRDMA_AE_SOURCE_RSVD:
 4287         default:
 4288                 break;
 4289         }
 4290 
 4291         IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
 4292         if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
 4293                 aeq->polarity ^= 1;
 4294 
 4295         return 0;
 4296 }
 4297 
 4298 /**
 4299  * irdma_sc_repost_aeq_entries - repost completed aeq entries
 4300  * @dev: sc device struct
 4301  * @count: allocate count
 4302  */
 4303 int
 4304 irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
 4305 {
 4306         db_wr32(count, dev->aeq_alloc_db);
 4307 
 4308         return 0;
 4309 }
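      /*
       * Illustrative sketch (not part of this file): AEQ handling pairs
       * irdma_sc_get_next_aeqe() with irdma_sc_repost_aeq_entries().  A
       * caller might drain the ring roughly as below; "irdma_handle_ae"
       * is a hypothetical handler, not a driver symbol.
       *
       *      struct irdma_aeqe_info info = {0};
       *      u32 processed = 0;
       *
       *      while (!irdma_sc_get_next_aeqe(dev->aeq, &info)) {
       *              irdma_handle_ae(dev, &info);    /* act on the event */
       *              processed++;
       *      }
       *      if (processed)
       *              irdma_sc_repost_aeq_entries(dev, processed);
       */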
 4310 
 4311 /**
 4312  * irdma_sc_ccq_init - initialize control cq
 4313  * @cq: sc's cq struct
 4314  * @info: info for control cq initialization
 4315  */
 4316 int
 4317 irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
 4318 {
 4319         u32 pble_obj_cnt;
 4320 
 4321         if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
 4322             info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
 4323                 return -EINVAL;
 4324 
 4325         if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
 4326                 return -EINVAL;
 4327 
 4328         pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 4329 
 4330         if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
 4331                 return -EINVAL;
 4332 
 4333         cq->cq_pa = info->cq_pa;
 4334         cq->cq_uk.cq_base = info->cq_base;
 4335         cq->shadow_area_pa = info->shadow_area_pa;
 4336         cq->cq_uk.shadow_area = info->shadow_area;
 4337         cq->shadow_read_threshold = info->shadow_read_threshold;
 4338         cq->dev = info->dev;
 4339         cq->ceq_id = info->ceq_id;
 4340         cq->cq_uk.cq_size = info->num_elem;
 4341         cq->cq_type = IRDMA_CQ_TYPE_CQP;
 4342         cq->ceqe_mask = info->ceqe_mask;
 4343         IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
 4344         cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
 4345         cq->ceq_id_valid = info->ceq_id_valid;
 4346         cq->tph_en = info->tph_en;
 4347         cq->tph_val = info->tph_val;
 4348         cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
 4349         cq->pbl_list = info->pbl_list;
 4350         cq->virtual_map = info->virtual_map;
 4351         cq->pbl_chunk_size = info->pbl_chunk_size;
 4352         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
 4353         cq->cq_uk.polarity = true;
 4354         cq->vsi = info->vsi;
 4355         cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
 4356 
 4357         /* Only applicable to CQs other than CCQ so initialize to zero */
 4358         cq->cq_uk.cqe_alloc_db = NULL;
 4359 
 4360         info->dev->ccq = cq;
 4361         return 0;
 4362 }
 4363 
 4364 /**
 4365  * irdma_sc_ccq_create_done - poll cqp for ccq create
 4366  * @ccq: ccq sc struct
 4367  */
 4368 static inline int
 4369 irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
 4370 {
 4371         struct irdma_sc_cqp *cqp;
 4372 
 4373         cqp = ccq->dev->cqp;
 4374 
 4375         return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
 4376 }
 4377 
 4378 /**
 4379  * irdma_sc_ccq_create - create control cq
 4380  * @ccq: ccq sc struct
 4381  * @scratch: u64 saved to be used during cqp completion
 4382  * @check_overflow: overflow flag for ccq
 4383  * @post_sq: flag for cqp db to ring
 4384  */
 4385 int
 4386 irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
 4387                     bool check_overflow, bool post_sq)
 4388 {
 4389         int ret_code;
 4390 
 4391         ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
 4392         if (ret_code)
 4393                 return ret_code;
 4394 
 4395         if (post_sq) {
 4396                 ret_code = irdma_sc_ccq_create_done(ccq);
 4397                 if (ret_code)
 4398                         return ret_code;
 4399         }
 4400         ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
 4401 
 4402         return 0;
 4403 }
 4404 
 4405 /**
 4406  * irdma_sc_ccq_destroy - destroy ccq during close
 4407  * @ccq: ccq sc struct
 4408  * @scratch: u64 saved to be used during cqp completion
 4409  * @post_sq: flag for cqp db to ring
 4410  */
 4411 int
 4412 irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
 4413 {
 4414         struct irdma_sc_cqp *cqp;
 4415         __le64 *wqe;
 4416         u64 hdr;
 4417         int ret_code = 0;
 4418         u32 tail, val, error;
 4419 
 4420         cqp = ccq->dev->cqp;
 4421         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 4422         if (!wqe)
 4423                 return -ENOSPC;
 4424 
 4425         set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size);
 4426         set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1));
 4427         set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa);
 4428 
 4429         hdr = ccq->cq_uk.cq_id |
 4430             FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
 4431                       IRDMA_CQPSQ_CQ_CEQID) |
 4432             FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
 4433             FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
 4434             FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
 4435             FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
 4436             FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
 4437             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 4438         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4439 
 4440         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 4441 
 4442         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe,
 4443                         IRDMA_CQP_WQE_SIZE * 8);
 4444         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 4445 
 4446         if (post_sq) {
 4447                 irdma_sc_cqp_post_sq(cqp);
 4448                 ret_code = irdma_cqp_poll_registers(cqp, tail,
 4449                                                     cqp->dev->hw_attrs.max_done_count);
 4450         }
 4451 
 4452         cqp->process_cqp_sds = irdma_update_sds_noccq;
 4453 
 4454         return ret_code;
 4455 }
 4456 
 4457 /**
 4458  * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 4459  * @dev: ptr to irdma_sc_dev struct
 4460  * @hmc_fn_id: hmc function id
 4461  */
 4462 int
 4463 irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id)
 4464 {
 4465         struct irdma_hmc_info *hmc_info;
 4466         struct irdma_hmc_fpm_misc *hmc_fpm_misc;
 4467         struct irdma_dma_mem query_fpm_mem;
 4468         int ret_code = 0;
 4469         u8 wait_type;
 4470 
 4471         hmc_info = dev->hmc_info;
 4472         hmc_fpm_misc = &dev->hmc_fpm_misc;
 4473         query_fpm_mem.pa = dev->fpm_query_buf_pa;
 4474         query_fpm_mem.va = dev->fpm_query_buf;
 4475         hmc_info->hmc_fn_id = hmc_fn_id;
 4476         wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
 4477 
 4478         ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
 4479                                           &query_fpm_mem, true, wait_type);
 4480         if (ret_code)
 4481                 return ret_code;
 4482 
 4483         /* parse the fpm_query_buf and fill hmc obj info */
 4484         ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
 4485                                                 hmc_fpm_misc);
 4486 
 4487         irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "QUERY FPM BUFFER",
 4488                         query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE);
 4489         return ret_code;
 4490 }
 4491 
 4492 /**
 4493  * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
 4494  * command and populates fpm base address in hmc_info
 4495  * @dev: ptr to irdma_sc_dev struct
 4496  * @hmc_fn_id: hmc function id
 4497  */
 4498 static int
 4499 irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id)
 4500 {
 4501         struct irdma_hmc_obj_info *obj_info;
 4502         __le64 *buf;
 4503         struct irdma_hmc_info *hmc_info;
 4504         struct irdma_dma_mem commit_fpm_mem;
 4505         int ret_code = 0;
 4506         u8 wait_type;
 4507 
 4508         hmc_info = dev->hmc_info;
 4509         obj_info = hmc_info->hmc_obj;
 4510         buf = dev->fpm_commit_buf;
 4511 
 4512         set_64bit_val(buf, IRDMA_BYTE_0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
 4513         set_64bit_val(buf, IRDMA_BYTE_8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
 4514         set_64bit_val(buf, IRDMA_BYTE_16, (u64)0);      /* RSRVD */
 4515         set_64bit_val(buf, IRDMA_BYTE_24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
 4516         set_64bit_val(buf, IRDMA_BYTE_32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
 4517         set_64bit_val(buf, IRDMA_BYTE_40, (u64)0);      /* RSVD */
 4518         set_64bit_val(buf, IRDMA_BYTE_48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
 4519         set_64bit_val(buf, IRDMA_BYTE_56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
 4520         set_64bit_val(buf, IRDMA_BYTE_64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
 4521         set_64bit_val(buf, IRDMA_BYTE_72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
 4522         set_64bit_val(buf, IRDMA_BYTE_80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
 4523         set_64bit_val(buf, IRDMA_BYTE_88,
 4524                       (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
 4525         set_64bit_val(buf, IRDMA_BYTE_96,
 4526                       (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
 4527         set_64bit_val(buf, IRDMA_BYTE_104,
 4528                       (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
 4529         set_64bit_val(buf, IRDMA_BYTE_112,
 4530                       (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
 4531         set_64bit_val(buf, IRDMA_BYTE_120, (u64)0);     /* RSVD */
 4532         set_64bit_val(buf, IRDMA_BYTE_128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
 4533         set_64bit_val(buf, IRDMA_BYTE_136,
 4534                       (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
 4535         set_64bit_val(buf, IRDMA_BYTE_144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
 4536         set_64bit_val(buf, IRDMA_BYTE_152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
 4537         set_64bit_val(buf, IRDMA_BYTE_160,
 4538                       (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
 4539         set_64bit_val(buf, IRDMA_BYTE_168,
 4540                       (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
 4541         commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
 4542         commit_fpm_mem.va = dev->fpm_commit_buf;
 4543 
 4544         wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
 4545         irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
 4546                         commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
 4547         ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
 4548                                            &commit_fpm_mem, true, wait_type);
 4549         if (!ret_code)
 4550                 ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
 4551                                                          hmc_info->hmc_obj,
 4552                                                          &hmc_info->sd_table.sd_cnt);
 4553         irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
 4554                         commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
 4555 
 4556         return ret_code;
 4557 }
 4558 
 4559 /**
 4560  * cqp_sds_wqe_fill - fill cqp wqe for sd
 4561  * @cqp: struct for cqp hw
 4562  * @info: sd info for wqe
 4563  * @scratch: u64 saved to be used during cqp completion
 4564  */
 4565 static int
 4566 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
 4567                  struct irdma_update_sds_info *info, u64 scratch)
 4568 {
 4569         u64 data;
 4570         u64 hdr;
 4571         __le64 *wqe;
 4572         int mem_entries, wqe_entries;
 4573         struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
 4574         u64 offset = 0;
 4575         u32 wqe_idx;
 4576 
 4577         wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
 4578         if (!wqe)
 4579                 return -ENOSPC;
 4580 
 4581         wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
 4582         mem_entries = info->cnt - wqe_entries;
 4583 
 4584         if (mem_entries) {
 4585                 offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
 4586                 irdma_memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
 4587 
 4588                 data = (u64)sdbuf->pa + offset;
 4589         } else {
 4590                 data = 0;
 4591         }
 4592         data |= FLD_LS_64(cqp->dev, info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID);
 4593         set_64bit_val(wqe, IRDMA_BYTE_16, data);
 4594 
 4595         switch (wqe_entries) {
 4596         case 3:
 4597                 set_64bit_val(wqe, IRDMA_BYTE_48,
 4598                               (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
 4599                                FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
 4600 
 4601                 set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data);
 4602                 /* fallthrough */
 4603         case 2:
 4604                 set_64bit_val(wqe, IRDMA_BYTE_32,
 4605                               (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
 4606                                FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
 4607 
 4608                 set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data);
 4609                 /* fallthrough */
 4610         case 1:
 4611                 set_64bit_val(wqe, IRDMA_BYTE_0,
 4612                               FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
 4613 
 4614                 set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data);
 4615                 break;
 4616         default:
 4617                 break;
 4618         }
 4619 
 4620         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
 4621             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
 4622             FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
 4623         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4624 
 4625         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 4626 
 4627         if (mem_entries)
 4628                 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE Buffer",
 4629                                 (char *)sdbuf->va + offset, mem_entries << 4);
 4630 
 4631         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE", wqe,
 4632                         IRDMA_CQP_WQE_SIZE * 8);
 4633 
 4634         return 0;
 4635 }
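      /*
       * Worked example (illustrative): with info->cnt == 5,
       * cqp_sds_wqe_fill() keeps wqe_entries = 3 entries inline in the
       * WQE (bytes 0/8, 32/40 and 48/56) and copies the remaining
       * mem_entries = 2 entries (2 << 4 == 32 bytes) from info->entry[3]
       * into the sdbuf DMA area at wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
       * the buffer's physical address and the entry count travel in the
       * WQE at byte 16 and in the header, respectively.
       */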
 4636 
 4637 /**
 4638  * irdma_update_pe_sds - cqp wqe for sd
 4639  * @dev: ptr to irdma_dev struct
 4640  * @info: sd info for sd's
 4641  * @scratch: u64 saved to be used during cqp completion
 4642  */
 4643 static int
 4644 irdma_update_pe_sds(struct irdma_sc_dev *dev,
 4645                     struct irdma_update_sds_info *info, u64 scratch)
 4646 {
 4647         struct irdma_sc_cqp *cqp = dev->cqp;
 4648         int ret_code;
 4649 
 4650         ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
 4651         if (!ret_code)
 4652                 irdma_sc_cqp_post_sq(cqp);
 4653 
 4654         return ret_code;
 4655 }
 4656 
 4657 /**
 4658  * irdma_update_sds_noccq - update sd before ccq created
 4659  * @dev: sc device struct
 4660  * @info: sd info for sd's
 4661  */
 4662 int
 4663 irdma_update_sds_noccq(struct irdma_sc_dev *dev,
 4664                        struct irdma_update_sds_info *info)
 4665 {
 4666         u32 error, val, tail;
 4667         struct irdma_sc_cqp *cqp = dev->cqp;
 4668         int ret_code;
 4669 
 4670         ret_code = cqp_sds_wqe_fill(cqp, info, 0);
 4671         if (ret_code)
 4672                 return ret_code;
 4673 
 4674         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 4675 
 4676         irdma_sc_cqp_post_sq(cqp);
 4677         return irdma_cqp_poll_registers(cqp, tail,
 4678                                         cqp->dev->hw_attrs.max_done_count);
 4679 }
 4680 
 4681 /**
 4682  * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
 4683  * @cqp: struct for cqp hw
 4684  * @scratch: u64 saved to be used during cqp completion
 4685  * @hmc_fn_id: hmc function id
 4686  * @post_sq: flag for cqp db to ring
 4687  * @poll_registers: flag to poll register for cqp completion
 4688  */
 4689 int
 4690 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
 4691                                     u16 hmc_fn_id, bool post_sq,
 4692                                     bool poll_registers)
 4693 {
 4694         u64 hdr;
 4695         __le64 *wqe;
 4696         u32 tail, val, error;
 4697 
 4698         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 4699         if (!wqe)
 4700                 return -ENOSPC;
 4701 
 4702         set_64bit_val(wqe, IRDMA_BYTE_16,
 4703                       FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
 4704 
 4705         hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
 4706                          IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
 4707             FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 4708         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4709 
 4710         set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
 4711 
 4712         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
 4713                         wqe, IRDMA_CQP_WQE_SIZE * 8);
 4714         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 4715 
 4716         if (post_sq) {
 4717                 irdma_sc_cqp_post_sq(cqp);
 4718                 if (poll_registers)
 4719                         /* check for cqp sq tail update */
 4720                         return irdma_cqp_poll_registers(cqp, tail,
 4721                                                         cqp->dev->hw_attrs.max_done_count);
 4722                 else
 4723                         return irdma_sc_poll_for_cqp_op_done(cqp,
 4724                                                              IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
 4725                                                              NULL);
 4726         }
 4727 
 4728         return 0;
 4729 }
 4730 
 4731 /**
 4732  * irdma_cqp_ring_full - check if cqp ring is full
 4733  * @cqp: struct for cqp hw
 4734  */
 4735 static bool
 4736 irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
 4737 {
 4738         return IRDMA_RING_FULL_ERR(cqp->sq_ring);
 4739 }
 4740 
 4741 /**
 4742  * irdma_est_sd - returns approximate number of SDs for HMC
 4743  * @dev: sc device struct
 4744  * @hmc_info: hmc structure, size and count for HMC objects
 4745  */
 4746 static u32
 4747 irdma_est_sd(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info)
      {
 4748         int i;
 4749         u64 size = 0;
 4750         u64 sd;
 4751 
 4752         for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
 4753                 if (i != IRDMA_HMC_IW_PBLE)
 4754                         size += round_up(hmc_info->hmc_obj[i].cnt *
 4755                                          hmc_info->hmc_obj[i].size, 512);
 4756         size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
 4757                          hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
 4758         if (size & 0x1FFFFF)
 4759                 sd = (size >> 21) + 1;  /* add 1 for remainder */
 4760         else
 4761                 sd = size >> 21;
 4762         if (sd > 0xFFFFFFFF) {
 4763                 irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd);
 4764                 sd = 0xFFFFFFFF - 1;
 4765         }
 4766 
 4767         return (u32)sd;
 4768 }
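      /*
       * Worked example (illustrative): each SD covers 2MB (1 << 21), so
       * for an aggregate rounded object size of 5MB (0x500000) the low
       * 21 bits are nonzero and irdma_est_sd() returns
       * (0x500000 >> 21) + 1 == 3 descriptors.
       */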
 4769 
 4770 /**
 4771  * irdma_sc_query_rdma_features - query RDMA features and FW ver
 4772  * @cqp: struct for cqp hw
 4773  * @buf: buffer to hold query info
 4774  * @scratch: u64 saved to be used during cqp completion
 4775  */
 4776 static int
 4777 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
 4778                              struct irdma_dma_mem *buf, u64 scratch)
 4779 {
 4780         __le64 *wqe;
 4781         u64 temp;
 4782         u32 tail, val, error;
 4783         int status;
 4784 
 4785         wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 4786         if (!wqe)
 4787                 return -ENOSPC;
 4788 
 4789         temp = buf->pa;
 4790         set_64bit_val(wqe, IRDMA_BYTE_32, temp);
 4791 
 4792         temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
 4793                           cqp->polarity) |
 4794             FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
 4795             FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
 4796         irdma_wmb();            /* make sure WQE is written before valid bit is set */
 4797 
 4798         set_64bit_val(wqe, IRDMA_BYTE_24, temp);
 4799 
 4800         irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", wqe,
 4801                         IRDMA_CQP_WQE_SIZE * 8);
 4802         irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
 4803 
 4804         irdma_sc_cqp_post_sq(cqp);
 4805         status = irdma_cqp_poll_registers(cqp, tail,
 4806                                           cqp->dev->hw_attrs.max_done_count);
 4807         if (error || status)
 4808                 status = -EIO;
 4809 
 4810         return status;
 4811 }
 4812 
 4813 /**
 4814  * irdma_get_rdma_features - get RDMA features
 4815  * @dev: sc device struct
 4816  */
 4817 int
 4818 irdma_get_rdma_features(struct irdma_sc_dev *dev)
 4819 {
 4820         int ret_code;
 4821         struct irdma_dma_mem feat_buf;
 4822         u64 temp;
 4823         u16 byte_idx, feat_type, feat_cnt, feat_idx;
 4824 
 4825         feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
 4826         feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size,
 4827                                              IRDMA_FEATURE_BUF_ALIGNMENT);
 4828         if (!feat_buf.va)
 4829                 return -ENOMEM;
 4830 
 4831         ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
 4832         if (ret_code)
 4833                 goto exit;
 4834 
 4835         get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
 4836         feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
 4837         if (feat_cnt < IRDMA_MIN_FEATURES) {
 4838                 ret_code = -EINVAL;
 4839                 goto exit;
 4840         } else if (feat_cnt > IRDMA_MAX_FEATURES) {
 4841                 irdma_debug(dev, IRDMA_DEBUG_DEV,
 4842                             "feature buf size insufficient, "
 4843                             "retrying with larger buffer\n");
 4844                 irdma_free_dma_mem(dev->hw, &feat_buf);
 4845                 feat_buf.size = 8 * feat_cnt;
 4846                 feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf,
 4847                                                      feat_buf.size,
 4848                                                      IRDMA_FEATURE_BUF_ALIGNMENT);
 4849                 if (!feat_buf.va)
 4850                         return -ENOMEM;
 4851 
 4852                 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
 4853                 if (ret_code)
 4854                         goto exit;
 4855 
 4856                 get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
 4857                 feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
 4858                 if (feat_cnt < IRDMA_MIN_FEATURES) {
 4859                         ret_code = -EINVAL;
 4860                         goto exit;
 4861                 }
 4862         }
 4863 
 4864         irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va,
 4865                         feat_cnt * 8);
 4866 
 4867         for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
 4868              feat_idx++, byte_idx += 8) {
 4869                 get_64bit_val(feat_buf.va, byte_idx, &temp);
 4870                 feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
 4871                 dev->feature_info[feat_type] = temp;
 4872         }
 4873 exit:
 4874         irdma_free_dma_mem(dev->hw, &feat_buf);
 4875         return ret_code;
 4876 }
 4877 
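      /**
       * irdma_q1_cnt - compute the Q1 object count for a desired qp count
       * @dev: sc device struct
       * @hmc_info: hmc structure, size and count for HMC objects
       * @qpwanted: desired number of qps
       */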
 4878 static u32
 4879 irdma_q1_cnt(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info,
                   u32 qpwanted)
      {
 4880         u32 q1_cnt;
 4881 
 4882         if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
 4883                 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
 4884         } else {
 4885                 if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
 4886                         q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
 4887                 else
 4888                         q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
 4889         }
 4890 
 4891         return q1_cnt;
 4892 }
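      /*
       * Worked example (illustrative): on GEN_1 hardware with an assumed
       * max_hw_ird of 64 and qpwanted == 1000, irdma_q1_cnt() returns
       * roundup_pow_of_two(64 * 2 * 1000) == 131072.
       */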
 4893 
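      /**
       * cfg_fpm_value_gen_1 - adjust HMC object counts for GEN_1 hardware
       * @dev: sc device struct
       * @hmc_info: hmc structure, size and count for HMC objects
       * @qpwanted: desired number of qps
       */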
 4894 static void
 4895 cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
 4896                     struct irdma_hmc_info *hmc_info, u32 qpwanted)
 4897 {
 4898         hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
 4899 }
 4900 
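      /**
       * cfg_fpm_value_gen_2 - adjust HMC object counts for GEN_2 hardware
       * @dev: sc device struct
       * @hmc_info: hmc structure, size and count for HMC objects
       * @qpwanted: desired number of qps
       */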
 4901 static void
 4902 cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
 4903                     struct irdma_hmc_info *hmc_info, u32 qpwanted)
 4904 {
 4905         struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
 4906 
 4907         hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
 4908             4 * hmc_fpm_misc->xf_block_size * qpwanted;
 4909 
 4910         hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
 4911 
 4912         if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
 4913                 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
 4914         if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
 4915                 hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
 4916                     hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
 4917                     hmc_fpm_misc->rrf_block_size;
 4918         if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
 4919                 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
 4920         if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
 4921                 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
 4922                     hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
 4923                     hmc_fpm_misc->ooiscf_block_size;
 4924 }
 4925 
 4926 /**
 4927  * irdma_cfg_fpm_val - configure HMC objects
 4928  * @dev: sc device struct
 4929  * @qp_count: desired qp count
 4930  */
 4931 int
 4932 irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
 4933 {
 4934         struct irdma_virt_mem virt_mem;
 4935         u32 i, mem_size;
 4936         u32 qpwanted, mrwanted, pblewanted;
 4937         u32 powerof2, hte;
 4938         u32 sd_needed;
 4939         u32 sd_diff;
 4940         u32 loop_count = 0;
 4941         struct irdma_hmc_info *hmc_info;
 4942         struct irdma_hmc_fpm_misc *hmc_fpm_misc;
 4943         int ret_code = 0;
 4944         u32 max_sds;
 4945 
 4946         hmc_info = dev->hmc_info;
 4947         hmc_fpm_misc = &dev->hmc_fpm_misc;
 4948         ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
 4949         if (ret_code) {
 4950                 irdma_debug(dev, IRDMA_DEBUG_HMC,
 4951                             "irdma_sc_init_iw_hmc returned error_code = %d\n",
 4952                             ret_code);
 4953                 return ret_code;
 4954         }
 4955 
 4956         max_sds = hmc_fpm_misc->max_sds;
 4957 
 4958         for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
 4959                 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
 4960 
 4961         sd_needed = irdma_est_sd(dev, hmc_info);
 4962         irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %d where max sd is %d\n",
 4963                     hmc_info->sd_table.sd_cnt, max_sds);
 4964 
 4965         qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
 4966 
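              /* round qpwanted down to the largest power of two <= qpwanted */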
 4967         powerof2 = 1;
 4968         while (powerof2 <= qpwanted)
 4969                 powerof2 *= 2;
 4970         powerof2 /= 2;
 4971         qpwanted = powerof2;
 4972 
 4973         mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
 4974         pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
 4975 
 4976         irdma_debug(dev, IRDMA_DEBUG_HMC,
 4977                     "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
 4978                     qp_count, max_sds,
 4979                     hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
 4980                     hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
 4981                     hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
 4982                     hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
 4983                     hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
 4984                     hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
 4985         hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
 4986             hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
 4987         hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
 4988             hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
 4989         hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
 4990             hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
 4991         if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
 4992                 hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
 4993 
 4994         while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
 4995                 qpwanted /= 2;
 4996 
 4997         if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
 4998                 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
 4999                 while (hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt > hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt) {
 5000                         qpwanted /= 2;
 5001                         cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
 5002                 }
 5003         }
 5004 
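              /*
               * Iteratively shrink qpwanted, mrwanted and pblewanted until the
               * estimated SD count fits in max_sds; give up after 2000 passes.
               */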
 5005         do {
 5006                 ++loop_count;
 5007                 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
 5008                 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
 5009                     min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
 5010                 hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0;       /* Reserved */
 5011                 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
 5012 
 5013                 hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
 5014                 powerof2 = 1;
 5015                 while (powerof2 < hte)
 5016                         powerof2 *= 2;
 5017                 hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
 5018                     powerof2 * hmc_fpm_misc->ht_multiplier;
 5019                 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
 5020                         cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
 5021                 else
 5022                         cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
 5023 
 5024                 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
 5025                 hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
 5026                     hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
 5027                 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
 5028                     hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
 5029                 hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
 5030                     (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
 5031 
 5032                 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
 5033                 sd_needed = irdma_est_sd(dev, hmc_info);
 5034                 irdma_debug(dev, IRDMA_DEBUG_HMC,
 5035                             "sd_needed = %d, max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
 5036                             sd_needed, max_sds, mrwanted, pblewanted, qpwanted);
 5037 
 5038                 /* Do not reduce resources further. All objects fit with max SDs */
 5039                 if (sd_needed <= max_sds)
 5040                         break;
 5041 
 5042                 sd_diff = sd_needed - max_sds;
 5043                 if (sd_diff > 128) {
 5044                         if (!(loop_count % 2) && qpwanted > 128) {
 5045                                 qpwanted /= 2;
 5046                         } else {
 5047                                 mrwanted /= 2;
 5048                                 pblewanted /= 2;
 5049                         }
 5050                         continue;
 5051                 }
 5052                 if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
 5053                     pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
 5054                         pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
 5055                         continue;
 5056                 } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
 5057                         pblewanted -= 10 * FPM_MULTIPLIER;
 5058                 } else if (pblewanted > FPM_MULTIPLIER) {
 5059                         pblewanted -= FPM_MULTIPLIER;
 5060                 } else if (qpwanted <= 128) {
 5061                         if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
 5062                                 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
 5063                         if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
 5064                                 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
 5065                 }
 5066                 if (mrwanted > FPM_MULTIPLIER)
 5067                         mrwanted -= FPM_MULTIPLIER;
 5068                 if (!(loop_count % 10) && qpwanted > 128) {
 5069                         qpwanted /= 2;
 5070                         if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
 5071                                 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
 5072                 }
 5073         } while (loop_count < 2000);
 5074 
 5075         if (sd_needed > max_sds) {
 5076                 irdma_debug(dev, IRDMA_DEBUG_HMC,
 5077                             "cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
 5078                             loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
 5079                 return -EINVAL;
 5080         }
 5081 
 5082         if (loop_count > 1 && sd_needed < max_sds) {
 5083                 pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
 5084                 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
 5085                 sd_needed = irdma_est_sd(dev, hmc_info);
 5086         }
 5087 
 5088         irdma_debug(dev, IRDMA_DEBUG_HMC,
 5089                     "loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
 5090                     loop_count, sd_needed,
 5091                     hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
 5092                     hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
 5093                     hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
 5094                     hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
 5095                     hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
 5096                     hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
 5097                     hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
 5098 
 5099         ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
 5100         if (ret_code) {
 5101                 irdma_debug(dev, IRDMA_DEBUG_HMC,
 5102                             "cfg_iw_fpm returned error_code[x%08X]\n",
 5103                             readl(dev->hw_regs[IRDMA_CQPERRCODES]));
 5104                 return ret_code;
 5105         }
 5106 
 5107         mem_size = sizeof(struct irdma_hmc_sd_entry) *
 5108             (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
 5109         virt_mem.size = mem_size;
 5110         virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
 5111         if (!virt_mem.va) {
 5112                 irdma_debug(dev, IRDMA_DEBUG_HMC,
 5113                             "failed to allocate memory for sd_entry buffer\n");
 5114                 return -ENOMEM;
 5115         }
 5116         hmc_info->sd_table.sd_entry = virt_mem.va;
 5117 
 5118         return ret_code;
 5119 }
 5120 
 5121 /**
 5122  * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available
 5123  * @dev: rdma device
 5124  * @pcmdinfo: cqp command info
 5125  */
 5126 static int
 5127 irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
 5128                    struct cqp_cmds_info *pcmdinfo)
 5129 {
 5130         int status;
 5131         struct irdma_dma_mem val_mem;
 5132         bool alloc = false;
 5133 
 5134         dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
 5135         switch (pcmdinfo->cqp_cmd) {
 5136         case IRDMA_OP_CEQ_DESTROY:
 5137                 status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
 5138                                               pcmdinfo->in.u.ceq_destroy.scratch,
 5139                                               pcmdinfo->post_sq);
 5140                 break;
 5141         case IRDMA_OP_AEQ_DESTROY:
 5142                 status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
 5143                                               pcmdinfo->in.u.aeq_destroy.scratch,
 5144                                               pcmdinfo->post_sq);
 5145                 break;
 5146         case IRDMA_OP_CEQ_CREATE:
 5147                 status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
 5148                                              pcmdinfo->in.u.ceq_create.scratch,
 5149                                              pcmdinfo->post_sq);
 5150                 break;
 5151         case IRDMA_OP_AEQ_CREATE:
 5152                 status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
 5153                                              pcmdinfo->in.u.aeq_create.scratch,
 5154                                              pcmdinfo->post_sq);
 5155                 break;
 5156         case IRDMA_OP_QP_UPLOAD_CONTEXT:
 5157                 status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
 5158                                                     &pcmdinfo->in.u.qp_upload_context.info,
 5159                                                     pcmdinfo->in.u.qp_upload_context.scratch,
 5160                                                     pcmdinfo->post_sq);
 5161                 break;
 5162         case IRDMA_OP_CQ_CREATE:
 5163                 status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
 5164                                             pcmdinfo->in.u.cq_create.scratch,
 5165                                             pcmdinfo->in.u.cq_create.check_overflow,
 5166                                             pcmdinfo->post_sq);
 5167                 break;
 5168         case IRDMA_OP_CQ_MODIFY:
 5169                 status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
 5170                                             &pcmdinfo->in.u.cq_modify.info,
 5171                                             pcmdinfo->in.u.cq_modify.scratch,
 5172                                             pcmdinfo->post_sq);
 5173                 break;
 5174         case IRDMA_OP_CQ_DESTROY:
 5175                 status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
 5176                                              pcmdinfo->in.u.cq_destroy.scratch,
 5177                                              pcmdinfo->post_sq);
 5178                 break;
 5179         case IRDMA_OP_QP_FLUSH_WQES:
 5180                 status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
 5181                                                 &pcmdinfo->in.u.qp_flush_wqes.info,
 5182                                                 pcmdinfo->in.u.qp_flush_wqes.scratch,
 5183                                                 pcmdinfo->post_sq);
 5184                 break;
 5185         case IRDMA_OP_GEN_AE:
 5186                 status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
 5187                                          &pcmdinfo->in.u.gen_ae.info,
 5188                                          pcmdinfo->in.u.gen_ae.scratch,
 5189                                          pcmdinfo->post_sq);
 5190                 break;
 5191         case IRDMA_OP_MANAGE_PUSH_PAGE:
 5192                 status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
 5193                                                    &pcmdinfo->in.u.manage_push_page.info,
 5194                                                    pcmdinfo->in.u.manage_push_page.scratch,
 5195                                                    pcmdinfo->post_sq);
 5196                 break;
 5197         case IRDMA_OP_UPDATE_PE_SDS:
 5198                 status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
 5199                                              &pcmdinfo->in.u.update_pe_sds.info,
 5200                                              pcmdinfo->in.u.update_pe_sds.scratch);
 5201                 break;
 5202         case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
 5203                 /* switch to calling through the call table */
 5204                 status =
 5205                     irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
 5206                                                       &pcmdinfo->in.u.manage_hmc_pm.info,
 5207                                                       pcmdinfo->in.u.manage_hmc_pm.scratch,
 5208                                                       true);
 5209                 break;
 5210         case IRDMA_OP_SUSPEND:
 5211                 status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
 5212                                              pcmdinfo->in.u.suspend_resume.qp,
 5213                                              pcmdinfo->in.u.suspend_resume.scratch);
 5214                 break;
 5215         case IRDMA_OP_RESUME:
 5216                 status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
 5217                                             pcmdinfo->in.u.suspend_resume.qp,
 5218                                             pcmdinfo->in.u.suspend_resume.scratch);
 5219                 break;
 5220         case IRDMA_OP_QUERY_FPM_VAL:
 5221                 val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
 5222                 val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
 5223                 status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
 5224                                                 pcmdinfo->in.u.query_fpm_val.scratch,
 5225                                                 pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
 5226                                                 &val_mem, true, IRDMA_CQP_WAIT_EVENT);
 5227                 break;
 5228         case IRDMA_OP_COMMIT_FPM_VAL:
 5229                 val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
 5230                 val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
 5231                 status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
 5232                                                  pcmdinfo->in.u.commit_fpm_val.scratch,
 5233                                                  pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
 5234                                                  &val_mem,
 5235                                                  true,
 5236                                                  IRDMA_CQP_WAIT_EVENT);
 5237                 break;
 5238         case IRDMA_OP_STATS_ALLOCATE:
 5239                 alloc = true;
 5240                 /* fallthrough */
 5241         case IRDMA_OP_STATS_FREE:
 5242                 status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
 5243                                                     &pcmdinfo->in.u.stats_manage.info,
 5244                                                     alloc,
 5245                                                     pcmdinfo->in.u.stats_manage.scratch);
 5246                 break;
 5247         case IRDMA_OP_STATS_GATHER:
 5248                 status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
 5249                                                &pcmdinfo->in.u.stats_gather.info,
 5250                                                pcmdinfo->in.u.stats_gather.scratch);
 5251                 break;
 5252         case IRDMA_OP_WS_MODIFY_NODE:
 5253                 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
 5254                                                  &pcmdinfo->in.u.ws_node.info,
 5255                                                  IRDMA_MODIFY_NODE,
 5256                                                  pcmdinfo->in.u.ws_node.scratch);
 5257                 break;
 5258         case IRDMA_OP_WS_DELETE_NODE:
 5259                 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
 5260                                                  &pcmdinfo->in.u.ws_node.info,
 5261                                                  IRDMA_DEL_NODE,
 5262                                                  pcmdinfo->in.u.ws_node.scratch);
 5263                 break;
 5264         case IRDMA_OP_WS_ADD_NODE:
 5265                 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
 5266                                                  &pcmdinfo->in.u.ws_node.info,
 5267                                                  IRDMA_ADD_NODE,
 5268                                                  pcmdinfo->in.u.ws_node.scratch);
 5269                 break;
 5270         case IRDMA_OP_SET_UP_MAP:
 5271                 status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
 5272                                              &pcmdinfo->in.u.up_map.info,
 5273                                              pcmdinfo->in.u.up_map.scratch);
 5274                 break;
 5275         case IRDMA_OP_QUERY_RDMA_FEATURES:
 5276                 status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
 5277                                                       &pcmdinfo->in.u.query_rdma.query_buff_mem,
 5278                                                       pcmdinfo->in.u.query_rdma.scratch);
 5279                 break;
 5280         case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
 5281                 status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
 5282                                                       pcmdinfo->in.u.del_arp_cache_entry.scratch,
 5283                                                       pcmdinfo->in.u.del_arp_cache_entry.arp_index,
 5284                                                       pcmdinfo->post_sq);
 5285                 break;
 5286         case IRDMA_OP_MANAGE_APBVT_ENTRY:
 5287                 status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
 5288                                                      &pcmdinfo->in.u.manage_apbvt_entry.info,
 5289                                                      pcmdinfo->in.u.manage_apbvt_entry.scratch,
 5290                                                      pcmdinfo->post_sq);
 5291                 break;
 5292         case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
 5293                 status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
 5294                                                            &pcmdinfo->in.u.manage_qhash_table_entry.info,
 5295                                                            pcmdinfo->in.u.manage_qhash_table_entry.scratch,
 5296                                                            pcmdinfo->post_sq);
 5297                 break;
 5298         case IRDMA_OP_QP_MODIFY:
 5299                 status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
 5300                                             &pcmdinfo->in.u.qp_modify.info,
 5301                                             pcmdinfo->in.u.qp_modify.scratch,
 5302                                             pcmdinfo->post_sq);
 5303                 break;
 5304         case IRDMA_OP_QP_CREATE:
 5305                 status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
 5306                                             &pcmdinfo->in.u.qp_create.info,
 5307                                             pcmdinfo->in.u.qp_create.scratch,
 5308                                             pcmdinfo->post_sq);
 5309                 break;
 5310         case IRDMA_OP_QP_DESTROY:
 5311                 status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
 5312                                              pcmdinfo->in.u.qp_destroy.scratch,
 5313                                              pcmdinfo->in.u.qp_destroy.remove_hash_idx,
 5314                                              pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
 5315                                              pcmdinfo->post_sq);
 5316                 break;
 5317         case IRDMA_OP_ALLOC_STAG:
 5318                 status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
 5319                                              &pcmdinfo->in.u.alloc_stag.info,
 5320                                              pcmdinfo->in.u.alloc_stag.scratch,
 5321                                              pcmdinfo->post_sq);
 5322                 break;
 5323         case IRDMA_OP_MR_REG_NON_SHARED:
 5324                 status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
 5325                                                     &pcmdinfo->in.u.mr_reg_non_shared.info,
 5326                                                     pcmdinfo->in.u.mr_reg_non_shared.scratch,
 5327                                                     pcmdinfo->post_sq);
 5328                 break;
 5329         case IRDMA_OP_DEALLOC_STAG:
 5330                 status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
 5331                                                &pcmdinfo->in.u.dealloc_stag.info,
 5332                                                pcmdinfo->in.u.dealloc_stag.scratch,
 5333                                                pcmdinfo->post_sq);
 5334                 break;
 5335         case IRDMA_OP_MW_ALLOC:
 5336                 status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
 5337                                            &pcmdinfo->in.u.mw_alloc.info,
 5338                                            pcmdinfo->in.u.mw_alloc.scratch,
 5339                                            pcmdinfo->post_sq);
 5340                 break;
 5341         case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
 5342                 status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
 5343                                                       &pcmdinfo->in.u.add_arp_cache_entry.info,
 5344                                                       pcmdinfo->in.u.add_arp_cache_entry.scratch,
 5345                                                       pcmdinfo->post_sq);
 5346                 break;
 5347         case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
 5348                 status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
 5349                                                         pcmdinfo->in.u.alloc_local_mac_entry.scratch,
 5350                                                         pcmdinfo->post_sq);
 5351                 break;
 5352         case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
 5353                 status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
 5354                                                       &pcmdinfo->in.u.add_local_mac_entry.info,
 5355                                                       pcmdinfo->in.u.add_local_mac_entry.scratch,
 5356                                                       pcmdinfo->post_sq);
 5357                 break;
 5358         case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
 5359                 status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
 5360                                                       pcmdinfo->in.u.del_local_mac_entry.scratch,
 5361                                                       pcmdinfo->in.u.del_local_mac_entry.entry_idx,
 5362                                                       pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
 5363                                                       pcmdinfo->post_sq);
 5364                 break;
 5365         case IRDMA_OP_AH_CREATE:
 5366                 status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
 5367                                             &pcmdinfo->in.u.ah_create.info,
 5368                                             pcmdinfo->in.u.ah_create.scratch);
 5369                 break;
 5370         case IRDMA_OP_AH_DESTROY:
 5371                 status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
 5372                                              &pcmdinfo->in.u.ah_destroy.info,
 5373                                              pcmdinfo->in.u.ah_destroy.scratch);
 5374                 break;
 5375         case IRDMA_OP_MC_CREATE:
 5376                 status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
 5377                                                    &pcmdinfo->in.u.mc_create.info,
 5378                                                    pcmdinfo->in.u.mc_create.scratch);
 5379                 break;
 5380         case IRDMA_OP_MC_DESTROY:
 5381                 status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
 5382                                                     &pcmdinfo->in.u.mc_destroy.info,
 5383                                                     pcmdinfo->in.u.mc_destroy.scratch);
 5384                 break;
 5385         case IRDMA_OP_MC_MODIFY:
 5386                 status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
 5387                                                    &pcmdinfo->in.u.mc_modify.info,
 5388                                                    pcmdinfo->in.u.mc_modify.scratch);
 5389                 break;
 5390         default:
 5391                 status = -EOPNOTSUPP;
 5392                 break;
 5393         }
 5394 
 5395         return status;
 5396 }
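/*
 * Editor's note: an illustrative sketch (not in the driver) of how a caller
 * might fill in a cqp_cmds_info before handing it to the dispatcher above.
 * Only fields that irdma_exec_cqp_cmd() visibly consumes for
 * IRDMA_OP_CQ_CREATE are set; in the real driver the request must be
 * allocated so that it outlives this call, because irdma_process_cqp_cmd()
 * may queue it on cqp_cmd_head instead of executing it immediately.
 */
static int example_post_cq_create(struct irdma_sc_dev *dev,
                                  struct irdma_sc_cq *cq, u64 scratch)
{
        struct cqp_cmds_info info = {0};

        info.cqp_cmd = IRDMA_OP_CQ_CREATE;
        info.post_sq = true;    /* ring the CQP SQ doorbell right away */
        info.in.u.cq_create.cq = cq;
        info.in.u.cq_create.scratch = scratch;
        info.in.u.cq_create.check_overflow = true;

        return irdma_process_cqp_cmd(dev, &info);
}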
 5397 
 5398 /**
 5399  * irdma_process_cqp_cmd - process all cqp commands
 5400  * @dev: sc device struct
 5401  * @pcmdinfo: cqp command info
 5402  */
 5403 int
 5404 irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
 5405                       struct cqp_cmds_info *pcmdinfo)
 5406 {
 5407         int status = 0;
 5408         unsigned long flags;
 5409 
 5410         spin_lock_irqsave(&dev->cqp_lock, flags);
 5411         if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
 5412                 status = irdma_exec_cqp_cmd(dev, pcmdinfo);
 5413         else
 5414                 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
 5415         spin_unlock_irqrestore(&dev->cqp_lock, flags);
 5416         return status;
 5417 }
 5418 
 5419 /**
 5420  * irdma_process_bh - called from the tasklet to drain the CQP command backlog
 5421  * @dev: sc device struct
 5422  */
 5423 int
 5424 irdma_process_bh(struct irdma_sc_dev *dev)
 5425 {
 5426         int status = 0;
 5427         struct cqp_cmds_info *pcmdinfo;
 5428         unsigned long flags;
 5429 
 5430         spin_lock_irqsave(&dev->cqp_lock, flags);
 5431         while (!list_empty(&dev->cqp_cmd_head) &&
 5432                !irdma_cqp_ring_full(dev->cqp)) {
 5433                 pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
 5434                 status = irdma_exec_cqp_cmd(dev, pcmdinfo);
 5435                 if (status)
 5436                         break;
 5437         }
 5438         spin_unlock_irqrestore(&dev->cqp_lock, flags);
 5439         return status;
 5440 }
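/*
 * Editor's note: irdma_process_cqp_cmd() and irdma_process_bh() together
 * form a simple producer/consumer backlog guarded by cqp_lock.  A command
 * runs inline only when the backlog is empty and the CQP ring has room,
 * which keeps submissions in FIFO order; otherwise it is appended to
 * cqp_cmd_head and the bottom half drains the list in arrival order as
 * ring slots free up, stopping early if any command fails.
 */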
 5441 
 5442 /**
 5443  * irdma_cfg_aeq - Configure the AEQ interrupt vector
 5444  * @dev: pointer to the device structure
 5445  * @idx: vector index
 5446  * @enable: true to enable, false to disable
 5447  */
 5448 void
 5449 irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
 5450 {
 5451         u32 reg_val;
 5452         reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
 5453             FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
 5454             FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);
 5455 
 5456         writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
 5457 }
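/*
 * Editor's note: a worked example of the FIELD_PREP() composition above,
 * under hypothetical field positions (the real masks live in the irdma
 * register definitions): if CAUSE_ENA were bit 30, MSIX_INDX bits 0..10
 * and ITR_INDX bits 11..12, then enable == true, idx == 3 and
 * IRDMA_IDX_NOITR == 3 would yield (1 << 30) | 3 | (3 << 11).  Each
 * FIELD_PREP() shifts its value up to the mask's lowest set bit, and the
 * ORed result is written to the AEQ control register in one store.
 */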
 5458 
 5459 /**
 5460  * sc_vsi_update_stats - Update statistics
 5461  * @vsi: sc_vsi instance to update
 5462  */
 5463 void
 5464 sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
 5465 {
 5466         struct irdma_gather_stats *gather_stats;
 5467         struct irdma_gather_stats *last_gather_stats;
 5468 
 5469         gather_stats = vsi->pestat->gather_info.gather_stats_va;
 5470         last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
 5471         irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
 5472                            last_gather_stats, vsi->dev->hw_stats_map,
 5473                            vsi->dev->hw_attrs.max_stat_idx);
 5474 }
 5475 
 5476 /**
 5477  * irdma_wait_pe_ready - Check if firmware is ready
 5478  * @dev: provides access to registers
 5479  */
 5480 static int
 5481 irdma_wait_pe_ready(struct irdma_sc_dev *dev)
 5482 {
 5483         u32 statuscpu0;
 5484         u32 statuscpu1;
 5485         u32 statuscpu2;
 5486         u32 retrycount = 0;
 5487 
 5488         do {
 5489                 statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
 5490                 statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
 5491                 statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
 5492                 if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
 5493                     statuscpu2 == 0x80)
 5494                         return 0;
 5495                 mdelay(1000);
 5496         } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
 5497         return -1;
 5498 }
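/*
 * Editor's note: with dev->hw_attrs.max_pe_ready_count set to 14 (see
 * irdma_sc_dev_init() below) and mdelay(1000) between polls, this loop
 * busy-waits for up to roughly 15 seconds (15 reads, one second apart)
 * for all three PE CPUs to report the 0x80 ready pattern before failing.
 */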
 5499 
 5500 static inline void
 5501 irdma_sc_init_hw(struct irdma_sc_dev *dev)
 5502 {
 5503         switch (dev->hw_attrs.uk_attrs.hw_rev) {
 5504         case IRDMA_GEN_2:
 5505                 icrdma_init_hw(dev);
 5506                 break;
 5507         }
 5508 }
 5509 
 5510 /**
 5511  * irdma_sc_dev_init - Initialize control part of device
 5512  * @dev: Device pointer
 5513  * @info: Device init info
 5514  */
 5515 int
 5516 irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
 5517 {
 5518         u32 val;
 5519         int ret_code = 0;
 5520         u8 db_size;
 5521 
 5522         INIT_LIST_HEAD(&dev->cqp_cmd_head);     /* for CQP command backlog */
 5523         mutex_init(&dev->ws_mutex);
 5524         dev->debug_mask = info->debug_mask;
 5525         dev->hmc_fn_id = info->hmc_fn_id;
 5526         dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
 5527         dev->fpm_query_buf = info->fpm_query_buf;
 5528         dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
 5529         dev->fpm_commit_buf = info->fpm_commit_buf;
 5530         dev->hw = info->hw;
 5531         dev->hw->hw_addr = info->bar0;
 5532         /* Set up the hardware limits; HMC may restrict them further */
 5533         dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
 5534         dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
 5535         dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
 5536         dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
 5537         dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
 5538         dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
 5539         dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
 5540         dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
 5541         dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
 5542         dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
 5543         dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
 5544         dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
 5545         dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
 5546         dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
 5547 
 5548         dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
 5549         dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
 5550         dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
 5551         dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
 5552 
 5553         dev->hw_attrs.max_pe_ready_count = 14;
 5554         dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
 5555         dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
 5556         dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
 5557 
 5558         irdma_sc_init_hw(dev);
 5559 
 5560         if (irdma_wait_pe_ready(dev))
 5561                 return -ETIMEDOUT;
 5562 
 5563         val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
 5564         db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
 5565         if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
 5566                 irdma_debug(dev, IRDMA_DEBUG_DEV,
 5567                             "RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
 5568                             val, db_size);
 5569                 return -ENODEV;
 5570         }
 5571         dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
 5572 
 5573         return ret_code;
 5574 }
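/*
 * Editor's note: an illustrative sketch (not in the driver) of the minimal
 * irdma_device_init_info a caller would populate before irdma_sc_dev_init().
 * Only fields the function above visibly consumes are shown; the helper
 * name and parameter types are assumptions, and the FPM query/commit DMA
 * buffers are presumed to have been allocated elsewhere.
 */
static int example_dev_init(struct irdma_sc_dev *dev, struct irdma_hw *hw,
                            u8 __iomem *bar0, struct irdma_dma_mem *query,
                            struct irdma_dma_mem *commit)
{
        struct irdma_device_init_info info = {0};

        info.hw = hw;
        info.bar0 = bar0;
        info.hmc_fn_id = 0;     /* PF HMC function id */
        info.debug_mask = 0;
        info.fpm_query_buf = query->va;
        info.fpm_query_buf_pa = query->pa;
        info.fpm_commit_buf = commit->va;
        info.fpm_commit_buf_pa = commit->pa;

        /* Returns -ETIMEDOUT if the PE never comes ready, -ENODEV if the
         * doorbell BAR is not enabled, 0 on success. */
        return irdma_sc_dev_init(dev, &info);
}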
 5575 
 5576 /**
 5577  * irdma_stat_val - Extract HW counter value from statistics buffer
 5578  * @stats_val: pointer to statistics buffer
 5579  * @byteoff: byte offset of counter value in the buffer (8B-aligned)
 5580  * @bitoff: bit offset of counter value within 8B entry
 5581  * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
 5582  */
 5583 static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff,
 5584                                  u8 bitoff, u64 bitmask){
 5585         u16 idx = byteoff / sizeof(*stats_val);
 5586 
 5587         return (stats_val[idx] >> bitoff) & bitmask;
 5588 }
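/*
 * Editor's note: a worked example of the extraction above.  For a map
 * entry with byteoff = 24, bitoff = 8 and bitmask = 0xffffff (a 24-bit
 * counter), idx = 24 / 8 = 3, so the counter is bits 8..31 of the fourth
 * 64-bit word: (stats_val[3] >> 8) & 0xffffff.
 */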
 5589 
 5590 /**
 5591  * irdma_stat_delta - Calculate counter delta
 5592  * @new_val: updated counter value
 5593  * @old_val: last counter value
 5594  * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
 5595  */
 5596 static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val) {
 5597         if (new_val >= old_val)
 5598                 return new_val - old_val;
 5599         else
 5600                 /* roll-over case */
 5601                 return max_val - old_val + new_val + 1;
 5602 }
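/*
 * Editor's note: a worked rollover example for the delta above.  For a
 * 24-bit counter (max_val = 0xffffff) that wrapped from old_val = 0xfffffe
 * to new_val = 0x1, the result is 0xffffff - 0xfffffe + 0x1 + 1 = 3: one
 * step to reach 0xffffff, one to wrap to 0, and one to reach 1.
 */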
 5603 
 5604 /**
 5605  * irdma_update_stats - Update statistics
 5606  * @hw_stats: hw_stats instance to update
 5607  * @gather_stats: updated stat counters
 5608  * @last_gather_stats: last stat counters
 5609  * @map: HW stat map (hw_stats => gather_stats)
 5610  * @max_stat_idx: number of HW stats
 5611  */
 5612 void
 5613 irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
 5614                    struct irdma_gather_stats *gather_stats,
 5615                    struct irdma_gather_stats *last_gather_stats,
 5616                    const struct irdma_hw_stat_map *map,
 5617                    u16 max_stat_idx)
 5618 {
 5619         u64 *stats_val = hw_stats->stats_val;
 5620         u16 i;
 5621 
 5622         for (i = 0; i < max_stat_idx; i++) {
 5623                 u64 new_val = irdma_stat_val(gather_stats->val,
 5624                                              map[i].byteoff, map[i].bitoff,
 5625                                              map[i].bitmask);
 5626                 u64 last_val = irdma_stat_val(last_gather_stats->val,
 5627                                               map[i].byteoff, map[i].bitoff,
 5628                                               map[i].bitmask);
 5629 
 5630                 stats_val[i] += irdma_stat_delta(new_val, last_val,
 5631                                                  map[i].bitmask);
 5632         }
 5633 
 5634         irdma_memcpy(last_gather_stats, gather_stats,
 5635                      sizeof(*last_gather_stats));
 5636 }
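/*
 * Editor's note: an illustrative trace (not in the driver) of one loop
 * iteration above for a hypothetical map entry { byteoff = 0, bitoff = 0,
 * bitmask = 0xffffff }.  If the gathered word holds 0x5 and the last
 * snapshot held 0xfffffe, irdma_stat_val() yields 0x5 and 0xfffffe,
 * irdma_stat_delta() returns 7 (the wrap included), and stats_val[i]
 * grows by 7.  Copying gather_stats over last_gather_stats at the end
 * re-bases the next gather's deltas against this snapshot.
 */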
