FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_roce.c


    1 /*
    2  * Copyright (c) 2018-2019 Cavium, Inc.
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * File : ecore_roce.c
   30  */
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "bcm_osal.h"
   35 #include "ecore.h"
   36 #include "ecore_status.h"
   37 #include "ecore_sp_commands.h"
   38 #include "ecore_cxt.h"
   39 #include "ecore_rdma.h"
   40 #include "reg_addr.h"
   41 #include "ecore_rt_defs.h"
   42 #include "ecore_init_ops.h"
   43 #include "ecore_hw.h"
   44 #include "ecore_mcp.h"
   45 #include "ecore_init_fw_funcs.h"
   46 #include "ecore_int.h"
   47 #include "pcics_reg_driver.h"
   48 #include "ecore_iro.h"
   49 #include "ecore_gtt_reg_addr.h"
   50 #ifndef LINUX_REMOVE
   51 #include "ecore_tcp_ip.h"
   52 #endif
   53 
   54 #ifdef _NTDDK_
   55 #pragma warning(push)
   56 #pragma warning(disable : 28167)
   57 #pragma warning(disable : 28123)
   58 #pragma warning(disable : 28182)
   59 #pragma warning(disable : 6011)
   60 #endif
   61 
   62 static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid);
   63 
   64 static enum _ecore_status_t
   65 ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
   66                        u8 fw_event_code,
   67                        u16 OSAL_UNUSED echo,
   68                        union event_ring_data *data,
   69                        u8 OSAL_UNUSED fw_return_code)
   70 {
   71         if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
   72                 u16 icid = (u16)OSAL_LE32_TO_CPU(
   73                                 data->rdma_data.rdma_destroy_qp_data.cid);
   74 
    75         /* The icid is released in this async event only if the icid
    76          * was offloaded to the FW. If it was not offloaded, the release
    77          * is handled in ecore_roce_sp_destroy_qp.
    78          */
   79                 ecore_roce_free_icid(p_hwfn, icid);
   80         } else
   81                 p_hwfn->p_rdma_info->events.affiliated_event(
   82                         p_hwfn->p_rdma_info->events.context,
   83                         fw_event_code,
   84                         (void *)&data->rdma_data.async_handle);
   85 
   86         return ECORE_SUCCESS;
   87 }
   88 
   89 #ifdef CONFIG_DCQCN
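       /* DCQCN (Data Center Quantized Congestion Notification) is the RoCE
        * congestion-control scheme; the helpers below start/stop the hardware
        * rate limiters (RL) used by its reaction point.
        */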
   90 static enum _ecore_status_t ecore_roce_start_rl(
   91         struct ecore_hwfn *p_hwfn,
   92         struct ecore_roce_dcqcn_params *dcqcn_params)
   93 {
   94         struct ecore_rl_update_params params;
   95 
   96         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
   97         OSAL_MEMSET(&params, 0, sizeof(params));
   98 
   99         params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
  100         params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
  101                 ecore_init_qm_get_num_pf_rls(p_hwfn);
  102         params.dcqcn_update_param_flg = 1;
  103         params.rl_init_flg = 1;
  104         params.rl_start_flg = 1;
  105         params.rl_stop_flg = 0;
  106         params.rl_dc_qcn_flg = 1;
  107 
  108         params.rl_bc_rate = dcqcn_params->rl_bc_rate;
  109         params.rl_max_rate = dcqcn_params->rl_max_rate;
  110         params.rl_r_ai = dcqcn_params->rl_r_ai;
  111         params.rl_r_hai = dcqcn_params->rl_r_hai;
  112         params.dcqcn_gd = dcqcn_params->dcqcn_gd;
  113         params.dcqcn_k_us = dcqcn_params->dcqcn_k_us;
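               /* "timeuot" matches the field name as spelled in the vendor
                * rl-update parameters struct; the source field on the right
                * uses the correct spelling
                */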
  114         params.dcqcn_timeuot_us = dcqcn_params->dcqcn_timeout_us;
  115 
  116         return ecore_sp_rl_update(p_hwfn, &params);
  117 }
  118 
  119 enum _ecore_status_t ecore_roce_stop_rl(struct ecore_hwfn *p_hwfn)
  120 {
  121         struct ecore_rl_update_params params;
  122 
  123         if (!p_hwfn->p_rdma_info->roce.dcqcn_reaction_point)
  124                 return ECORE_SUCCESS;
  125 
  126         OSAL_MEMSET(&params, 0, sizeof(params));
  127         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
  128 
  129         params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
  130         params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
  131                 ecore_init_qm_get_num_pf_rls(p_hwfn);
  132         params.rl_stop_flg = 1;
  133 
  134         return ecore_sp_rl_update(p_hwfn, &params);
  135 }
  136 
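       /* bit positions in the value written to NIG_REG_ROCE_DUPLICATE_TO_HOST
        * by ecore_roce_dcqcn_cfg() below
        */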
  137 #define NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH 2
  138 #define NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN 1
  139 
  140 enum _ecore_status_t ecore_roce_dcqcn_cfg(
  141         struct ecore_hwfn                       *p_hwfn,
  142         struct ecore_roce_dcqcn_params          *params,
  143         struct roce_init_func_ramrod_data       *p_ramrod,
  144         struct ecore_ptt                        *p_ptt)
  145 {
  146         u32 val = 0;
  147         enum _ecore_status_t rc = ECORE_SUCCESS;
  148 
  149         if (!p_hwfn->pf_params.rdma_pf_params.enable_dcqcn ||
  150             p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
  151                 return rc;
  152 
  153         p_hwfn->p_rdma_info->roce.dcqcn_enabled = 0;
  154         if (params->notification_point) {
  155                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  156                            "Configuring dcqcn notification point: timeout = 0x%x\n",
  157                            params->cnp_send_timeout);
  158                 p_ramrod->roce.cnp_send_timeout = params->cnp_send_timeout;
  159                 p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
   160                 /* Configure NIG to duplicate to host and storm when:
   161                  *  - ECN == 2'b11 (notification point)
   162                  */
  163                 val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN;
  164         }
  165 
  166         if (params->reaction_point) {
  167                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  168                            "Configuring dcqcn reaction point\n");
  169                 p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
  170                 p_hwfn->p_rdma_info->roce.dcqcn_reaction_point = 1;
  171                 /* Configure NIG to duplicate to host and storm when:
  172                  * - BTH opcode equals bth_hdr_flow_ctrl_opcode_2
  173                  * (reaction point)
  174                  */
  175                 val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH;
  176 
  177                 rc = ecore_roce_start_rl(p_hwfn, params);
  178         }
  179 
  180         if (rc)
  181                 return rc;
  182 
  183         p_ramrod->roce.cnp_dscp = params->cnp_dscp;
  184         p_ramrod->roce.cnp_vlan_priority = params->cnp_vlan_priority;
  185 
  186         ecore_wr(p_hwfn,
  187                  p_ptt,
  188                  NIG_REG_ROCE_DUPLICATE_TO_HOST,
  189                  val);
  190 
  191         return rc;
  192 }
  193 #endif
  194 
  195 enum _ecore_status_t ecore_roce_stop(struct ecore_hwfn *p_hwfn)
  196 {
  197         struct ecore_bmap *cid_map = &p_hwfn->p_rdma_info->cid_map;
  198         int wait_count = 0;
  199 
   200         /* When destroying a RoCE QP, control is returned to the
   201          * user after the synchronous part. The asynchronous part may
   202          * take a little longer. We delay for a short while if an
   203          * async destroy QP is still expected. Beyond the added delay
   204          * we clear the bitmap anyway.
   205          */
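               /* bounded wait: at most ~20 iterations x 100 ms = 2 seconds */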
  206         while (OSAL_BITMAP_WEIGHT(cid_map->bitmap, cid_map->max_count)) {
  207                 OSAL_MSLEEP(100);
  208                 if (wait_count++ > 20) {
  209                         DP_NOTICE(p_hwfn, false,
  210                                   "cid bitmap wait timed out\n");
  211                         break;
  212                 }
  213         }
  214 
  215         ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
  216 
  217         return ECORE_SUCCESS;
  218 }
  219 
  220 static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid,
  221                                  __le32 *dst_gid) {
  222         u32 i;
  223 
  224         if (qp->roce_mode == ROCE_V2_IPV4) {
  225                 /* The IPv4 addresses shall be aligned to the highest word.
  226                  * The lower words must be zero.
  227                  */
  228                 OSAL_MEMSET(src_gid, 0, sizeof(union ecore_gid));
  229                 OSAL_MEMSET(dst_gid, 0, sizeof(union ecore_gid));
  230                 src_gid[3] = OSAL_CPU_TO_LE32(qp->sgid.ipv4_addr);
  231                 dst_gid[3] = OSAL_CPU_TO_LE32(qp->dgid.ipv4_addr);
  232         } else {
   233                 /* RoCE v1 and RoCE v2 (IPv6): GIDs and IPv6 addresses coincide in
   234                  * location and size
  235                  */
  236                 for (i = 0; i < OSAL_ARRAY_SIZE(qp->sgid.dwords); i++) {
  237                         src_gid[i] = OSAL_CPU_TO_LE32(qp->sgid.dwords[i]);
  238                         dst_gid[i] = OSAL_CPU_TO_LE32(qp->dgid.dwords[i]);
  239                 }
  240         }
  241 }
  242 
  243 static enum roce_flavor ecore_roce_mode_to_flavor(enum roce_mode roce_mode)
  244 {
  245         enum roce_flavor flavor;
  246 
  247         switch (roce_mode) {
  248         case ROCE_V1:
  249                 flavor = PLAIN_ROCE;
  250                 break;
  251         case ROCE_V2_IPV4:
  252                 flavor = RROCE_IPV4;
  253                 break;
  254         case ROCE_V2_IPV6:
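                       /* ROCE_V2_IPV6 and RROCE_IPV6 appear to share the same
                        * numeric value, so the cast below preserves the
                        * intended flavor
                        */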
  255                 flavor = (enum roce_flavor)ROCE_V2_IPV6;
  256                 break;
  257         default:
  258                 flavor = (enum roce_flavor)MAX_ROCE_MODE;
  259                 break;
  260         }
  261         return flavor;
  262 }
  263 
  264 #if 0
  265 static void ecore_roce_free_cid_pair(struct ecore_hwfn *p_hwfn, u16 cid)
  266 {
  267         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  268         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, cid);
  269         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, cid + 1);
  270         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  271 }
  272 #endif
  273 
  274 static void ecore_roce_free_qp(struct ecore_hwfn *p_hwfn, u16 qp_idx)
  275 {
  276         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  277         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, qp_idx);
  278         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  279 }
  280 
  281 #define ECORE_ROCE_CREATE_QP_ATTEMPTS           (20)
  282 #define ECORE_ROCE_CREATE_QP_MSLEEP             (10)
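       /* worst case, ecore_roce_wait_free_cids() below waits about
        * 20 attempts x 10 ms = 200 ms before giving up
        */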
  283 
  284 static enum _ecore_status_t ecore_roce_wait_free_cids(struct ecore_hwfn *p_hwfn, u32 qp_idx)
  285 {
  286         struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
  287         bool cids_free = false;
  288         u32 icid, iter = 0;
  289         int req, resp;
  290 
  291         icid = ECORE_ROCE_QP_TO_ICID(qp_idx);
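               /* each QP owns two consecutive cids: responder at icid,
                * requester at icid + 1
                */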
  292 
  293         /* Make sure that the cids that were used by the QP index are free.
  294          * This is necessary because the destroy flow returns to the user before
  295          * the device finishes clean up.
  296          * It can happen in the following flows:
  297          * (1) ib_destroy_qp followed by an ib_create_qp
   298          * (2) ib_modify_qp to RESET followed (not immediately) by an
   299          *     ib_modify_qp to RTR
  300          */
  301 
  302         do {
  303                 OSAL_SPIN_LOCK(&p_rdma_info->lock);
  304                 resp = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid);
  305                 req = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid + 1);
  306                 if (!resp && !req)
  307                         cids_free = true;
  308 
  309                 OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
  310 
  311                 if (!cids_free) {
  312                         OSAL_MSLEEP(ECORE_ROCE_CREATE_QP_MSLEEP);
  313                         iter++;
  314                 }
  315         } while (!cids_free && iter < ECORE_ROCE_CREATE_QP_ATTEMPTS);
  316 
  317         if (!cids_free) {
  318                 DP_ERR(p_hwfn->p_dev,
  319                        "responder and/or requester CIDs are still in use. resp=%d, req=%d\n",
  320                        resp, req);
  321                 return ECORE_AGAIN;
  322         }
  323 
  324         return ECORE_SUCCESS;
  325 }
  326 
  327 enum _ecore_status_t ecore_roce_alloc_qp_idx(
  328                 struct ecore_hwfn *p_hwfn, u16 *qp_idx16)
  329 {
  330         struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
  331         u32 start_cid, icid, cid, qp_idx;
  332         enum _ecore_status_t rc;
  333 
  334         OSAL_SPIN_LOCK(&p_rdma_info->lock);
  335         rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->qp_map, &qp_idx);
  336         if (rc != ECORE_SUCCESS) {
  337                 DP_NOTICE(p_hwfn, false, "failed to allocate qp\n");
  338                 OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
  339                 return rc;
  340         }
  341 
  342         OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
  343 
   344         /* Verify that the cid bits of this qp index are clear */
  345         rc = ecore_roce_wait_free_cids(p_hwfn, qp_idx);
  346         if (rc) {
  347                 rc = ECORE_UNKNOWN_ERROR;
  348                 goto err;
  349         }
  350 
   351         /* Allocate a DMA-able context for an ILT page for the associated
   352          * iids, if one does not already exist.
  353          * Note: If second allocation fails there's no need to free the first as
  354          *       it will be used in the future.
  355          */
  356         icid = ECORE_ROCE_QP_TO_ICID(qp_idx);
  357         start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
  358         cid = start_cid + icid;
  359 
  360         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid);
  361         if (rc != ECORE_SUCCESS)
  362                 goto err;
  363 
  364         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid + 1);
  365         if (rc != ECORE_SUCCESS)
  366                 goto err;
  367 
  368         /* qp index is under 2^16 */
  369         *qp_idx16 = (u16)qp_idx;
  370 
  371         return ECORE_SUCCESS;
  372 
  373 err:
  374         ecore_roce_free_qp(p_hwfn, (u16)qp_idx);
  375 
  376         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
  377 
  378         return rc;
  379 }
  380 
  381 static void ecore_roce_set_cid(struct ecore_hwfn *p_hwfn,
  382                              u32 cid)
  383 {
  384         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  385         ecore_bmap_set_id(p_hwfn,
  386                           &p_hwfn->p_rdma_info->cid_map,
  387                           cid);
  388         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  389 }
  390 
  391 static enum _ecore_status_t ecore_roce_sp_create_responder(
  392         struct ecore_hwfn    *p_hwfn,
  393         struct ecore_rdma_qp *qp)
  394 {
  395         struct roce_create_qp_resp_ramrod_data *p_ramrod;
  396         u16 regular_latency_queue, low_latency_queue;
  397         struct ecore_sp_init_data init_data;
  398         enum roce_flavor roce_flavor;
  399         struct ecore_spq_entry *p_ent;
  400         enum _ecore_status_t rc;
  401         u32 cid_start;
  402         u16 fw_srq_id;
  403         bool is_xrc;
  404 
  405         if (!qp->has_resp)
  406                 return ECORE_SUCCESS;
  407 
  408         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp_idx = %08x\n", qp->qp_idx);
  409 
  410         /* Allocate DMA-able memory for IRQ */
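               /* "IRQ" here appears to be the responder's inbound RDMA
                * read-request queue ring (cf. max_ird below), not an interrupt
                */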
  411         qp->irq_num_pages = 1;
  412         qp->irq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  413                                           &qp->irq_phys_addr,
  414                                           RDMA_RING_PAGE_SIZE);
  415         if (!qp->irq) {
  416                 rc = ECORE_NOMEM;
  417                 DP_NOTICE(p_hwfn, false,
  418                           "ecore create responder failed: cannot allocate memory (irq). rc = %d\n",
  419                           rc);
  420                 return rc;
  421         }
  422 
  423         /* Get SPQ entry */
  424         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
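               /* the responder ramrod is posted on the first cid of the
                * QP's cid pair
                */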
  425         init_data.cid = qp->icid;
  426         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  427         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  428 
  429         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
  430                                    PROTOCOLID_ROCE, &init_data);
  431         if (rc != ECORE_SUCCESS)
  432                 goto err;
  433 
  434         p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
  435 
  436         p_ramrod->flags = 0;
  437 
  438         roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
  439         SET_FIELD(p_ramrod->flags,
  440                   ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
  441                   roce_flavor);
  442 
  443         SET_FIELD(p_ramrod->flags,
  444                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
  445                   qp->incoming_rdma_read_en);
  446 
  447         SET_FIELD(p_ramrod->flags,
  448                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
  449                   qp->incoming_rdma_write_en);
  450 
  451         SET_FIELD(p_ramrod->flags,
  452                   ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
  453                   qp->incoming_atomic_en);
  454 
  455         SET_FIELD(p_ramrod->flags,
  456                   ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
  457                   qp->e2e_flow_control_en);
  458 
  459         SET_FIELD(p_ramrod->flags,
  460                   ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG,
  461                   qp->use_srq);
  462 
  463         SET_FIELD(p_ramrod->flags,
  464                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
  465                   qp->fmr_and_reserved_lkey);
  466 
  467         SET_FIELD(p_ramrod->flags,
  468                   ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
  469                   ecore_rdma_is_xrc_qp(qp));
  470 
  471         /* TBD: future use only
  472          * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK
  473          * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT
  474          */
  475         SET_FIELD(p_ramrod->flags,
  476                   ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
  477                   qp->min_rnr_nak_timer);
  478 
  479         p_ramrod->max_ird =
  480                 qp->max_rd_atomic_resp;
  481         p_ramrod->traffic_class = qp->traffic_class_tos;
  482         p_ramrod->hop_limit = qp->hop_limit_ttl;
  483         p_ramrod->irq_num_pages = qp->irq_num_pages;
  484         p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
  485         p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
  486         p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
  487         p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
  488         p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->rq_psn);
  489         p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
  490         p_ramrod->rq_num_pages = OSAL_CPU_TO_LE16(qp->rq_num_pages);
  491         DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
  492         DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
  493         ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
  494         p_ramrod->qp_handle_for_async.hi =
  495                         OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
  496         p_ramrod->qp_handle_for_async.lo =
  497                         OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
  498         p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
  499         p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
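               /* cq_cid packs opaque_fid into the upper 16 bits and the CQ id
                * into the lower 16
                */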
  500         p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
  501         p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(qp->xrcd_id);
  502 
  503 #ifdef CONFIG_DCQCN
   504         /* when dcqcn is enabled, physical queues are determined according to qp id */
  505         if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
  506                 regular_latency_queue =
  507                         ecore_get_cm_pq_idx_rl(p_hwfn,
  508                                                (qp->icid >> 1) %
  509                                                         ROCE_DCQCN_RP_MAX_QPS);
  510         else
  511 #endif
  512                 regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
  513         low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
  514 
  515         p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
  516         p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
  517         p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
  518 
  519         ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
  520         ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
  521 
  522         p_ramrod->udp_src_port = qp->udp_src_port;
  523         p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
  524         is_xrc = ecore_rdma_is_xrc_qp(qp);
  525         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, qp->srq_id, is_xrc);
  526         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
  527         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(p_hwfn->hw_info.opaque_fid);
  528 
  529         p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
  530                                      qp->stats_queue;
  531 
  532         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  533 
  534         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d regular physical queue = 0x%x, low latency physical queue 0x%x\n",
  535                    rc, regular_latency_queue, low_latency_queue);
  536 
  537         if (rc != ECORE_SUCCESS)
  538                 goto err;
  539 
  540         qp->resp_offloaded = true;
  541         qp->cq_prod.resp = 0;
  542 
  543         cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
  544                                                   p_hwfn->p_rdma_info->proto);
  545         ecore_roce_set_cid(p_hwfn, qp->icid - cid_start);
  546 
  547         return rc;
  548 
  549 err:
  550         DP_NOTICE(p_hwfn, false, "create responder - failed, rc = %d\n", rc);
  551         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  552                                qp->irq,
  553                                qp->irq_phys_addr,
  554                                qp->irq_num_pages *
  555                                RDMA_RING_PAGE_SIZE);
  556 
  557         return rc;
  558 }
  559 
  560 static enum _ecore_status_t ecore_roce_sp_create_requester(
  561         struct ecore_hwfn *p_hwfn,
  562         struct ecore_rdma_qp *qp)
  563 {
  564         struct roce_create_qp_req_ramrod_data *p_ramrod;
  565         u16 regular_latency_queue, low_latency_queue;
  566         struct ecore_sp_init_data init_data;
  567         enum roce_flavor roce_flavor;
  568         struct ecore_spq_entry *p_ent;
  569         enum _ecore_status_t rc;
  570         u32 cid_start;
  571 
  572         if (!qp->has_req)
  573                 return ECORE_SUCCESS;
  574 
  575         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
  576 
  577         /* Allocate DMA-able memory for ORQ */
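               /* "ORQ" appears to be the requester's outstanding RDMA
                * read-request queue ring (cf. max_ord below)
                */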
  578         qp->orq_num_pages = 1;
  579         qp->orq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  580                                           &qp->orq_phys_addr,
  581                                           RDMA_RING_PAGE_SIZE);
  582         if (!qp->orq)
  583         {
  584                 rc = ECORE_NOMEM;
  585                 DP_NOTICE(p_hwfn, false,
  586                           "ecore create requester failed: cannot allocate memory (orq). rc = %d\n",
  587                           rc);
  588                 return rc;
  589         }
  590 
  591         /* Get SPQ entry */
  592         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
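               /* the requester ramrod is posted on the second cid of the
                * QP's cid pair (icid + 1)
                */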
  593         init_data.cid = qp->icid + 1;
  594         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  595         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  596 
  597         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  598                                    ROCE_RAMROD_CREATE_QP,
  599                                    PROTOCOLID_ROCE, &init_data);
  600         if (rc != ECORE_SUCCESS)
  601                 goto err;
  602 
  603         p_ramrod = &p_ent->ramrod.roce_create_qp_req;
  604 
  605         p_ramrod->flags = 0;
  606 
  607         roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
  608         SET_FIELD(p_ramrod->flags,
  609                   ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
  610                   roce_flavor);
  611 
  612         SET_FIELD(p_ramrod->flags,
  613                   ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
  614                   qp->fmr_and_reserved_lkey);
  615 
  616         SET_FIELD(p_ramrod->flags,
  617                   ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
  618                   qp->signal_all);
  619 
  620         /* TBD:
  621          * future use only
  622          * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK
  623          * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT
  624          */
  625         SET_FIELD(p_ramrod->flags,
  626                   ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
  627                   qp->retry_cnt);
  628 
  629         SET_FIELD(p_ramrod->flags,
  630                   ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
  631                   qp->rnr_retry_cnt);
  632 
  633         SET_FIELD(p_ramrod->flags,
  634                   ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
  635                   ecore_rdma_is_xrc_qp(qp));
  636 
  637         p_ramrod->max_ord = qp->max_rd_atomic_req;
  638         p_ramrod->traffic_class = qp->traffic_class_tos;
  639         p_ramrod->hop_limit = qp->hop_limit_ttl;
  640         p_ramrod->orq_num_pages = qp->orq_num_pages;
  641         p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
  642         p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
  643         p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
  644         p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
  645         p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
  646         p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->sq_psn);
  647         p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
  648         p_ramrod->sq_num_pages = OSAL_CPU_TO_LE16(qp->sq_num_pages);
  649         DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
  650         DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
  651         ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
  652         p_ramrod->qp_handle_for_async.hi =
  653                         OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
  654         p_ramrod->qp_handle_for_async.lo =
  655                         OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
  656         p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
  657         p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
  658         p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
  659                                        qp->sq_cq_id);
  660 
  661 #ifdef CONFIG_DCQCN
   662         /* when dcqcn is enabled, physical queues are determined according to qp id */
  663         if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
  664                 regular_latency_queue =
  665                         ecore_get_cm_pq_idx_rl(p_hwfn,
  666                                                (qp->icid >> 1) %
  667                                                         ROCE_DCQCN_RP_MAX_QPS);
  668         else
  669 #endif
  670                 regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
  671         low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
  672 
  673         p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
  674         p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
  675         p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
  676 
  677         ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
  678         ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
  679 
  680         p_ramrod->udp_src_port = qp->udp_src_port;
  681         p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
  682         p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
  683                                      qp->stats_queue;
  684 
  685         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  686 
  687         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
  688 
  689         if (rc != ECORE_SUCCESS)
  690                 goto err;
  691 
  692         qp->req_offloaded = true;
  693         qp->cq_prod.req = 0;
  694 
  695         cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
  696                                                   p_hwfn->p_rdma_info->proto);
  697         ecore_roce_set_cid(p_hwfn, qp->icid + 1 - cid_start);
  698 
  699         return rc;
  700 
  701 err:
   702         DP_NOTICE(p_hwfn, false, "Create requester - failed, rc = %d\n", rc);
  703         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  704                                qp->orq,
  705                                qp->orq_phys_addr,
  706                                qp->orq_num_pages *
  707                                RDMA_RING_PAGE_SIZE);
  708         return rc;
  709 }
  710 
  711 static enum _ecore_status_t ecore_roce_sp_modify_responder(
  712         struct ecore_hwfn       *p_hwfn,
  713         struct ecore_rdma_qp    *qp,
  714         bool                    move_to_err,
  715         u32                     modify_flags)
  716 {
  717         struct roce_modify_qp_resp_ramrod_data  *p_ramrod;
  718         struct ecore_sp_init_data               init_data;
  719         struct ecore_spq_entry                  *p_ent;
  720         enum _ecore_status_t                    rc;
  721 
  722         if (!qp->has_resp)
  723                 return ECORE_SUCCESS;
  724 
  725         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
  726 
  727         if (move_to_err && !qp->resp_offloaded)
  728                 return ECORE_SUCCESS;
  729 
  730         /* Get SPQ entry */
  731         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  732         init_data.cid = qp->icid;
  733         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  734         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  735 
  736         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  737                                    ROCE_EVENT_MODIFY_QP,
  738                                    PROTOCOLID_ROCE, &init_data);
  739         if (rc != ECORE_SUCCESS)
  740         {
  741                 DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
  742                 return rc;
  743         }
  744 
  745         p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
  746 
  747         p_ramrod->flags = 0;
  748 
  749         SET_FIELD(p_ramrod->flags,
  750                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
  751                   move_to_err);
  752 
  753         SET_FIELD(p_ramrod->flags,
  754                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
  755                   qp->incoming_rdma_read_en);
  756 
  757         SET_FIELD(p_ramrod->flags,
  758                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
  759                   qp->incoming_rdma_write_en);
  760 
  761         SET_FIELD(p_ramrod->flags,
  762                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
  763                   qp->incoming_atomic_en);
  764 
  765         SET_FIELD(p_ramrod->flags,
  766                   ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
  767                   qp->e2e_flow_control_en);
  768 
  769         SET_FIELD(p_ramrod->flags,
  770                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
  771                   GET_FIELD(modify_flags,
  772                             ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
  773 
  774         SET_FIELD(p_ramrod->flags,
  775                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
  776                   GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));
  777 
  778         SET_FIELD(p_ramrod->flags,
  779                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
  780                   GET_FIELD(modify_flags,
  781                             ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
  782 
  783         SET_FIELD(p_ramrod->flags,
  784                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
  785                   GET_FIELD(modify_flags,
  786                             ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
  787 
  788         /* TBD: future use only
  789          * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK
  790          * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT
  791          */
  792 
  793         SET_FIELD(p_ramrod->flags,
  794                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
  795                   GET_FIELD(modify_flags,
  796                             ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
  797 
  798         p_ramrod->fields = 0;
  799         SET_FIELD(p_ramrod->fields,
  800                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
  801                   qp->min_rnr_nak_timer);
  802 
  803         p_ramrod->max_ird = qp->max_rd_atomic_resp;
  804         p_ramrod->traffic_class = qp->traffic_class_tos;
  805         p_ramrod->hop_limit = qp->hop_limit_ttl;
  806         p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
  807         p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
  808         p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
  809         ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
  810         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  811 
  812         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify responder, rc = %d\n", rc);
  813         return rc;
  814 }
  815 
  816 static enum _ecore_status_t ecore_roce_sp_modify_requester(
  817         struct ecore_hwfn       *p_hwfn,
  818         struct ecore_rdma_qp    *qp,
  819         bool                    move_to_sqd,
  820         bool                    move_to_err,
  821         u32                     modify_flags)
  822 {
  823         struct roce_modify_qp_req_ramrod_data   *p_ramrod;
  824         struct ecore_sp_init_data               init_data;
  825         struct ecore_spq_entry                  *p_ent;
  826         enum _ecore_status_t                    rc;
  827 
  828         if (!qp->has_req)
  829                 return ECORE_SUCCESS;
  830 
  831         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
  832 
  833         if (move_to_err && !(qp->req_offloaded))
  834                 return ECORE_SUCCESS;
  835 
  836         /* Get SPQ entry */
  837         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  838         init_data.cid = qp->icid + 1;
  839         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  840         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  841 
  842         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  843                                    ROCE_EVENT_MODIFY_QP,
  844                                    PROTOCOLID_ROCE, &init_data);
  845         if (rc != ECORE_SUCCESS) {
  846                 DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
  847                 return rc;
  848         }
  849 
  850         p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
  851 
  852         p_ramrod->flags = 0;
  853 
  854         SET_FIELD(p_ramrod->flags,
  855                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
  856                   move_to_err);
  857 
  858         SET_FIELD(p_ramrod->flags,
  859                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
  860                   move_to_sqd);
  861 
  862         SET_FIELD(p_ramrod->flags,
  863                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
  864                   qp->sqd_async);
  865 
  866         SET_FIELD(p_ramrod->flags,
  867                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
  868                   GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));
  869 
  870         SET_FIELD(p_ramrod->flags,
  871                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
  872                   GET_FIELD(modify_flags,
  873                             ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
  874 
  875         SET_FIELD(p_ramrod->flags,
  876                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
  877                   GET_FIELD(modify_flags,
  878                             ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
  879 
  880         SET_FIELD(p_ramrod->flags,
  881                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
  882                   GET_FIELD(modify_flags,
  883                             ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
  884 
  885         SET_FIELD(p_ramrod->flags,
  886                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
  887                   GET_FIELD(modify_flags,
  888                             ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT));
  889 
  890         SET_FIELD(p_ramrod->flags,
  891                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
  892                   GET_FIELD(modify_flags,
  893                             ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
  894 
  895         /* TBD: future use only
  896          * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK
  897          * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT
  898          */
  899 
  900         p_ramrod->fields = 0;
  901         SET_FIELD(p_ramrod->fields,
  902                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
  903                   qp->retry_cnt);
  904 
  905         SET_FIELD(p_ramrod->fields,
  906                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
  907                   qp->rnr_retry_cnt);
  908 
  909         p_ramrod->max_ord = qp->max_rd_atomic_req;
  910         p_ramrod->traffic_class = qp->traffic_class_tos;
  911         p_ramrod->hop_limit = qp->hop_limit_ttl;
  912         p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
  913         p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
  914         p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
  915         p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
  916         ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
  917         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  918 
  919         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify requester, rc = %d\n", rc);
  920         return rc;
  921 }
  922 
  923 static enum _ecore_status_t ecore_roce_sp_destroy_qp_responder(
  924         struct ecore_hwfn    *p_hwfn,
  925         struct ecore_rdma_qp *qp,
  926         u32                  *num_invalidated_mw,
  927         u32                  *cq_prod)
  928 {
  929         struct roce_destroy_qp_resp_output_params       *p_ramrod_res;
  930         struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
  931         struct ecore_sp_init_data               init_data;
  932         struct ecore_spq_entry                  *p_ent;
  933         dma_addr_t                              ramrod_res_phys;
  934         enum _ecore_status_t                    rc;
  935 
  936         if (!qp->has_resp) {
  937                 *num_invalidated_mw = 0;
  938                 *cq_prod = 0;
  939                 return ECORE_SUCCESS;
  940         }
  941 
  942         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
  943 
  944         *num_invalidated_mw = 0;
  945 
  946         if (!qp->resp_offloaded) {
  947                 *cq_prod = qp->cq_prod.resp;
  948                 return ECORE_SUCCESS;
  949         }
  950 
  951         /* Get SPQ entry */
  952         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  953         init_data.cid = qp->icid;
  954         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  955         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  956 
  957         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  958                                    ROCE_RAMROD_DESTROY_QP,
  959                                    PROTOCOLID_ROCE, &init_data);
  960         if (rc != ECORE_SUCCESS)
  961                 return rc;
  962 
  963         p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
  964 
  965         p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  966                 &ramrod_res_phys, sizeof(*p_ramrod_res));
  967 
  968         if (!p_ramrod_res)
  969         {
  970                 rc = ECORE_NOMEM;
  971                 DP_NOTICE(p_hwfn, false,
  972                           "ecore destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
  973                           rc);
  974                 return rc;
  975         }
  976 
  977         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
  978 
  979         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  980         if (rc != ECORE_SUCCESS)
  981                 goto err;
  982 
  983         *num_invalidated_mw
  984                 = OSAL_LE32_TO_CPU(p_ramrod_res->num_invalidated_mw);
  985         *cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
  986         qp->cq_prod.resp = *cq_prod;
  987 
  988         /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
  989         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  990                                qp->irq,
  991                                qp->irq_phys_addr,
  992                                qp->irq_num_pages *
  993                                RDMA_RING_PAGE_SIZE);
  994 
  995         qp->resp_offloaded = false;
  996 
  997         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
  998 
  999         /* "fall through" */
 1000 
 1001 err:
 1002         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 1003                 sizeof(*p_ramrod_res));
 1004 
 1005         return rc;
 1006 }
 1007 
 1008 static enum _ecore_status_t ecore_roce_sp_destroy_qp_requester(
 1009         struct ecore_hwfn    *p_hwfn,
 1010         struct ecore_rdma_qp *qp,
 1011         u32                  *num_bound_mw,
 1012         u32                  *cq_prod)
 1013 {
 1014         struct roce_destroy_qp_req_output_params        *p_ramrod_res;
 1015         struct roce_destroy_qp_req_ramrod_data  *p_ramrod;
 1016         struct ecore_sp_init_data               init_data;
 1017         struct ecore_spq_entry                  *p_ent;
 1018         dma_addr_t                              ramrod_res_phys;
 1019         enum _ecore_status_t                    rc;
 1020 
 1021         if (!qp->has_req) {
 1022                 *num_bound_mw = 0;
 1023                 *cq_prod = 0;
 1024                 return ECORE_SUCCESS;
 1025         }
 1026 
 1027         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
 1028 
 1029         if (!qp->req_offloaded) {
 1030                 *cq_prod = qp->cq_prod.req;
 1031                 return ECORE_SUCCESS;
 1032         }
 1033 
 1034         p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
 1035                         OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
 1036                                 sizeof(*p_ramrod_res));
 1037         if (!p_ramrod_res)
 1038         {
 1039                 DP_NOTICE(p_hwfn, false,
 1040                           "ecore destroy requester failed: cannot allocate memory (ramrod)\n");
 1041                 return ECORE_NOMEM;
 1042         }
 1043 
 1044         /* Get SPQ entry */
 1045         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1046         init_data.cid = qp->icid + 1;
 1047         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1048         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1049 
 1050         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
 1051                                    PROTOCOLID_ROCE, &init_data);
 1052         if (rc != ECORE_SUCCESS)
 1053                 goto err;
 1054 
 1055         p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
 1056         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 1057 
 1058         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1059         if (rc != ECORE_SUCCESS)
 1060                 goto err;
 1061 
 1062         *num_bound_mw = OSAL_LE32_TO_CPU(p_ramrod_res->num_bound_mw);
 1063         *cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
 1064         qp->cq_prod.req = *cq_prod;
 1065 
 1066         /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
 1067         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
 1068                                qp->orq,
 1069                                qp->orq_phys_addr,
 1070                                qp->orq_num_pages *
 1071                                RDMA_RING_PAGE_SIZE);
 1072 
 1073         qp->req_offloaded = false;
 1074 
 1075         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
 1076 
 1077         /* "fall through" */
 1078 
 1079 err:
 1080         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 1081                                sizeof(*p_ramrod_res));
 1082 
 1083         return rc;
 1084 }
 1085 
 1086 static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_responder(
 1087         struct ecore_hwfn *p_hwfn,
 1088         struct ecore_rdma_qp *qp,
 1089         struct ecore_rdma_query_qp_out_params *out_params)
 1090 {
 1091         struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
 1092         struct roce_query_qp_resp_ramrod_data   *p_resp_ramrod;
 1093         struct ecore_sp_init_data               init_data;
 1094         dma_addr_t                              resp_ramrod_res_phys;
 1095         struct ecore_spq_entry                  *p_ent;
 1096         enum _ecore_status_t                    rc = ECORE_SUCCESS;
 1097         bool                                    error_flag;
 1098 
 1099         if (!qp->resp_offloaded) {
 1100                 /* Don't send query qp for the responder */
 1101                 out_params->rq_psn = qp->rq_psn;
 1102 
 1103                 return  ECORE_SUCCESS;
 1104         }
 1105 
 1106         /* Send a query responder ramrod to the FW */
 1107         p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
 1108                 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &resp_ramrod_res_phys,
 1109                                         sizeof(*p_resp_ramrod_res));
 1110         if (!p_resp_ramrod_res)
 1111         {
 1112                 DP_NOTICE(p_hwfn, false,
 1113                           "ecore query qp failed: cannot allocate memory (ramrod)\n");
 1114                 return ECORE_NOMEM;
 1115         }
 1116 
 1117         /* Get SPQ entry */
 1118         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1119         init_data.cid = qp->icid;
 1120         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1121         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1122         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 1123                                    PROTOCOLID_ROCE, &init_data);
 1124         if (rc != ECORE_SUCCESS)
 1125                 goto err;
 1126 
 1127         p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
 1128         DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
 1129 
 1130         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1131         if (rc != ECORE_SUCCESS)
 1132                 goto err;
 1133 
 1134         out_params->rq_psn = OSAL_LE32_TO_CPU(p_resp_ramrod_res->psn);
 1135         error_flag = GET_FIELD(
 1136                         OSAL_LE32_TO_CPU(p_resp_ramrod_res->err_flag),
 1137                         ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
 1138         if (error_flag)
 1139                 qp->cur_state = ECORE_ROCE_QP_STATE_ERR;
 1140 
 1141 err:
 1142         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_resp_ramrod_res,
 1143                                resp_ramrod_res_phys,
 1144                                sizeof(*p_resp_ramrod_res));
 1145 
 1146         return rc;
 1147 }
 1148 
 1149 static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_requester(
 1150         struct ecore_hwfn *p_hwfn,
 1151         struct ecore_rdma_qp *qp,
 1152         struct ecore_rdma_query_qp_out_params *out_params,
 1153         bool *sq_draining)
 1154 {
 1155         struct roce_query_qp_req_output_params  *p_req_ramrod_res;
 1156         struct roce_query_qp_req_ramrod_data    *p_req_ramrod;
 1157         struct ecore_sp_init_data               init_data;
 1158         dma_addr_t                              req_ramrod_res_phys;
 1159         struct ecore_spq_entry                  *p_ent;
 1160         enum _ecore_status_t                    rc = ECORE_SUCCESS;
 1161         bool                                    error_flag;
 1162 
 1163         if (!qp->req_offloaded)
 1164         {
 1165                 /* Don't send query qp for the requester */
 1166                 out_params->sq_psn = qp->sq_psn;
 1167                 out_params->draining = false;
 1168 
 1169                 *sq_draining = 0;
 1170 
 1171                 return ECORE_SUCCESS;
 1172         }
 1173 
 1174         /* Send a query requester ramrod to the FW */
 1175         p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
 1176                 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &req_ramrod_res_phys,
 1177                                         sizeof(*p_req_ramrod_res));
 1178         if (!p_req_ramrod_res)
 1179         {
  1180                 DP_NOTICE(p_hwfn, false,
  1181                           "ecore query qp failed: cannot allocate "
  1182                           "memory (ramrod)\n");
  1183                 return ECORE_NOMEM;
 1184         }
 1185 
  1186         /* Get SPQ entry */
               OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  1187         init_data.cid = qp->icid + 1;
               init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
               init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1188         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 1189                                    PROTOCOLID_ROCE, &init_data);
 1190         if (rc != ECORE_SUCCESS)
 1191                 goto err;
 1192 
 1193         p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
 1194         DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
 1195 
 1196         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1197         if (rc != ECORE_SUCCESS)
 1198                 goto err;
 1199 
 1200         out_params->sq_psn = OSAL_LE32_TO_CPU(p_req_ramrod_res->psn);
 1201         error_flag = GET_FIELD(OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
 1202                                ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
 1203         if (error_flag)
 1204                 qp->cur_state = ECORE_ROCE_QP_STATE_ERR;
 1205         else
 1206                 *sq_draining = GET_FIELD(
 1207                         OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
 1208                         ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
 1209 
 1210 err:
 1211         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_req_ramrod_res,
 1212                                req_ramrod_res_phys, sizeof(*p_req_ramrod_res));
 1213 
 1214         return rc;
 1215 }
 1216 
 1217 enum _ecore_status_t ecore_roce_query_qp(
 1218         struct ecore_hwfn *p_hwfn,
 1219         struct ecore_rdma_qp *qp,
 1220         struct ecore_rdma_query_qp_out_params *out_params)
 1221 {
 1222         enum _ecore_status_t    rc;
 1223 
 1224         rc = ecore_roce_sp_query_responder(p_hwfn, qp, out_params);
 1225         if (rc)
 1226                 return rc;
 1227 
 1228         rc = ecore_roce_sp_query_requester(p_hwfn, qp, out_params,
 1229                                            &out_params->draining);
 1230         if (rc)
 1231                 return rc;
 1232 
 1233         out_params->state = qp->cur_state;
 1234 
 1235         return ECORE_SUCCESS;
 1236 }
 1237 
 1238 enum _ecore_status_t ecore_roce_destroy_qp(struct ecore_hwfn *p_hwfn,
 1239                                            struct ecore_rdma_qp *qp,
 1240                                            struct ecore_rdma_destroy_qp_out_params *out_params)
 1241 {
 1242         u32 cq_prod_resp = qp->cq_prod.resp, cq_prod_req = qp->cq_prod.req;
 1243         u32 num_invalidated_mw = 0;
 1244         u32 num_bound_mw = 0;
 1245         enum _ecore_status_t rc;
 1246 
 1247         /* Destroys the specified QP
 1248          * Note: if qp state != RESET/ERR/INIT then upper driver first need to
 1249          * call modify qp to move the qp to ERR state
 1250          */
 1251         if ((qp->cur_state != ECORE_ROCE_QP_STATE_RESET) &&
 1252             (qp->cur_state != ECORE_ROCE_QP_STATE_ERR) &&
 1253             (qp->cur_state != ECORE_ROCE_QP_STATE_INIT))
 1254         {
 1255                 DP_NOTICE(p_hwfn,
 1256                           true,
 1257                    "QP must be in error, reset or init state before destroying it\n");
 1258                 return ECORE_INVAL;
 1259         }
 1260 
 1261         if (qp->cur_state != ECORE_ROCE_QP_STATE_RESET) {
 1262                 rc = ecore_roce_sp_destroy_qp_responder(p_hwfn,
 1263                                                         qp,
 1264                                                         &num_invalidated_mw,
 1265                                                         &cq_prod_resp);
 1266                 if (rc != ECORE_SUCCESS)
 1267                         return rc;
 1268 
 1269                 /* Send destroy requester ramrod */
 1270                 rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
 1271                                                         &num_bound_mw,
 1272                                                         &cq_prod_req);
 1273                 if (rc != ECORE_SUCCESS)
 1274                         return rc;
 1275 
  1276                 /* resp_offloaded was true, so num_invalidated_mw is valid */
 1277                 if (num_invalidated_mw != num_bound_mw) {
 1278                         DP_NOTICE(p_hwfn,
 1279                                   true,
 1280                                   "number of invalidated memory windows differs from the number of bound ones\n");
 1281                         return ECORE_INVAL;
 1282                 }
 1283         }
 1284 
 1285         ecore_roce_free_qp(p_hwfn, qp->qp_idx);
 1286 
 1287         out_params->rq_cq_prod = cq_prod_resp;
 1288         out_params->sq_cq_prod = cq_prod_req;
 1289 
 1290         return ECORE_SUCCESS;
 1291 }
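
      /*
       * Minimal teardown sketch (hypothetical caller flow, not driver code,
       * assuming the modify path implemented later in this file): a QP still
       * in RTS must first be moved to ERR before ecore_roce_destroy_qp()
       * will accept it.
       *
       *      struct ecore_rdma_modify_qp_in_params params = {0};
       *      struct ecore_rdma_destroy_qp_out_params out = {0};
       *
       *      qp->cur_state = ECORE_ROCE_QP_STATE_ERR;
       *      if (ecore_roce_modify_qp(p_hwfn, qp, ECORE_ROCE_QP_STATE_RTS,
       *                               &params) == ECORE_SUCCESS)
       *              (void)ecore_roce_destroy_qp(p_hwfn, qp, &out);
       */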
 1292 
 1293 enum _ecore_status_t ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid)
 1294 {
 1295         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1296         struct ecore_sp_init_data init_data;
 1297         struct ecore_spq_entry *p_ent;
 1298         enum _ecore_status_t rc;
 1299 
 1300         if (!rdma_cxt) {
 1301                 /* rdma_cxt is the hwfn pointer itself, so there is no
 1302                  * valid device to log through here. */
 1303                 return ECORE_INVAL;
 1304         }
 1305 
 1306         /* Get SPQ entry */
 1307         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1308         init_data.cid = cid;
 1309         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1310         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1311         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_UD_QP,
 1312                                    PROTOCOLID_ROCE, &init_data);
 1313         if (rc != ECORE_SUCCESS)
 1314                 goto err;
 1315 
 1316         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1317         if (rc != ECORE_SUCCESS)
 1318                 goto err;
 1319 
 1320         ecore_roce_free_qp(p_hwfn, ECORE_ROCE_ICID_TO_QP(cid));
 1321 
 1322         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "freed a ud qp with cid=%d\n", cid);
 1323 
 1324         return ECORE_SUCCESS;
 1325 
 1326 err:
 1327         DP_ERR(p_hwfn, "failed destroying a ud qp with cid=%d\n", cid);
 1328 
 1329         return rc;
 1330 }
 1331 
 1332 enum _ecore_status_t ecore_roce_create_ud_qp(void               *rdma_cxt,
 1333                         struct ecore_rdma_create_qp_out_params  *out_params)
 1334 {
 1335         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1336         struct ecore_sp_init_data init_data;
 1337         struct ecore_spq_entry *p_ent;
 1338         enum _ecore_status_t rc;
 1339         u16 icid, qp_idx;
 1340 
 1341         if (!rdma_cxt) /* cannot log: rdma_cxt is the NULL hwfn pointer */
 1342                 return ECORE_INVAL;
 1343         if (!out_params) {
 1344                 DP_ERR(p_hwfn->p_dev, "create ud qp failed: NULL out_params\n");
 1345                 return ECORE_INVAL;
 1346         }
 1347 
 1348         rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp_idx);
 1349         if (rc != ECORE_SUCCESS)
 1350                 goto err;
 1351 
 1352         icid = ECORE_ROCE_QP_TO_ICID(qp_idx);
 1353 
 1354         /* Get SPQ entry */
 1355         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1356         init_data.cid = icid;
 1357         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1358         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1359         rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_UD_QP,
 1360                                    PROTOCOLID_ROCE, &init_data);
 1361         if (rc != ECORE_SUCCESS)
 1362                 goto err1;
 1363 
 1364         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1365         if (rc != ECORE_SUCCESS)
 1366                 goto err1;
 1367 
 1368         out_params->icid = icid;
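              /* qp_id packs a fixed 0xFF tag into bits 16-23 and the icid
               * into the low 16 bits.
               */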
 1369         out_params->qp_id = ((0xFF << 16) | icid);
 1370 
 1371         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "created a ud qp with icid=%d\n",
 1372                    icid);
 1373 
 1374         return ECORE_SUCCESS;
 1375 
 1376 err1:
 1377         ecore_roce_free_qp(p_hwfn, qp_idx);
 1378 
 1379 err:
 1380         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "failed creating a ud qp\n");
 1381 
 1382         return rc;
 1383 }
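
      /*
       * Illustrative pairing (hypothetical caller, not driver code): a UD QP
       * created above is torn down with ecore_roce_destroy_ud_qp() using the
       * icid returned in out_params.
       *
       *      struct ecore_rdma_create_qp_out_params out = {0};
       *
       *      if (ecore_roce_create_ud_qp(p_hwfn, &out) == ECORE_SUCCESS)
       *              (void)ecore_roce_destroy_ud_qp(p_hwfn, (u16)out.icid);
       */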
 1384 
 1385 enum _ecore_status_t
 1386 ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
 1387                      struct ecore_rdma_qp *qp,
 1388                      enum ecore_roce_qp_state prev_state,
 1389                      struct ecore_rdma_modify_qp_in_params *params)
 1390 {
 1391         u32 num_invalidated_mw = 0, num_bound_mw = 0;
 1392         enum _ecore_status_t rc = ECORE_SUCCESS;
 1393 
 1394         /* Perform additional operations according to the current state and the
 1395          * next state
 1396          */
 1397         if (((prev_state == ECORE_ROCE_QP_STATE_INIT) ||
 1398              (prev_state == ECORE_ROCE_QP_STATE_RESET)) &&
 1399             (qp->cur_state == ECORE_ROCE_QP_STATE_RTR))
 1400         {
 1401                 /* Init->RTR or Reset->RTR */
 1402 
 1403                 /* Verify that the cid bits of this qp index are clear */
 1404                 rc = ecore_roce_wait_free_cids(p_hwfn, qp->qp_idx);
 1405                 if (rc)
 1406                         return rc;
 1407 
 1408                 rc = ecore_roce_sp_create_responder(p_hwfn, qp);
 1409                 return rc;
 1410 
 1411         } else if ((prev_state == ECORE_ROCE_QP_STATE_RTR) &&
 1412                    (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
 1413         {
 1414                 /* RTR->RTS */
 1415                 rc = ecore_roce_sp_create_requester(p_hwfn, qp);
 1416                 if (rc != ECORE_SUCCESS)
 1417                         return rc;
 1418 
 1419                 /* Send modify responder ramrod */
 1420                 rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
 1421                                                     params->modify_flags);
 1422                 return rc;
 1423 
 1424         } else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
 1425                    (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
 1426         {
 1427                 /* RTS->RTS */
 1428                 rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
 1429                                                     params->modify_flags);
 1430                 if (rc != ECORE_SUCCESS)
 1431                         return rc;
 1432 
 1433                 rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
 1434                                                     params->modify_flags);
 1435                 return rc;
 1436 
 1437         } else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
 1438                    (qp->cur_state == ECORE_ROCE_QP_STATE_SQD))
 1439         {
 1440                 /* RTS->SQD */
 1441                 rc = ecore_roce_sp_modify_requester(p_hwfn, qp, true, false,
 1442                                                     params->modify_flags);
 1443                 return rc;
 1444 
 1445         } else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
 1446                    (qp->cur_state == ECORE_ROCE_QP_STATE_SQD))
 1447         {
 1448                 /* SQD->SQD */
 1449                 rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
 1450                                                     params->modify_flags);
 1451                 if (rc != ECORE_SUCCESS)
 1452                         return rc;
 1453 
 1454                 rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
 1455                                                     params->modify_flags);
 1456                 return rc;
 1457 
 1458         } else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
 1459                  (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
 1460         {
 1461                 /* SQD->RTS */
 1462                 rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
 1463                                                     params->modify_flags);
 1464                 if (rc != ECORE_SUCCESS)
 1465                         return rc;
 1466 
 1467                 rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
 1468                                                     params->modify_flags);
 1469 
 1470                 return rc;
 1471         } else if (qp->cur_state == ECORE_ROCE_QP_STATE_ERR) {
 1472                 /* ->ERR */
 1473                 rc = ecore_roce_sp_modify_responder(p_hwfn, qp, true,
 1474                                                     params->modify_flags);
 1475                 if (rc != ECORE_SUCCESS)
 1476                         return rc;
 1477 
 1478                 rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, true,
 1479                                                     params->modify_flags);
 1480                 return rc;
 1481 
 1482         } else if (qp->cur_state == ECORE_ROCE_QP_STATE_RESET) {
 1483                 /* Any state -> RESET */
 1484 
 1485                 /* Send destroy responder ramrod */
 1486                 rc = ecore_roce_sp_destroy_qp_responder(p_hwfn, qp,
 1487                                                         &num_invalidated_mw,
 1488                                                         &qp->cq_prod.resp);
 1489 
 1490                 if (rc != ECORE_SUCCESS)
 1491                         return rc;
 1492 
 1493                 rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
 1494                                                         &num_bound_mw,
 1495                                                         &qp->cq_prod.req);
 1496 
 1497                 if (rc != ECORE_SUCCESS)
 1498                         return rc;
 1499 
 1500                 if (num_invalidated_mw != num_bound_mw) {
 1501                         DP_NOTICE(p_hwfn,
 1502                                   true,
 1503                                   "number of invalidated memory windows differs from the number of bound ones\n");
 1504                         return ECORE_INVAL;
 1505                 }
 1506         } else {
 1507                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "no ramrod required for this state transition\n");
 1508         }
 1509 
 1510         return rc;
 1511 }
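
      /*
       * Transition summary (derived from the dispatch above):
       *
       *      INIT/RESET -> RTR   : create responder
       *      RTR        -> RTS   : create requester, then modify responder
       *      RTS        -> RTS   : modify responder, then modify requester
       *      RTS        -> SQD   : modify requester (move to SQD)
       *      SQD        -> SQD   : modify responder, then modify requester
       *      SQD        -> RTS   : modify responder, then modify requester
       *      any        -> ERR   : modify responder and requester with the
       *                            error flag set
       *      any        -> RESET : destroy responder, then destroy requester,
       *                            and cross-check the memory-window counts
       *
       * Any other transition completes without sending ramrods.
       */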
 1512 
 1513 static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid)
 1514 {
 1515         struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 1516         u32 start_cid, cid;
 1517 
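              /* The cid bitmap is indexed relative to the protocol's first
               * cid, so convert the absolute icid into a bitmap offset
               * before releasing it.
               */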
 1518         start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
 1519         cid = icid - start_cid;
 1520 
 1521         OSAL_SPIN_LOCK(&p_rdma_info->lock);
 1522 
 1523         ecore_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
 1524 
 1525         OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
 1526 }
 1527 
 1528 static void ecore_rdma_dpm_conf(struct ecore_hwfn *p_hwfn,
 1529                                 struct ecore_ptt *p_ptt)
 1530 {
 1531         u32 val;
 1532 
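              /* DPM stays enabled only while neither DCBx nor the doorbell
               * BAR has vetoed EDPM.
               */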
 1533         val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
 1534 
 1535         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
 1536         DP_VERBOSE(p_hwfn, (ECORE_MSG_DCB | ECORE_MSG_RDMA),
 1537                    "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
 1538                    val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
 1539 }
 1540 
 1541 /* This function disables EDPM due to DCBx considerations */
 1542 void ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 1543 {
 1544         u8 val;
 1545 
 1546         /* If any QPs are already active, disable DPM: their context was
 1547          * built from state that predates the latest DCBx update. Otherwise
 1548          * enable it.
 1549          */
 1550         val = ecore_rdma_allocated_qps(p_hwfn) ? true : false;
 1551         p_hwfn->dcbx_no_edpm = val;
 1552 
 1553         ecore_rdma_dpm_conf(p_hwfn, p_ptt);
 1554 }
 1555 
 1556 /* This function disables EDPM due to doorbell bar considerations */
 1557 void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 1558 {
 1559         p_hwfn->db_bar_no_edpm = true;
 1560 
 1561         ecore_rdma_dpm_conf(p_hwfn, p_ptt);
 1562 }
 1563 
 1564 enum _ecore_status_t ecore_roce_setup(struct ecore_hwfn *p_hwfn)
 1565 {
 1566         return ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
 1567                                            ecore_roce_async_event);
 1568 }
 1569 
 1570 #ifdef _NTDDK_
 1571 #pragma warning(pop)
 1572 #endif
