
FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_rdma.c


    1 /*
    2  * Copyright (c) 2018-2019 Cavium, Inc.
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * File : ecore_rdma.c
   30  */
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "bcm_osal.h"
   35 #include "ecore.h"
   36 #include "ecore_status.h"
   37 #include "ecore_sp_commands.h"
   38 #include "ecore_cxt.h"
   39 #include "ecore_rdma.h"
   40 #include "reg_addr.h"
   41 #include "ecore_rt_defs.h"
   42 #include "ecore_init_ops.h"
   43 #include "ecore_hw.h"
   44 #include "ecore_mcp.h"
   45 #include "ecore_init_fw_funcs.h"
   46 #include "ecore_int.h"
   47 #include "pcics_reg_driver.h"
   48 #include "ecore_iro.h"
   49 #include "ecore_gtt_reg_addr.h"
   50 #include "ecore_hsi_iwarp.h"
   51 #include "ecore_ll2.h"
   52 #include "ecore_ooo.h"
   53 #ifndef LINUX_REMOVE
   54 #include "ecore_tcp_ip.h"
   55 #endif
   56 
   57 enum _ecore_status_t ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
   58                                            struct ecore_bmap *bmap,
   59                                            u32              max_count,
   60                                            char              *name)
   61 {
   62         u32 size_in_bytes;
   63 
   64         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "max_count = %08x\n", max_count);
   65 
   66         bmap->max_count = max_count;
   67 
   68         if (!max_count) {
   69                 bmap->bitmap = OSAL_NULL;
   70                 return ECORE_SUCCESS;
   71         }
   72 
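                /* Round the bitmap up to whole unsigned longs; e.g. for
                 * max_count = 100 bits on a 64-bit build this is
                 * DIV_ROUND_UP(100, 64) = 2 longs, i.e. 16 bytes.
                 */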
   73         size_in_bytes = sizeof(unsigned long) *
   74                 DIV_ROUND_UP(max_count, (sizeof(unsigned long) * 8));
   75 
   76         bmap->bitmap = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size_in_bytes);
   77         if (!bmap->bitmap)
   78         {
   79                 DP_NOTICE(p_hwfn, false,
   80                           "ecore bmap alloc failed: cannot allocate memory (bitmap). rc = %d\n",
   81                           ECORE_NOMEM);
   82                 return ECORE_NOMEM;
   83         }
   84 
   85         OSAL_SNPRINTF(bmap->name, QEDR_MAX_BMAP_NAME, "%s", name);
   86 
   87         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
   88         return ECORE_SUCCESS;
   89 }
   90 
   91 enum _ecore_status_t ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
   92                                               struct ecore_bmap *bmap,
   93                                               u32              *id_num)
   94 {
   95         *id_num = OSAL_FIND_FIRST_ZERO_BIT(bmap->bitmap, bmap->max_count);
   96         if (*id_num >= bmap->max_count)
   97                 return ECORE_INVAL;
   98 
   99         OSAL_SET_BIT(*id_num, bmap->bitmap);
  100 
  101         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: allocated id %d\n",
  102                    bmap->name, *id_num);
  103 
  104         return ECORE_SUCCESS;
  105 }
  106 
  107 void ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
  108                        struct ecore_bmap *bmap,
  109                        u32              id_num)
  110 {
  111         if (id_num >= bmap->max_count) {
  112                 DP_NOTICE(p_hwfn, true,
   113                           "%s bitmap: cannot set id %d, max is %d\n",
  114                           bmap->name, id_num, bmap->max_count);
  115 
  116                 return;
  117         }
  118 
  119         OSAL_SET_BIT(id_num, bmap->bitmap);
  120 
  121         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: set id %d\n",
  122                    bmap->name, id_num);
  123 }
  124 
  125 void ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
  126                            struct ecore_bmap *bmap,
  127                            u32              id_num)
  128 {
  129         bool b_acquired;
  130 
  131         if (id_num >= bmap->max_count)
  132                 return;
  133 
  134         b_acquired = OSAL_TEST_AND_CLEAR_BIT(id_num, bmap->bitmap);
  135         if (!b_acquired)
  136         {
  137                 DP_NOTICE(p_hwfn, false, "%s bitmap: id %d already released\n",
  138                           bmap->name, id_num);
  139                 return;
  140         }
  141 
  142         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: released id %d\n",
  143                    bmap->name, id_num);
  144 }
  145 
  146 int ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
  147                        struct ecore_bmap *bmap,
  148                        u32                id_num)
  149 {
  150         if (id_num >= bmap->max_count) {
  151                 DP_NOTICE(p_hwfn, true,
  152                           "%s bitmap: id %d too high. max is %d\n",
  153                           bmap->name, id_num, bmap->max_count);
  154                 return -1;
  155         }
  156 
  157         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: tested id %d\n",
  158                    bmap->name, id_num);
  159 
  160         return OSAL_TEST_BIT(id_num, bmap->bitmap);
  161 }
  162 
  163 static bool ecore_bmap_is_empty(struct ecore_bmap *bmap)
  164 {
  165         return (bmap->max_count ==
  166                 OSAL_FIND_FIRST_BIT(bmap->bitmap, bmap->max_count));
  167 }
  168 
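       /* Typical usage of the ID bitmaps above (a sketch; the callers further
        * down, e.g. ecore_rdma_alloc_tid() and ecore_rdma_alloc_pd(), follow
        * this pattern):
        *
        *      u32 id;
        *
        *      OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
        *      rc = ecore_rdma_bmap_alloc_id(p_hwfn, &bmap, &id);
        *      OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
        *      if (rc != ECORE_SUCCESS)
        *              return rc;
        *      ...
        *      OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
        *      ecore_bmap_release_id(p_hwfn, &bmap, id);
        *      OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
        */
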
  169 #ifndef LINUX_REMOVE
  170 u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id)
  171 {
   172         /* The first SB ID for RoCE comes after all the L2 SBs */
  173         return FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE) + rel_sb_id;
  174 }
  175 
  176 u32 ecore_rdma_query_cau_timer_res(void)
  177 {
  178         return ECORE_CAU_DEF_RX_TIMER_RES;
  179 }
  180 #endif
  181 
  182 enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn    *p_hwfn)
  183 {
  184         struct ecore_rdma_info *p_rdma_info;
  185 
  186         p_rdma_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info));
  187         if (!p_rdma_info) {
  188                 DP_NOTICE(p_hwfn, false,
  189                           "ecore rdma alloc failed: cannot allocate memory (rdma info).\n");
  190                 return ECORE_NOMEM;
  191         }
  192         p_hwfn->p_rdma_info = p_rdma_info;
  193 
  194 #ifdef CONFIG_ECORE_LOCK_ALLOC
  195         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_rdma_info->lock)) {
  196                 ecore_rdma_info_free(p_hwfn);
  197                 return ECORE_NOMEM;
  198         }
  199 #endif
  200         OSAL_SPIN_LOCK_INIT(&p_rdma_info->lock);
  201 
  202         return ECORE_SUCCESS;
  203 }
  204 
  205 void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn)
  206 {
  207 #ifdef CONFIG_ECORE_LOCK_ALLOC
  208         OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_rdma_info->lock);
  209 #endif
  210         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info);
  211         p_hwfn->p_rdma_info = OSAL_NULL;
  212 }
  213 
  214 static enum _ecore_status_t ecore_rdma_inc_ref_cnt(struct ecore_hwfn *p_hwfn)
  215 {
  216         enum _ecore_status_t rc = ECORE_INVAL;
  217 
  218         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  219         if (p_hwfn->p_rdma_info->active) {
  220                 p_hwfn->p_rdma_info->ref_cnt++;
  221                 rc = ECORE_SUCCESS;
  222         } else {
  223                 DP_INFO(p_hwfn, "Ref cnt requested for inactive rdma\n");
  224         }
  225         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  226         return rc;
  227 }
  228 
  229 static void ecore_rdma_dec_ref_cnt(struct ecore_hwfn *p_hwfn)
  230 {
  231         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  232         p_hwfn->p_rdma_info->ref_cnt--;
  233         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  234 }
  235 
  236 static void ecore_rdma_activate(struct ecore_hwfn *p_hwfn)
  237 {
  238         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  239         p_hwfn->p_rdma_info->active = true;
  240         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  241 }
  242 
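       /* Flows that must not race with teardown bracket themselves with the
        * reference count (a sketch of the pattern, e.g. for query-stats):
        *
        *      if (ecore_rdma_inc_ref_cnt(p_hwfn) != ECORE_SUCCESS)
        *              return;         // rdma is no longer active
        *      ... run the flow ...
        *      ecore_rdma_dec_ref_cnt(p_hwfn);
        */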
   243 /* Part of deactivating rdma is letting all the relevant flows complete before
   244  * we start shutting down: currently only query-stats, which can be called
   245  * from MCP context.
  246  */
   247 /* The longest time it can take an rdma flow to complete */
  248 #define ECORE_RDMA_MAX_FLOW_TIME (100)
  249 static enum _ecore_status_t ecore_rdma_deactivate(struct ecore_hwfn *p_hwfn)
  250 {
  251         int wait_count;
  252 
  253         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  254         p_hwfn->p_rdma_info->active = false;
  255         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  256 
   257         /* We'll give each flow its time to complete... */
  258         wait_count = p_hwfn->p_rdma_info->ref_cnt;
  259 
  260         while (p_hwfn->p_rdma_info->ref_cnt) {
  261                 OSAL_MSLEEP(ECORE_RDMA_MAX_FLOW_TIME);
  262                 if (--wait_count == 0) {
  263                         DP_NOTICE(p_hwfn, false,
  264                                   "Timeout on refcnt=%d\n",
  265                                   p_hwfn->p_rdma_info->ref_cnt);
  266                         return ECORE_TIMEOUT;
  267                 }
  268         }
  269         return ECORE_SUCCESS;
  270 }
  271 
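       /* Allocate all the per-function RDMA resources: the device and port
        * parameter structs and the ID bitmaps (PD, XRCD, DPI, CQ, toggle bits,
        * TID/MR, QP, real CID, XRC SRQ and SRQ), plus the iWARP-specific
        * resources when the personality is iWARP.
        */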
  272 static enum _ecore_status_t ecore_rdma_alloc(struct ecore_hwfn *p_hwfn)
  273 {
  274         struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
  275         u32 num_cons, num_tasks;
  276         enum _ecore_status_t rc;
  277 
  278         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocating RDMA\n");
  279 
  280         if (!p_rdma_info)
  281                 return ECORE_INVAL;
  282 
  283         if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_IWARP)
  284                 p_rdma_info->proto = PROTOCOLID_IWARP;
  285         else
  286                 p_rdma_info->proto = PROTOCOLID_ROCE;
  287 
  288         num_cons = ecore_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
  289                                                  OSAL_NULL);
  290 
  291         if (IS_IWARP(p_hwfn))
  292                 p_rdma_info->num_qps = num_cons;
  293         else
  294                 p_rdma_info->num_qps = num_cons / 2;
  295 
  296         /* INTERNAL: RoCE & iWARP use the same taskid */
  297         num_tasks = ecore_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
  298 
  299         /* Each MR uses a single task */
  300         p_rdma_info->num_mrs = num_tasks;
  301 
  302         /* Queue zone lines are shared between RoCE and L2 in such a way that
  303          * they can be used by each without obstructing the other.
  304          */
  305         p_rdma_info->queue_zone_base = (u16) RESC_START(p_hwfn, ECORE_L2_QUEUE);
  306         p_rdma_info->max_queue_zones = (u16) RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
  307 
  308         /* Allocate a struct with device params and fill it */
  309         p_rdma_info->dev = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->dev));
  310         if (!p_rdma_info->dev)
  311         {
  312                 rc = ECORE_NOMEM;
  313                 DP_NOTICE(p_hwfn, false,
  314                           "ecore rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
  315                           rc);
  316                 return rc;
  317         }
  318 
  319         /* Allocate a struct with port params and fill it */
  320         p_rdma_info->port = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->port));
  321         if (!p_rdma_info->port)
  322         {
  323                 DP_NOTICE(p_hwfn, false,
  324                           "ecore rdma alloc failed: cannot allocate memory (rdma info port)\n");
  325                 return ECORE_NOMEM;
  326         }
  327 
  328         /* Allocate bit map for pd's */
  329         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
  330                                    "PD");
  331         if (rc != ECORE_SUCCESS)
  332         {
  333                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
   334                            "Failed to allocate pd_map, rc = %d\n",
  335                            rc);
  336                 return rc;
  337         }
  338 
  339         /* Allocate bit map for XRC Domains */
  340         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
  341                                    ECORE_RDMA_MAX_XRCDS, "XRCD");
  342         if (rc != ECORE_SUCCESS)
  343         {
  344                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
   345                            "Failed to allocate xrcd_map, rc = %d\n",
  346                            rc);
  347                 return rc;
  348         }
  349 
  350         /* Allocate DPI bitmap */
  351         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
  352                                    p_hwfn->dpi_count, "DPI");
  353         if (rc != ECORE_SUCCESS)
  354         {
  355                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  356                            "Failed to allocate DPI bitmap, rc = %d\n", rc);
  357                 return rc;
  358         }
  359 
  360         /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
  361          * twice the number of QPs.
  362          */
  363         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
  364                                    num_cons, "CQ");
  365         if (rc != ECORE_SUCCESS)
  366         {
  367                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  368                            "Failed to allocate cq bitmap, rc = %d\n", rc);
  369                 return rc;
  370         }
  371 
  372         /* Allocate bitmap for toggle bit for cq icids
  373          * We toggle the bit every time we create or resize cq for a given icid.
  374          * The maximum number of CQs is bounded to the number of connections we
  375          * support. (num_qps in iWARP or num_qps/2 in RoCE).
  376          */
  377         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
  378                                    num_cons, "Toggle");
  379         if (rc != ECORE_SUCCESS)
  380         {
  381                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
   382                            "Failed to allocate toggle bits, rc = %d\n", rc);
  383                 return rc;
  384         }
  385 
  386         /* Allocate bitmap for itids */
  387         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
  388                                    p_rdma_info->num_mrs, "MR");
  389         if (rc != ECORE_SUCCESS)
  390         {
  391                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  392                            "Failed to allocate itids bitmaps, rc = %d\n", rc);
  393                 return rc;
  394         }
  395 
  396         /* Allocate bitmap for qps. */
  397         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->qp_map,
  398                                    p_rdma_info->num_qps, "QP");
  399         if (rc != ECORE_SUCCESS)
  400         {
  401                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  402                            "Failed to allocate qp bitmap, rc = %d\n", rc);
  403                 return rc;
  404         }
  405 
  406         /* Allocate bitmap for cids used for responders/requesters. */
  407         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
  408                                    "REAL CID");
  409         if (rc != ECORE_SUCCESS)
  410         {
  411                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  412                            "Failed to allocate cid bitmap, rc = %d\n", rc);
  413                 return rc;
  414         }
  415 
  416         /* The first SRQ follows the last XRC SRQ. This means that the
   417          * SRQ IDs start from an offset equal to max_xrc_srqs.
  418          */
  419         p_rdma_info->srq_id_offset = (u16)ecore_cxt_get_xrc_srq_count(p_hwfn);
  420         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrc_srq_map,
  421                                    p_rdma_info->srq_id_offset, "XRC SRQ");
  422         if (rc != ECORE_SUCCESS) {
  423                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  424                            "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
  425                 return rc;
  426         }
  427 
  428         /* Allocate bitmap for srqs */
  429         p_rdma_info->num_srqs = ecore_cxt_get_srq_count(p_hwfn);
  430         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
  431                                    p_rdma_info->num_srqs,
  432                                    "SRQ");
  433         if (rc != ECORE_SUCCESS) {
  434                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
  435                            "Failed to allocate srq bitmap, rc = %d\n", rc);
  436 
  437                 return rc;
  438         }
  439 
  440         if (IS_IWARP(p_hwfn))
  441                 rc = ecore_iwarp_alloc(p_hwfn);
  442 
  443         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
  444 
  445         return rc;
  446 }
  447 
  448 void ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
  449                           struct ecore_bmap *bmap,
  450                           bool check)
  451 {
  452         int weight, line, item, last_line, last_item;
  453         u64 *pmap;
  454 
  455         if (!bmap || !bmap->bitmap)
  456                 return;
  457 
  458         if (!check)
  459                 goto end;
  460 
  461         weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
  462         if (!weight)
  463                 goto end;
  464 
  465         DP_NOTICE(p_hwfn, false,
  466                   "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
  467                   bmap->name, bmap->max_count, weight);
  468 
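               /* Dump the bitmap in lines of 8 u64 words (512 bits). last_line
                * is the number of full 512-bit lines; last_item indexes one
                * past the final (possibly partial) u64 word.
                */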
  469         pmap = (u64 *)bmap->bitmap;
  470         last_line = bmap->max_count / (64*8);
  471         last_item = last_line * 8 + (((bmap->max_count % (64*8)) + 63) / 64);
  472 
  473         /* print aligned non-zero lines, if any */
  474         for (item = 0, line = 0; line < last_line; line++, item += 8) {
  475                 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item], 64*8))
  476                         DP_NOTICE(p_hwfn, false,
  477                                   "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
  478                                   line, (unsigned long long)pmap[item],
  479                                 (unsigned long long)pmap[item+1],
  480                                 (unsigned long long)pmap[item+2],
  481                                   (unsigned long long)pmap[item+3],
  482                                 (unsigned long long)pmap[item+4],
  483                                 (unsigned long long)pmap[item+5],
  484                                   (unsigned long long)pmap[item+6],
  485                                 (unsigned long long)pmap[item+7]);
  486         }
  487 
  488         /* print last unaligned non-zero line, if any */
  489         if ((bmap->max_count % (64*8)) &&
  490             (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item],
  491                                 bmap->max_count-item*64))) {
  492                 u8 str_last_line[200] = { 0 };
  493                 int  offset;
  494 
  495                 offset = OSAL_SPRINTF(str_last_line, "line 0x%04x: ", line);
  496                 for (; item < last_item; item++) {
  497                         offset += OSAL_SPRINTF(str_last_line+offset,
  498                                                "0x%016llx ",
  499                                 (unsigned long long)pmap[item]);
  500                 }
  501                 DP_NOTICE(p_hwfn, false, "%s\n", str_last_line);
  502         }
  503 
  504 end:
  505         OSAL_FREE(p_hwfn->p_dev, bmap->bitmap);
  506         bmap->bitmap = OSAL_NULL;
  507 }
  508 
  509 void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn)
  510 {
  511         if (IS_IWARP(p_hwfn))
  512                 ecore_iwarp_resc_free(p_hwfn);
  513 
  514         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
  515         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->qp_map, 1);
  516         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
  517         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
  518         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
  519         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
  520         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
  521         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
  522         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
  523         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
  524 
  525         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->port);
  526         p_hwfn->p_rdma_info->port = OSAL_NULL;
  527 
  528         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->dev);
  529         p_hwfn->p_rdma_info->dev = OSAL_NULL;
  530 }
  531 
  532 static OSAL_INLINE void ecore_rdma_free_reserved_lkey(struct ecore_hwfn *p_hwfn)
  533 {
  534         ecore_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
  535 }
  536 
  537 static void ecore_rdma_free_ilt(struct ecore_hwfn *p_hwfn)
  538 {
  539         /* Free Connection CXT */
  540         ecore_cxt_free_ilt_range(
  541                 p_hwfn, ECORE_ELEM_CXT,
  542                 ecore_cxt_get_proto_cid_start(p_hwfn,
  543                                               p_hwfn->p_rdma_info->proto),
  544                 ecore_cxt_get_proto_cid_count(p_hwfn,
  545                                               p_hwfn->p_rdma_info->proto,
  546                                               OSAL_NULL));
  547 
   548         /* Free Task CXT (intentionally RoCE, as the task-id is shared between
   549          * RoCE and iWARP)
  550          */
  551         ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
  552                                  ecore_cxt_get_proto_tid_count(
  553                                          p_hwfn, PROTOCOLID_ROCE));
  554 
  555         /* Free TSDM CXT */
  556         ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
  557                                  ecore_cxt_get_srq_count(p_hwfn));
  558 }
  559 
  560 static void ecore_rdma_free(struct ecore_hwfn *p_hwfn)
  561 {
  562         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
  563 
  564         ecore_rdma_free_reserved_lkey(p_hwfn);
  565 
  566         ecore_rdma_resc_free(p_hwfn);
  567 
  568         ecore_rdma_free_ilt(p_hwfn);
  569 }
  570 
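       /* Derive an EUI-64 style GUID from the 48-bit MAC address: flip the
        * universal/local bit in the first octet and splice 0xff, 0xfe into the
        * middle, e.g. 00:0e:1e:aa:bb:cc -> 02:0e:1e:ff:fe:aa:bb:cc.
        */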
  571 static void ecore_rdma_get_guid(struct ecore_hwfn *p_hwfn, u8 *guid)
  572 {
  573         u8 mac_addr[6];
  574 
  575         OSAL_MEMCPY(&mac_addr[0], &p_hwfn->hw_info.hw_mac_addr[0], ETH_ALEN);
  576         guid[0] = mac_addr[0] ^ 2;
  577         guid[1] = mac_addr[1];
  578         guid[2] = mac_addr[2];
  579         guid[3] = 0xff;
  580         guid[4] = 0xfe;
  581         guid[5] = mac_addr[3];
  582         guid[6] = mac_addr[4];
  583         guid[7] = mac_addr[5];
  584 }
  585 
  586 static void ecore_rdma_init_events(
  587         struct ecore_hwfn *p_hwfn,
  588         struct ecore_rdma_start_in_params *params)
  589 {
  590         struct ecore_rdma_events *events;
  591 
  592         events = &p_hwfn->p_rdma_info->events;
  593 
  594         events->unaffiliated_event = params->events->unaffiliated_event;
  595         events->affiliated_event = params->events->affiliated_event;
  596         events->context = params->events->context;
  597 }
  598 
  599 static void ecore_rdma_init_devinfo(
  600         struct ecore_hwfn *p_hwfn,
  601         struct ecore_rdma_start_in_params *params)
  602 {
  603         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  604         u32 pci_status_control;
  605 
  606         /* Vendor specific information */
  607         dev->vendor_id = p_hwfn->p_dev->vendor_id;
  608         dev->vendor_part_id = p_hwfn->p_dev->device_id;
  609         dev->hw_ver = 0;
  610         dev->fw_ver = STORM_FW_VERSION;
  611 
  612         ecore_rdma_get_guid(p_hwfn, (u8 *)(&dev->sys_image_guid));
  613         dev->node_guid = dev->sys_image_guid;
  614 
  615         dev->max_sge = OSAL_MIN_T(u32, RDMA_MAX_SGE_PER_SQ_WQE,
  616                                   RDMA_MAX_SGE_PER_RQ_WQE);
  617 
  618         if (p_hwfn->p_dev->rdma_max_sge) {
  619                 dev->max_sge = OSAL_MIN_T(u32,
  620                                      p_hwfn->p_dev->rdma_max_sge,
  621                                      dev->max_sge);
  622         }
  623 
   624         /* Set these values according to configuration.
   625          * The max SGE for SRQ is not defined by the FW for now,
   626          * so define it in the driver.
  627          * TODO: Get this value from FW.
  628          */
  629         dev->max_srq_sge = ECORE_RDMA_MAX_SGE_PER_SRQ_WQE;
  630         if (p_hwfn->p_dev->rdma_max_srq_sge) {
  631                 dev->max_srq_sge = OSAL_MIN_T(u32,
  632                                      p_hwfn->p_dev->rdma_max_srq_sge,
  633                                      dev->max_srq_sge);
  634         }
  635 
  636         dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
  637         dev->max_inline = (p_hwfn->p_dev->rdma_max_inline) ?
  638                 OSAL_MIN_T(u32,
  639                            p_hwfn->p_dev->rdma_max_inline,
  640                            dev->max_inline) :
  641                         dev->max_inline;
  642 
  643         dev->max_wqe = ECORE_RDMA_MAX_WQE;
  644         dev->max_cnq = (u8)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ);
  645 
   646         /* The number of QPs may be higher than ECORE_ROCE_MAX_QPS because
   647          * it is up-aligned to 16 and then to ILT page size within ecore cxt.
   648          * This is OK in terms of ILT but we don't want to configure the FW
   649          * above its abilities.
   650          */
  651         dev->max_qp = OSAL_MIN_T(u64, ROCE_MAX_QPS,
  652                              p_hwfn->p_rdma_info->num_qps);
  653 
   654         /* CQs use the same icids that QPs use; hence they are limited by the
  655          * number of icids. There are two icids per QP.
  656          */
  657         dev->max_cq = dev->max_qp * 2;
  658 
  659         /* The number of mrs is smaller by 1 since the first is reserved */
  660         dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
  661         dev->max_mr_size = ECORE_RDMA_MAX_MR_SIZE;
   662         /* The maximum supported CQE capacity per CQ. */
   663         /* The max number of CQEs fits in a two-layer PBL,
   664          * where 8 is the pointer size in bytes
   665          * and 32 is the size of a CQ element in bytes.
   666          */
  667         if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_32_BITS)
  668                 dev->max_cqe = ECORE_RDMA_MAX_CQE_32_BIT;
  669         else
  670                 dev->max_cqe = ECORE_RDMA_MAX_CQE_16_BIT;
  671 
  672         dev->max_mw = 0;
  673         dev->max_fmr = ECORE_RDMA_MAX_FMR;
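               /* Two-level PBL capacity: a page of 8-byte pointers, each
                * pointing at another page of 8-byte pointers; e.g. with 4K
                * pages this is (4096/8)^2 = 262144 mappable pages, so
                * max_mr_mw_fmr_size works out to 1 GB.
                */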
  674         dev->max_mr_mw_fmr_pbl = (OSAL_PAGE_SIZE/8) * (OSAL_PAGE_SIZE/8);
  675         dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * OSAL_PAGE_SIZE;
  676         dev->max_pkey = ECORE_RDMA_MAX_P_KEY;
   677         /* Right now we don't take any parameters from the user,
   678          * so assign the predefined num_srqs value to max_srq.
  679          */
  680         dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
  681 
  682         /* SRQ WQE size */
  683         dev->max_srq_wr = ECORE_RDMA_MAX_SRQ_WQE_ELEM;
  684 
  685         dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
  686                                           (RDMA_RESP_RD_ATOMIC_ELM_SIZE*2);
  687         dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
  688                                          RDMA_REQ_RD_ATOMIC_ELM_SIZE;
  689 
  690         dev->max_dev_resp_rd_atomic_resc =
  691                 dev->max_qp_resp_rd_atomic_resc * p_hwfn->p_rdma_info->num_qps;
  692         dev->page_size_caps = ECORE_RDMA_PAGE_SIZE_CAPS;
  693         dev->dev_ack_delay = ECORE_RDMA_ACK_DELAY;
  694         dev->max_pd = RDMA_MAX_PDS;
  695         dev->max_ah = dev->max_qp;
  696         dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, ECORE_RDMA_STATS_QUEUE);
  697 
   698         /* Set capabilities */
  699         dev->dev_caps = 0;
  700         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RNR_NAK, 1);
  701         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
  702         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
  703         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RESIZE_CQ, 1);
  704         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
  705         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
  706         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ZBVA, 1);
  707         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
  708 
  709         /* Check atomic operations support in PCI configuration space. */
  710         OSAL_PCI_READ_CONFIG_DWORD(p_hwfn->p_dev,
  711                                    PCICFG_DEVICE_STATUS_CONTROL_2,
  712                                    &pci_status_control);
  713 
  714         if (pci_status_control &
  715             PCICFG_DEVICE_STATUS_CONTROL_2_ATOMIC_REQ_ENABLE)
  716                 SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ATOMIC_OP, 1);
  717 
  718         if (IS_IWARP(p_hwfn))
  719                 ecore_iwarp_init_devinfo(p_hwfn);
  720 }
  721 
  722 static void ecore_rdma_init_port(
  723         struct ecore_hwfn *p_hwfn)
  724 {
  725         struct ecore_rdma_port *port = p_hwfn->p_rdma_info->port;
  726         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  727 
  728         port->port_state = p_hwfn->mcp_info->link_output.link_up ?
  729                 ECORE_RDMA_PORT_UP : ECORE_RDMA_PORT_DOWN;
  730 
  731         port->max_msg_size = OSAL_MIN_T(u64,
  732                                    (dev->max_mr_mw_fmr_size *
  733                                     p_hwfn->p_dev->rdma_max_sge),
  734                                         ((u64)1 << 31));
  735 
  736         port->pkey_bad_counter = 0;
  737 }
  738 
  739 static enum _ecore_status_t ecore_rdma_init_hw(
  740         struct ecore_hwfn *p_hwfn,
  741         struct ecore_ptt *p_ptt)
  742 {
  743         u32 ll2_ethertype_en;
  744 
  745         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW\n");
  746         p_hwfn->b_rdma_enabled_in_prs = false;
  747 
  748         if (IS_IWARP(p_hwfn))
  749                 return ecore_iwarp_init_hw(p_hwfn, p_ptt);
  750 
  751         ecore_wr(p_hwfn,
  752                  p_ptt,
  753                  PRS_REG_ROCE_DEST_QP_MAX_PF,
  754                  0);
  755 
  756         p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
  757 
  758         /* We delay writing to this reg until first cid is allocated. See
  759          * ecore_cxt_dynamic_ilt_alloc function for more details
  760          */
  761 
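               /* Enable bit 0 of PRS_REG_LIGHT_L2_ETHERTYPE_EN for the LL2
                * RoCE ethertype; ecore_rdma_stop() clears the same bit
                * (& 0xFFFE) on teardown.
                */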
  762         ll2_ethertype_en = ecore_rd(p_hwfn,
  763                              p_ptt,
  764                              PRS_REG_LIGHT_L2_ETHERTYPE_EN);
  765         ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
  766                  (ll2_ethertype_en | 0x01));
  767 
  768 #ifndef REAL_ASIC_ONLY
  769         if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
  770                 ecore_wr(p_hwfn,
  771                          p_ptt,
  772                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
  773                          0);
  774                 ecore_wr(p_hwfn,
  775                          p_ptt,
  776                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 4,
  777                          0);
  778         }
  779 #endif
  780 
  781         if (ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2)
  782         {
  783                 DP_NOTICE(p_hwfn,
  784                           true,
  785                           "The first RoCE's cid should be even\n");
  786                 return ECORE_UNKNOWN_ERROR;
  787         }
  788 
  789         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW - Done\n");
  790         return ECORE_SUCCESS;
  791 }
  792 
  793 static enum _ecore_status_t
  794 ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
  795 #ifdef CONFIG_DCQCN
  796                     struct ecore_ptt *p_ptt,
  797 #else
  798                     struct ecore_ptt OSAL_UNUSED *p_ptt,
  799 #endif
  800                     struct ecore_rdma_start_in_params *params)
  801 {
  802         struct rdma_init_func_ramrod_data *p_ramrod;
  803         struct rdma_init_func_hdr *pheader;
  804         struct ecore_rdma_info *p_rdma_info;
  805         struct ecore_sp_init_data init_data;
  806         struct ecore_spq_entry *p_ent;
  807         u16 igu_sb_id, sb_id;
  808         u8 ll2_queue_id;
  809         u32 cnq_id;
  810         enum _ecore_status_t rc;
  811 
  812         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Starting FW\n");
  813 
  814         p_rdma_info = p_hwfn->p_rdma_info;
  815 
  816         /* Save the number of cnqs for the function close ramrod */
  817         p_rdma_info->num_cnqs = params->desired_cnq;
  818 
  819         /* Get SPQ entry */
  820         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  821         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  822         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  823 
  824         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
  825                                    p_rdma_info->proto, &init_data);
  826         if (rc != ECORE_SUCCESS)
  827                 return rc;
  828 
  829         if (IS_IWARP(p_hwfn)) {
  830                 ecore_iwarp_init_fw_ramrod(p_hwfn,
  831                                            &p_ent->ramrod.iwarp_init_func);
  832                 p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
  833         } else {
  834 #ifdef CONFIG_DCQCN
  835                 rc = ecore_roce_dcqcn_cfg(p_hwfn, &params->roce.dcqcn_params,
  836                                           &p_ent->ramrod.roce_init_func, p_ptt);
  837                 if (rc != ECORE_SUCCESS) {
  838                         DP_NOTICE(p_hwfn, false,
  839                                   "Failed to configure DCQCN. rc = %d.\n", rc);
  840                         return rc;
  841                 }
  842 #endif
  843                 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
  844 
  845                 /* The ll2_queue_id is used only for UD QPs */
  846                 ll2_queue_id = ecore_ll2_handle_to_queue_id(
  847                         p_hwfn, params->roce.ll2_handle);
  848                 p_ent->ramrod.roce_init_func.roce.ll2_queue_id = ll2_queue_id;
  849         }
  850 
  851         pheader = &p_ramrod->params_header;
  852         pheader->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
  853         pheader->num_cnqs = params->desired_cnq;
  854 
  855         /* The first SRQ ILT page is used for XRC SRQs and all the following
  856          * pages contain regular SRQs. Hence the first regular SRQ ID is the
   857          * maximum number of XRC SRQs.
  858          */
  859         pheader->first_reg_srq_id = p_rdma_info->srq_id_offset;
  860         pheader->reg_srq_base_addr =
  861                 ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
  862 
  863         if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_16_BITS)
  864                 pheader->cq_ring_mode = 1; /* 1=16 bits */
  865         else
  866                 pheader->cq_ring_mode = 0; /* 0=32 bits */
  867 
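               /* Program each CNQ: bind it to its status block, point it at
                * the caller-supplied PBL, and assign it the queue zone used
                * for producer updates (qz_offset == cnq_id, see
                * ecore_rdma_cnq_prod_update()).
                */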
  868         for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++)
  869         {
  870                 sb_id = (u16)OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id);
  871                 igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
  872                 p_ramrod->cnq_params[cnq_id].sb_num =
  873                         OSAL_CPU_TO_LE16(igu_sb_id);
  874 
  875                 p_ramrod->cnq_params[cnq_id].sb_index =
  876                         p_hwfn->pf_params.rdma_pf_params.gl_pi;
  877 
  878                 p_ramrod->cnq_params[cnq_id].num_pbl_pages =
  879                         params->cnq_pbl_list[cnq_id].num_pbl_pages;
  880 
  881                 p_ramrod->cnq_params[cnq_id].pbl_base_addr.hi =
  882                         DMA_HI_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
  883                 p_ramrod->cnq_params[cnq_id].pbl_base_addr.lo =
  884                         DMA_LO_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
  885 
   886                 /* we arbitrarily decide that cnq_id will be used as the qz_offset */
  887                 p_ramrod->cnq_params[cnq_id].queue_zone_num =
  888                         OSAL_CPU_TO_LE16(p_rdma_info->queue_zone_base + cnq_id);
  889         }
  890 
  891         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  892 
  893         return rc;
  894 }
  895 
  896 enum _ecore_status_t ecore_rdma_alloc_tid(void  *rdma_cxt,
  897                                           u32   *itid)
  898 {
  899         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
  900         enum _ecore_status_t rc;
  901 
  902         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID\n");
  903 
  904         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
  905         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
  906                                       &p_hwfn->p_rdma_info->tid_map,
  907                                       itid);
  908         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
  909         if (rc != ECORE_SUCCESS) {
  910                 DP_NOTICE(p_hwfn, false, "Failed in allocating tid\n");
  911                 goto out;
  912         }
  913 
  914         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_TASK, *itid);
  915 out:
  916         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
  917         return rc;
  918 }
  919 
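       /* This must be the first TID allocation: the id handed out by the
        * empty tid_map is expected to equal RDMA_RESERVED_LKEY, which the
        * check below enforces.
        */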
  920 static OSAL_INLINE enum _ecore_status_t ecore_rdma_reserve_lkey(
  921                 struct ecore_hwfn *p_hwfn)
  922 {
  923         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  924 
  925         /* Tid 0 will be used as the key for "reserved MR".
   926          * The driver should allocate memory for it so it can be loaded, but no
  927          * ramrod should be passed on it.
  928          */
  929         ecore_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
  930         if (dev->reserved_lkey != RDMA_RESERVED_LKEY)
  931         {
  932                 DP_NOTICE(p_hwfn, true,
  933                           "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
  934                 return ECORE_INVAL;
  935         }
  936 
  937         return ECORE_SUCCESS;
  938 }
  939 
  940 static enum _ecore_status_t ecore_rdma_setup(struct ecore_hwfn    *p_hwfn,
  941                                 struct ecore_ptt                  *p_ptt,
  942                                 struct ecore_rdma_start_in_params *params)
  943 {
  944         enum _ecore_status_t rc = 0;
  945 
  946         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA setup\n");
  947 
  948         ecore_rdma_init_devinfo(p_hwfn, params);
  949         ecore_rdma_init_port(p_hwfn);
  950         ecore_rdma_init_events(p_hwfn, params);
  951 
  952         rc = ecore_rdma_reserve_lkey(p_hwfn);
  953         if (rc != ECORE_SUCCESS)
  954                 return rc;
  955 
  956         rc = ecore_rdma_init_hw(p_hwfn, p_ptt);
  957         if (rc != ECORE_SUCCESS)
  958                 return rc;
  959 
  960         if (IS_IWARP(p_hwfn)) {
  961                 rc = ecore_iwarp_setup(p_hwfn, params);
  962                 if (rc != ECORE_SUCCESS)
  963                         return rc;
  964         } else {
  965                 rc = ecore_roce_setup(p_hwfn);
  966                 if (rc != ECORE_SUCCESS)
  967                         return rc;
  968         }
  969 
  970         return ecore_rdma_start_fw(p_hwfn, p_ptt, params);
  971 }
  972 
  973 enum _ecore_status_t ecore_rdma_stop(void *rdma_cxt)
  974 {
  975         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
  976         struct rdma_close_func_ramrod_data *p_ramrod;
  977         struct ecore_sp_init_data init_data;
  978         struct ecore_spq_entry *p_ent;
  979         struct ecore_ptt *p_ptt;
  980         u32 ll2_ethertype_en;
  981         enum _ecore_status_t rc = ECORE_TIMEOUT;
  982 
  983         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop\n");
  984 
  985         rc = ecore_rdma_deactivate(p_hwfn);
  986         if (rc != ECORE_SUCCESS)
  987                 return rc;
  988 
  989         p_ptt = ecore_ptt_acquire(p_hwfn);
  990         if (!p_ptt) {
  991                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Failed to acquire PTT\n");
  992                 return rc;
  993         }
  994 
  995 #ifdef CONFIG_DCQCN
  996         ecore_roce_stop_rl(p_hwfn);
  997 #endif
  998 
  999         /* Disable RoCE search */
 1000         ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
 1001         p_hwfn->b_rdma_enabled_in_prs = false;
 1002 
 1003         ecore_wr(p_hwfn,
 1004                  p_ptt,
 1005                  PRS_REG_ROCE_DEST_QP_MAX_PF,
 1006                  0);
 1007 
 1008         ll2_ethertype_en = ecore_rd(p_hwfn,
 1009                                     p_ptt,
 1010                                     PRS_REG_LIGHT_L2_ETHERTYPE_EN);
 1011 
 1012         ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
 1013                  (ll2_ethertype_en & 0xFFFE));
 1014 
 1015 #ifndef REAL_ASIC_ONLY
  1016         /* INTERNAL: In CMT mode, re-initialize the NIG to direct packets to
  1017          * both engines for L2 performance; RoCE requires all traffic to go
  1018          * just to engine 0.
 1019          */
 1020         if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
 1021                 DP_ERR(p_hwfn->p_dev,
 1022                        "On Everest 4 Big Bear Board revision A0 when RoCE driver is loaded L2 performance is sub-optimal (all traffic is routed to engine 0). For optimal L2 results either remove RoCE driver or use board revision B0\n");
 1023 
 1024                 ecore_wr(p_hwfn,
 1025                          p_ptt,
 1026                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
 1027                          0x55555555);
 1028                 ecore_wr(p_hwfn,
 1029                          p_ptt,
 1030                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
 1031                          0x55555555);
 1032         }
 1033 #endif
 1034 
 1035         if (IS_IWARP(p_hwfn)) {
 1036                 rc = ecore_iwarp_stop(p_hwfn);
 1037                 if (rc != ECORE_SUCCESS) {
 1038                         ecore_ptt_release(p_hwfn, p_ptt);
  1039                         return rc;
 1040                 }
 1041         } else {
 1042                 rc = ecore_roce_stop(p_hwfn);
 1043                 if (rc != ECORE_SUCCESS) {
 1044                         ecore_ptt_release(p_hwfn, p_ptt);
  1045                         return rc;
 1046                 }
 1047         }
 1048 
 1049         ecore_ptt_release(p_hwfn, p_ptt);
 1050 
 1051         /* Get SPQ entry */
 1052         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1053         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1054         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1055 
 1056         /* Stop RoCE */
 1057         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
 1058                                    p_hwfn->p_rdma_info->proto, &init_data);
 1059         if (rc != ECORE_SUCCESS)
 1060                 goto out;
 1061 
 1062         p_ramrod = &p_ent->ramrod.rdma_close_func;
 1063 
 1064         p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
 1065         p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
 1066 
 1067         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1068 
 1069 out:
 1070         ecore_rdma_free(p_hwfn);
 1071 
 1072         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
 1073         return rc;
 1074 }
 1075 
 1076 enum _ecore_status_t ecore_rdma_add_user(void                 *rdma_cxt,
 1077                         struct ecore_rdma_add_user_out_params *out_params)
 1078 {
 1079         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1080         u32 dpi_start_offset;
 1081         u32 returned_id = 0;
 1082         enum _ecore_status_t rc;
 1083 
 1084         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding User\n");
 1085 
 1086         /* Allocate DPI */
 1087         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1088         rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
 1089                                       &returned_id);
 1090         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1091 
 1092         if (rc != ECORE_SUCCESS)
 1093                 DP_NOTICE(p_hwfn, false, "Failed in allocating dpi\n");
 1094 
 1095         out_params->dpi = (u16)returned_id;
 1096 
 1097         /* Calculate the corresponding DPI address */
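               /* Each DPI is a dpi_size-byte window into the doorbell BAR:
                * dpi_addr = doorbells + dpi_start_offset + dpi * dpi_size,
                * e.g. dpi 2 with a 4K dpi_size starts 8K past the first DPI.
                */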
 1098         dpi_start_offset = p_hwfn->dpi_start_offset;
 1099 
 1100         out_params->dpi_addr = (u64)(osal_int_ptr_t)((u8 OSAL_IOMEM*)p_hwfn->doorbells +
 1101                                                      dpi_start_offset +
 1102                                                      ((out_params->dpi) * p_hwfn->dpi_size));
 1103 
 1104         out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset +
 1105                                     out_params->dpi * p_hwfn->dpi_size;
 1106 
 1107         out_params->dpi_size = p_hwfn->dpi_size;
 1108         out_params->wid_count = p_hwfn->wid_count;
 1109 
 1110         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
 1111         return rc;
 1112 }
 1113 
 1114 struct ecore_rdma_port *ecore_rdma_query_port(void      *rdma_cxt)
 1115 {
 1116         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1117         struct ecore_rdma_port *p_port = p_hwfn->p_rdma_info->port;
 1118         struct ecore_mcp_link_state *p_link_output;
 1119 
 1120         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA Query port\n");
 1121 
 1122         /* The link state is saved only for the leading hwfn */
 1123         p_link_output =
 1124                 &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
 1125 
 1126         /* Link may have changed... */
 1127         p_port->port_state = p_link_output->link_up ? ECORE_RDMA_PORT_UP
 1128                                                     : ECORE_RDMA_PORT_DOWN;
 1129 
 1130         p_port->link_speed = p_link_output->speed;
 1131 
 1132         p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
 1133 
 1134         return p_port;
 1135 }
 1136 
 1137 struct ecore_rdma_device *ecore_rdma_query_device(void  *rdma_cxt)
 1138 {
 1139         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1140 
 1141         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query device\n");
 1142 
 1143         /* Return struct with device parameters */
 1144         return p_hwfn->p_rdma_info->dev;
 1145 }
 1146 
 1147 void ecore_rdma_free_tid(void   *rdma_cxt,
 1148                          u32    itid)
 1149 {
 1150         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1151 
 1152         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", itid);
 1153 
 1154         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1155         ecore_bmap_release_id(p_hwfn,
 1156                               &p_hwfn->p_rdma_info->tid_map,
 1157                               itid);
 1158         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1159 }
 1160 
 1161 void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 1162 {
 1163         struct ecore_hwfn *p_hwfn;
 1164         u16 qz_num;
 1165         u32 addr;
 1166 
 1167         p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1168 
 1169         if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
 1170                 DP_NOTICE(p_hwfn, false,
 1171                           "queue zone offset %d is too large (max is %d)\n",
 1172                           qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
 1173                 return;
 1174         }
 1175 
 1176         qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
 1177         addr = GTT_BAR0_MAP_REG_USDM_RAM +
 1178                USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
 1179 
 1180         REG_WR16(p_hwfn, addr, prod);
 1181 
 1182         /* keep prod updates ordered */
 1183         OSAL_WMB(p_hwfn->p_dev);
 1184 }
 1185 
 1186 enum _ecore_status_t ecore_rdma_alloc_pd(void   *rdma_cxt,
 1187                                          u16    *pd)
 1188 {
 1189         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1190         u32                  returned_id;
 1191         enum _ecore_status_t rc;
 1192 
 1193         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD\n");
 1194 
 1195         /* Allocates an unused protection domain */
 1196         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1197         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
 1198                                       &p_hwfn->p_rdma_info->pd_map,
 1199                                       &returned_id);
 1200         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1201         if (rc != ECORE_SUCCESS)
 1202                 DP_NOTICE(p_hwfn, false, "Failed in allocating pd id\n");
 1203 
 1204         *pd = (u16)returned_id;
 1205 
 1206         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
 1207         return rc;
 1208 }
 1209 
 1210 void ecore_rdma_free_pd(void    *rdma_cxt,
 1211                         u16     pd)
 1212 {
 1213         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1214 
 1215         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "pd = %08x\n", pd);
 1216 
 1217         /* Returns a previously allocated protection domain for reuse */
 1218         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1219         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
 1220         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1221 }
 1222 
 1223 enum _ecore_status_t ecore_rdma_alloc_xrcd(void *rdma_cxt,
 1224                                            u16  *xrcd_id)
 1225 {
 1226         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1227         u32                  returned_id;
 1228         enum _ecore_status_t rc;
 1229 
 1230         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD\n");
 1231 
 1232         /* Allocates an unused XRC domain */
 1233         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1234         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
 1235                                       &p_hwfn->p_rdma_info->xrcd_map,
 1236                                       &returned_id);
 1237         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1238         if (rc != ECORE_SUCCESS)
 1239                 DP_NOTICE(p_hwfn, false, "Failed in allocating xrcd id\n");
 1240 
 1241         *xrcd_id = (u16)returned_id;
 1242 
 1243         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
 1244         return rc;
 1245 }
 1246 
 1247 void ecore_rdma_free_xrcd(void  *rdma_cxt,
 1248                           u16   xrcd_id)
 1249 {
 1250         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1251 
 1252         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
 1253 
  1254         /* Returns a previously allocated XRC domain for reuse */
 1255         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1256         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
 1257         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1258 }
 1259 
 1260 static enum ecore_rdma_toggle_bit
 1261 ecore_rdma_toggle_bit_create_resize_cq(struct ecore_hwfn *p_hwfn,
 1262                                        u16 icid)
 1263 {
 1264         struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
 1265         enum ecore_rdma_toggle_bit toggle_bit;
 1266         u32 bmap_id;
 1267 
 1268         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", icid);
 1269 
  1270         /* This function toggles the bit that is related to a given icid
  1271          * and returns the new toggle bit's value.
 1272          */
 1273         bmap_id = icid - ecore_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
 1274 
 1275         OSAL_SPIN_LOCK(&p_info->lock);
 1276         toggle_bit = !OSAL_TEST_AND_FLIP_BIT(bmap_id, p_info->toggle_bits.bitmap);
 1277         OSAL_SPIN_UNLOCK(&p_info->lock);
 1278 
 1279         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_RDMA_TOGGLE_BIT_= %d\n",
 1280                    toggle_bit);
 1281 
 1282         return toggle_bit;
 1283 }
 1284 
 1285 enum _ecore_status_t ecore_rdma_create_cq(void                        *rdma_cxt,
 1286                                 struct ecore_rdma_create_cq_in_params *params,
 1287                                 u16                                   *icid)
 1288 {
 1289         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1290         struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
 1291         struct rdma_create_cq_ramrod_data       *p_ramrod;
 1292         enum ecore_rdma_toggle_bit              toggle_bit;
 1293         struct ecore_sp_init_data               init_data;
 1294         struct ecore_spq_entry                  *p_ent;
 1295         enum _ecore_status_t                    rc;
 1296         u32                                     returned_id;
 1297 
 1298         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cq_handle = %08x%08x\n",
 1299                    params->cq_handle_hi, params->cq_handle_lo);
 1300 
 1301         /* Allocate icid */
 1302         OSAL_SPIN_LOCK(&p_info->lock);
 1303         rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
 1304         OSAL_SPIN_UNLOCK(&p_info->lock);
 1305 
 1306         if (rc != ECORE_SUCCESS)
 1307         {
 1308                 DP_NOTICE(p_hwfn, false, "Can't create CQ, rc = %d\n", rc);
 1309                 return rc;
 1310         }
 1311 
 1312         *icid = (u16)(returned_id +
 1313                       ecore_cxt_get_proto_cid_start(
 1314                               p_hwfn, p_info->proto));
 1315 
 1316         /* Check if icid requires a page allocation */
 1317         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *icid);
 1318         if (rc != ECORE_SUCCESS)
 1319                 goto err;
 1320 
 1321         /* Get SPQ entry */
 1322         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1323         init_data.cid = *icid;
 1324         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1325         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1326 
 1327         /* Send create CQ ramrod */
 1328         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 1329                                    RDMA_RAMROD_CREATE_CQ,
 1330                                    p_info->proto, &init_data);
 1331         if (rc != ECORE_SUCCESS)
 1332                 goto err;
 1333 
 1334         p_ramrod = &p_ent->ramrod.rdma_create_cq;
 1335 
 1336         p_ramrod->cq_handle.hi = OSAL_CPU_TO_LE32(params->cq_handle_hi);
 1337         p_ramrod->cq_handle.lo = OSAL_CPU_TO_LE32(params->cq_handle_lo);
 1338         p_ramrod->dpi = OSAL_CPU_TO_LE16(params->dpi);
 1339         p_ramrod->is_two_level_pbl = params->pbl_two_level;
 1340         p_ramrod->max_cqes = OSAL_CPU_TO_LE32(params->cq_size);
 1341         DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
 1342         p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(params->pbl_num_pages);
 1343         p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM)
 1344                         + params->cnq_id;
 1345         p_ramrod->int_timeout = params->int_timeout;
 1346         /* INTERNAL: Two layer PBL is currently not supported, ignoring next line */
 1347         /* INTERNAL: p_ramrod->pbl_log_page_size = params->pbl_page_size_log - 12; */
 1348 
 1349         /* toggle the bit for every resize or create cq for a given icid */
 1350         toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
 1351 
 1352         p_ramrod->toggle_bit = toggle_bit;
 1353 
 1354         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1355         if (rc != ECORE_SUCCESS) {
 1356                 /* restore toggle bit */
 1357                 ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
 1358                 goto err;
 1359         }
 1360 
 1361         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Created CQ, rc = %d\n", rc);
 1362         return rc;
 1363 
 1364 err:
 1365         /* release allocated icid */
 1366         OSAL_SPIN_LOCK(&p_info->lock);
 1367         ecore_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
 1368         OSAL_SPIN_UNLOCK(&p_info->lock);
 1369 
 1370         DP_NOTICE(p_hwfn, false, "Create CQ failed, rc = %d\n", rc);
 1371 
 1372         return rc;
 1373 }
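
/*
 * Editorial usage sketch (not part of the original driver): shows how a
 * consumer might fill ecore_rdma_create_cq_in_params before calling
 * ecore_rdma_create_cq(). example_create_cq() and all literal values are
 * hypothetical; pbl_phys must be the DMA address of a prepared PBL.
 */
#if 0   /* usage sketch only; not compiled */
static enum _ecore_status_t example_create_cq(void *rdma_cxt,
                                              dma_addr_t pbl_phys,
                                              u16 dpi, u16 *cq_icid)
{
        struct ecore_rdma_create_cq_in_params params = {0};

        params.cq_handle_hi  = 0;        /* opaque 64-bit consumer handle */
        params.cq_handle_lo  = 0x1234;   /* hypothetical cookie */
        params.dpi           = dpi;
        params.pbl_two_level = 0;        /* single-level PBL */
        params.pbl_ptr       = pbl_phys;
        params.pbl_num_pages = 1;
        params.cq_size       = 256;      /* max CQEs */
        params.cnq_id        = 0;        /* relative CNQ index */
        params.int_timeout   = 0;

        /* On success *cq_icid holds the absolute icid used by later ramrods */
        return ecore_rdma_create_cq(rdma_cxt, &params, cq_icid);
}
#endif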
 1374 
 1375 enum _ecore_status_t ecore_rdma_destroy_cq(void                 *rdma_cxt,
 1376                         struct ecore_rdma_destroy_cq_in_params  *in_params,
 1377                         struct ecore_rdma_destroy_cq_out_params *out_params)
 1378 {
 1379         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1380         struct rdma_destroy_cq_output_params *p_ramrod_res;
 1381         struct rdma_destroy_cq_ramrod_data      *p_ramrod;
 1382         struct ecore_sp_init_data               init_data;
 1383         struct ecore_spq_entry                  *p_ent;
 1384         dma_addr_t                              ramrod_res_phys;
 1385         enum _ecore_status_t                    rc = ECORE_NOMEM;
 1386 
 1387         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);
 1388 
 1389         p_ramrod_res = (struct rdma_destroy_cq_output_params *)
 1390                         OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
 1391                                 sizeof(struct rdma_destroy_cq_output_params));
 1392         if (!p_ramrod_res)
 1393         {
 1394                 DP_NOTICE(p_hwfn, false,
 1395                           "ecore destroy cq failed: cannot allocate memory (ramrod)\n");
 1396                 return rc;
 1397         }
 1398 
 1399         /* Get SPQ entry */
 1400         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1401         init_data.cid =  in_params->icid;
 1402         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1403         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1404 
 1405         /* Send destroy CQ ramrod */
 1406         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 1407                                    RDMA_RAMROD_DESTROY_CQ,
 1408                                    p_hwfn->p_rdma_info->proto, &init_data);
 1409         if (rc != ECORE_SUCCESS)
 1410                 goto err;
 1411 
 1412         p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
 1413         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 1414 
 1415         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 1416         if (rc != ECORE_SUCCESS)
 1417                 goto err;
 1418 
 1419         out_params->num_cq_notif =
 1420                 OSAL_LE16_TO_CPU(p_ramrod_res->cnq_num);
 1421 
 1422         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 1423                                sizeof(struct rdma_destroy_cq_output_params));
 1424 
 1425         /* Free icid */
 1426         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 1427 
 1428         ecore_bmap_release_id(p_hwfn,
 1429                               &p_hwfn->p_rdma_info->cq_map,
 1430                 (in_params->icid - ecore_cxt_get_proto_cid_start(
 1431                         p_hwfn, p_hwfn->p_rdma_info->proto)));
 1432 
 1433         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 1434 
 1435         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
 1436         return rc;
 1437 
 1438 err:
 1439         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 1440                                sizeof(struct rdma_destroy_cq_output_params));
 1441 
 1442         return rc;
 1443 }
 1444 
 1445 void ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac)
 1446 {
 1447         p_fw_mac[0] = OSAL_CPU_TO_LE16((p_ecore_mac[0] << 8) + p_ecore_mac[1]);
 1448         p_fw_mac[1] = OSAL_CPU_TO_LE16((p_ecore_mac[2] << 8) + p_ecore_mac[3]);
 1449         p_fw_mac[2] = OSAL_CPU_TO_LE16((p_ecore_mac[4] << 8) + p_ecore_mac[5]);
 1450 }
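
/*
 * Worked example (editorial): for the MAC 00:0e:1e:aa:bb:cc the packing
 * above yields p_fw_mac[0] = 0x000e, p_fw_mac[1] = 0x1eaa and
 * p_fw_mac[2] = 0xbbcc (values shown before the OSAL_CPU_TO_LE16
 * conversion), i.e. FW consumes the address as three 16-bit words with
 * the earlier octet in the high byte.
 */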
 1451 
 1452 enum _ecore_status_t ecore_rdma_query_qp(void                   *rdma_cxt,
 1453                         struct ecore_rdma_qp                    *qp,
 1454                         struct ecore_rdma_query_qp_out_params   *out_params)
 1455 
 1456 {
 1457         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1458         enum _ecore_status_t rc = ECORE_SUCCESS;
 1459 
 1460         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
 1461 
 1462         /* The following fields are filled from the qp structure rather
 1463          * than queried from FW, since FW cannot modify them.
 1464          */
 1465         out_params->mtu = qp->mtu;
 1466         out_params->dest_qp = qp->dest_qp;
 1467         out_params->incoming_atomic_en = qp->incoming_atomic_en;
 1468         out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
 1469         out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
 1470         out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
 1471         out_params->dgid = qp->dgid;
 1472         out_params->flow_label = qp->flow_label;
 1473         out_params->hop_limit_ttl = qp->hop_limit_ttl;
 1474         out_params->traffic_class_tos = qp->traffic_class_tos;
 1475         out_params->timeout = qp->ack_timeout;
 1476         out_params->rnr_retry = qp->rnr_retry_cnt;
 1477         out_params->retry_cnt = qp->retry_cnt;
 1478         out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
 1479         out_params->pkey_index = 0;
 1480         out_params->max_rd_atomic = qp->max_rd_atomic_req;
 1481         out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
 1482         out_params->sqd_async = qp->sqd_async;
 1483 
 1484         if (IS_IWARP(p_hwfn))
 1485                 rc = ecore_iwarp_query_qp(qp, out_params);
 1486         else
 1487                 rc = ecore_roce_query_qp(p_hwfn, qp, out_params);
 1488 
 1489         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query QP, rc = %d\n", rc);
 1490         return rc;
 1491 }
 1492 
 1493 enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
 1494                                            struct ecore_rdma_qp *qp,
 1495                                            struct ecore_rdma_destroy_qp_out_params *out_params)
 1496 {
 1497         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1498         enum _ecore_status_t rc = ECORE_SUCCESS;
 1499 
 1500         if (!rdma_cxt || !qp) {
 1501                 if (rdma_cxt) /* p_hwfn is invalid when rdma_cxt is NULL */
 1502                         DP_ERR(p_hwfn,
 1503                                "ecore rdma destroy qp failed: invalid NULL input. rdma_cxt=%p, qp=%p\n", rdma_cxt, qp);
 1504                 return ECORE_INVAL;
 1505         }
 1506 
 1507         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)\n", qp->icid);
 1508 
 1509         if (IS_IWARP(p_hwfn))
 1510                 rc = ecore_iwarp_destroy_qp(p_hwfn, qp);
 1511         else
 1512                 rc = ecore_roce_destroy_qp(p_hwfn, qp, out_params);
 1513 
 1514         /* free qp params struct */
 1515         OSAL_FREE(p_hwfn->p_dev, qp);
 1516 
 1517         return rc;
 1518 }
 1519 
 1520 struct ecore_rdma_qp *ecore_rdma_create_qp(void                 *rdma_cxt,
 1521                         struct ecore_rdma_create_qp_in_params   *in_params,
 1522                         struct ecore_rdma_create_qp_out_params  *out_params)
 1523 {
 1524         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1525         struct ecore_rdma_qp *qp;
 1526         u8 max_stats_queues;
 1527         enum _ecore_status_t rc = 0;
 1528 
 1529         if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
 1530                 /* p_hwfn is invalid when rdma_cxt is NULL */
 1531                 if (rdma_cxt)
 1532                         DP_ERR(p_hwfn->p_dev,
 1533                                "ecore rdma create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, rdma_info=?)\n",
 1534                                rdma_cxt, in_params, out_params);
 1535                 return OSAL_NULL;
 1536         }
 1537 
 1538         /* Some sanity checks... */
 1539         max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
 1540         if (in_params->stats_queue >= max_stats_queues) {
 1541                 DP_ERR(p_hwfn->p_dev,
 1542                        "ecore rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
 1543                        in_params->stats_queue, max_stats_queues);
 1544                 return OSAL_NULL;
 1545         }
 1546 
 1547         if (IS_IWARP(p_hwfn)) {
 1548                 if (in_params->sq_num_pages * sizeof(struct regpair) >
 1549                     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
 1550                         DP_NOTICE(p_hwfn->p_dev, true, "Sq num pages: %d exceeds maximum\n",
 1551                                   in_params->sq_num_pages);
 1552                         return OSAL_NULL;
 1553                 }
 1554                 if (in_params->rq_num_pages * sizeof(struct regpair) >
 1555                     IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
 1556                         DP_NOTICE(p_hwfn->p_dev, true,
 1557                                   "Rq num pages: %d exceeds maximum\n",
 1558                                   in_params->rq_num_pages);
 1559                         return OSAL_NULL;
 1560                 }
 1561         }
 1562 
 1563         qp = OSAL_ZALLOC(p_hwfn->p_dev,
 1564                          GFP_KERNEL,
 1565                          sizeof(struct ecore_rdma_qp));
 1566         if (!qp)
 1567         {
 1568                 DP_NOTICE(p_hwfn, false, "Failed to allocate ecore_rdma_qp\n");
 1569                 return OSAL_NULL;
 1570         }
 1571 
 1572         qp->cur_state = ECORE_ROCE_QP_STATE_RESET;
 1573 #ifdef CONFIG_ECORE_IWARP
 1574         qp->iwarp_state = ECORE_IWARP_QP_STATE_IDLE;
 1575 #endif
 1576         qp->qp_handle.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_hi);
 1577         qp->qp_handle.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_lo);
 1578         qp->qp_handle_async.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_async_hi);
 1579         qp->qp_handle_async.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_async_lo);
 1580         qp->use_srq = in_params->use_srq;
 1581         qp->signal_all = in_params->signal_all;
 1582         qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
 1583         qp->pd = in_params->pd;
 1584         qp->dpi = in_params->dpi;
 1585         qp->sq_cq_id = in_params->sq_cq_id;
 1586         qp->sq_num_pages = in_params->sq_num_pages;
 1587         qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
 1588         qp->rq_cq_id = in_params->rq_cq_id;
 1589         qp->rq_num_pages = in_params->rq_num_pages;
 1590         qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
 1591         qp->srq_id = in_params->srq_id;
 1592         qp->req_offloaded = false;
 1593         qp->resp_offloaded = false;
 1594         /* e2e_flow_control cannot be done when using an SRQ.
 1595          * Refer to section 9.7.7.2, End-to-End Flow Control, of the IB spec.
 1596          */
 1597         qp->e2e_flow_control_en = !qp->use_srq;
 1598         qp->stats_queue = in_params->stats_queue;
 1599         qp->qp_type = in_params->qp_type;
 1600         qp->xrcd_id = in_params->xrcd_id;
 1601 
 1602         if (IS_IWARP(p_hwfn)) {
 1603                 rc = ecore_iwarp_create_qp(p_hwfn, qp, out_params);
 1604                 qp->qpid = qp->icid;
 1605         } else {
 1606                 rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp->qp_idx);
 1607                 qp->icid = ECORE_ROCE_QP_TO_ICID(qp->qp_idx);
 1608                 qp->qpid = ((0xFF << 16) | qp->icid);
 1609         }
 1610 
 1611         if (rc != ECORE_SUCCESS) {
 1612                 OSAL_FREE(p_hwfn->p_dev, qp);
 1613                 return OSAL_NULL;
 1614         }
 1615 
 1616         out_params->icid = qp->icid;
 1617         out_params->qp_id = qp->qpid;
 1618 
 1619         /* INTERNAL: max_sq_sges future use only*/
 1620 
 1621         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Create QP, rc = %d\n", rc);
 1622         return qp;
 1623 }
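
/*
 * Editorial usage sketch (not part of the original driver): a minimal
 * QP creation, assuming CQs, a PD and a DPI were set up first.
 * example_create_qp() and all literal values are hypothetical;
 * in.qp_type would additionally select the XRC variants where required.
 */
#if 0   /* usage sketch only; not compiled */
static struct ecore_rdma_qp *example_create_qp(void *rdma_cxt, u16 pd, u16 dpi,
                                               u16 sq_cq_icid, u16 rq_cq_icid)
{
        struct ecore_rdma_create_qp_in_params in = {0};
        struct ecore_rdma_create_qp_out_params out;

        in.pd           = pd;
        in.dpi          = dpi;
        in.sq_cq_id     = sq_cq_icid;
        in.rq_cq_id     = rq_cq_icid;
        in.sq_num_pages = 1;            /* hypothetical PBL sizes */
        in.rq_num_pages = 1;
        in.use_srq      = 0;            /* keeps e2e flow control enabled */
        in.signal_all   = 1;
        in.stats_queue  = 0;            /* must be < dev->max_stats_queues */

        /* Returns OSAL_NULL on any validation or allocation failure */
        return ecore_rdma_create_qp(rdma_cxt, &in, &out);
}
#endif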
 1624 
 1625 #define ECORE_RDMA_ECN_SHIFT 0
 1626 #define ECORE_RDMA_ECN_MASK 0x3
 1627 #define ECORE_RDMA_DSCP_SHIFT 2
 1628 #define ECORE_RDMA_DSCP_MASK 0x3f
 1629 #define ECORE_RDMA_VLAN_PRIO_SHIFT 13
 1630 #define ECORE_RDMA_VLAN_PRIO_MASK 0x7
 1631 enum _ecore_status_t ecore_rdma_modify_qp(
 1632         void *rdma_cxt,
 1633         struct ecore_rdma_qp *qp,
 1634         struct ecore_rdma_modify_qp_in_params *params)
 1635 {
 1636         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1637         enum ecore_roce_qp_state prev_state;
 1638         enum _ecore_status_t     rc = ECORE_SUCCESS;
 1639 
 1640         if (GET_FIELD(params->modify_flags,
 1641                       ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN))
 1642         {
 1643                 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
 1644                 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
 1645                 qp->incoming_atomic_en = params->incoming_atomic_en;
 1646         }
 1647 
 1648         /* Update QP structure with the updated values */
 1649         if (GET_FIELD(params->modify_flags,
 1650                       ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE))
 1651         {
 1652                 qp->roce_mode = params->roce_mode;
 1653         }
 1654         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY))
 1655         {
 1656                 qp->pkey = params->pkey;
 1657         }
 1658         if (GET_FIELD(params->modify_flags,
 1659                       ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
 1660         {
 1661                 qp->e2e_flow_control_en = params->e2e_flow_control_en;
 1662         }
 1663         if (GET_FIELD(params->modify_flags,
 1664                       ECORE_ROCE_MODIFY_QP_VALID_DEST_QP))
 1665         {
 1666                 qp->dest_qp = params->dest_qp;
 1667         }
 1668         if (GET_FIELD(params->modify_flags,
 1669                       ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR))
 1670         {
 1671                 /* Indicates that the following parameters have changed:
 1672                  * Traffic class, flow label, hop limit, source GID,
 1673                  * destination GID, loopback indicator
 1674                  */
 1675                 qp->flow_label = params->flow_label;
 1676                 qp->hop_limit_ttl = params->hop_limit_ttl;
 1677 
 1678                 qp->sgid = params->sgid;
 1679                 qp->dgid = params->dgid;
 1680                 qp->udp_src_port = params->udp_src_port;
 1681                 qp->vlan_id = params->vlan_id;
 1682                 qp->traffic_class_tos = params->traffic_class_tos;
 1683 
 1684                 /* apply global override values */
 1685                 if (p_hwfn->p_rdma_info->glob_cfg.vlan_pri_en)
 1686                         SET_FIELD(qp->vlan_id, ECORE_RDMA_VLAN_PRIO,
 1687                                   p_hwfn->p_rdma_info->glob_cfg.vlan_pri);
 1688 
 1689                 if (p_hwfn->p_rdma_info->glob_cfg.ecn_en)
 1690                         SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_ECN,
 1691                                   p_hwfn->p_rdma_info->glob_cfg.ecn);
 1692 
 1693                 if (p_hwfn->p_rdma_info->glob_cfg.dscp_en)
 1694                         SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_DSCP,
 1695                                   p_hwfn->p_rdma_info->glob_cfg.dscp);
 1696 
 1697                 qp->mtu = params->mtu;
 1698 
 1699                 OSAL_MEMCPY((u8 *)&qp->remote_mac_addr[0],
 1700                             (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
 1701                 if (params->use_local_mac) {
 1702                         OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
 1703                                     (u8 *)&params->local_mac_addr[0],
 1704                                     ETH_ALEN);
 1705                 } else {
 1706                         OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
 1707                                     (u8 *)&p_hwfn->hw_info.hw_mac_addr,
 1708                                     ETH_ALEN);
 1709                 }
 1710         }
 1711         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN))
 1712         {
 1713                 qp->rq_psn = params->rq_psn;
 1714         }
 1715         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN))
 1716         {
 1717                 qp->sq_psn = params->sq_psn;
 1718         }
 1719         if (GET_FIELD(params->modify_flags,
 1720                       ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
 1721         {
 1722                 qp->max_rd_atomic_req = params->max_rd_atomic_req;
 1723         }
 1724         if (GET_FIELD(params->modify_flags,
 1725                       ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
 1726         {
 1727                 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
 1728         }
 1729         if (GET_FIELD(params->modify_flags,
 1730                       ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
 1731         {
 1732                 qp->ack_timeout = params->ack_timeout;
 1733         }
 1734         if (GET_FIELD(params->modify_flags,
 1735                       ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT))
 1736         {
 1737                 qp->retry_cnt = params->retry_cnt;
 1738         }
 1739         if (GET_FIELD(params->modify_flags,
 1740                       ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
 1741         {
 1742                 qp->rnr_retry_cnt = params->rnr_retry_cnt;
 1743         }
 1744         if (GET_FIELD(params->modify_flags,
 1745                       ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
 1746         {
 1747                 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
 1748         }
 1749 
 1750         qp->sqd_async = params->sqd_async;
 1751 
 1752         prev_state = qp->cur_state;
 1753         if (GET_FIELD(params->modify_flags,
 1754                       ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE))
 1755         {
 1756                 qp->cur_state = params->new_state;
 1757                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp->cur_state=%d\n",
 1758                            qp->cur_state);
 1759         }
 1760 
 1761         if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI) {
 1762                 qp->has_req = 1;
 1763         } else if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT)
 1764         {
 1765                 qp->has_resp = 1;
 1766         } else {
 1767                 qp->has_req = 1;
 1768                 qp->has_resp = 1;
 1769         }
 1770 
 1771         if (IS_IWARP(p_hwfn)) {
 1772                 enum ecore_iwarp_qp_state new_state =
 1773                         ecore_roce2iwarp_state(qp->cur_state);
 1774 
 1775                 rc = ecore_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
 1776         } else {
 1777                 rc = ecore_roce_modify_qp(p_hwfn, qp, prev_state, params);
 1778         }
 1779 
 1780         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify QP, rc = %d\n", rc);
 1781         return rc;
 1782 }
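
/*
 * Editorial usage sketch (not part of the original driver): only fields
 * whose VALID bit is set in modify_flags are applied, so a state change
 * plus an initial SQ PSN looks like the following. The helper name and
 * PSN value are hypothetical; ECORE_ROCE_QP_STATE_RTS is assumed to be
 * the RTS member of enum ecore_roce_qp_state.
 */
#if 0   /* usage sketch only; not compiled */
static enum _ecore_status_t example_qp_to_rts(void *rdma_cxt,
                                              struct ecore_rdma_qp *qp)
{
        struct ecore_rdma_modify_qp_in_params p = {0};

        SET_FIELD(p.modify_flags, ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
        p.new_state = ECORE_ROCE_QP_STATE_RTS;

        SET_FIELD(p.modify_flags, ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
        p.sq_psn = 0;                   /* hypothetical initial PSN */

        return ecore_rdma_modify_qp(rdma_cxt, qp, &p);
}
#endif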
 1783 
 1784 enum _ecore_status_t ecore_rdma_register_tid(void                *rdma_cxt,
 1785                         struct ecore_rdma_register_tid_in_params *params)
 1786 {
 1787         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1788         struct rdma_register_tid_ramrod_data *p_ramrod;
 1789         struct ecore_sp_init_data            init_data;
 1790         struct ecore_spq_entry               *p_ent;
 1791         enum rdma_tid_type                   tid_type;
 1792         u8                                   fw_return_code;
 1793         enum _ecore_status_t                 rc;
 1794 
 1795         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", params->itid);
 1796 
 1797         /* Get SPQ entry */
 1798         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1799         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1800         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1801 
 1802         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
 1803                                    p_hwfn->p_rdma_info->proto, &init_data);
 1804         if (rc != ECORE_SUCCESS) {
 1805                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 1806                 return rc;
 1807         }
 1808 
 1809         if (p_hwfn->p_rdma_info->last_tid < params->itid) {
 1810                 p_hwfn->p_rdma_info->last_tid = params->itid;
 1811         }
 1812 
 1813         p_ramrod = &p_ent->ramrod.rdma_register_tid;
 1814 
 1815         p_ramrod->flags = 0;
 1816         SET_FIELD(p_ramrod->flags,
 1817                   RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
 1818                   params->pbl_two_level);
 1819 
 1820         SET_FIELD(p_ramrod->flags,
 1821                   RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
 1822                   params->zbva);
 1823 
 1824         SET_FIELD(p_ramrod->flags,
 1825                   RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR,
 1826                   params->phy_mr);
 1827 
 1828         /* Don't initialize D/C field, as it may override other bits. */
 1829         if (!(params->tid_type == ECORE_RDMA_TID_FMR) &&
 1830             !(params->dma_mr))
 1831                 SET_FIELD(p_ramrod->flags,
 1832                           RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
 1833                           params->page_size_log - 12);
 1834 
 1835         SET_FIELD(p_ramrod->flags,
 1836                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
 1837                   params->remote_read);
 1838 
 1839         SET_FIELD(p_ramrod->flags,
 1840                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
 1841                   params->remote_write);
 1842 
 1843         SET_FIELD(p_ramrod->flags,
 1844                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
 1845                   params->remote_atomic);
 1846 
 1847         SET_FIELD(p_ramrod->flags,
 1848                   RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
 1849                   params->local_write);
 1850 
 1851         SET_FIELD(p_ramrod->flags,
 1852                   RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
 1853                   params->local_read);
 1854 
 1855         SET_FIELD(p_ramrod->flags,
 1856                   RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
 1857                   params->mw_bind);
 1858 
 1859         SET_FIELD(p_ramrod->flags1,
 1860                   RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
 1861                   params->pbl_page_size_log - 12);
 1862 
 1863         SET_FIELD(p_ramrod->flags2,
 1864                   RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
 1865                   params->dma_mr);
 1866 
 1867         switch (params->tid_type)
 1868         {
 1869         case ECORE_RDMA_TID_REGISTERED_MR:
 1870                 tid_type = RDMA_TID_REGISTERED_MR;
 1871                 break;
 1872         case ECORE_RDMA_TID_FMR:
 1873                 tid_type = RDMA_TID_FMR;
 1874                 break;
 1875         case ECORE_RDMA_TID_MW_TYPE1:
 1876                 tid_type = RDMA_TID_MW_TYPE1;
 1877                 break;
 1878         case ECORE_RDMA_TID_MW_TYPE2A:
 1879                 tid_type = RDMA_TID_MW_TYPE2A;
 1880                 break;
 1881         default:
 1882                 rc = ECORE_INVAL;
 1883                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 1884                 return rc;
 1885         }
 1886         SET_FIELD(p_ramrod->flags1,
 1887                   RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
 1888                   tid_type);
 1889 
 1890         p_ramrod->itid = OSAL_CPU_TO_LE32(params->itid);
 1891         p_ramrod->key = params->key;
 1892         p_ramrod->pd = OSAL_CPU_TO_LE16(params->pd);
 1893         p_ramrod->length_hi = (u8)(params->length >> 32);
 1894         p_ramrod->length_lo = DMA_LO_LE(params->length);
 1895         if (params->zbva)
 1896         {
 1897                 /* Lower 32 bits of the registered MR address.
 1898                  * In case of zero based MR, will hold FBO
 1899                  */
 1900                 p_ramrod->va.hi = 0;
 1901                 p_ramrod->va.lo = OSAL_CPU_TO_LE32(params->fbo);
 1902         } else {
 1903                 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
 1904         }
 1905         DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
 1906 
 1907         /* DIF */
 1908         if (params->dif_enabled) {
 1909                 SET_FIELD(p_ramrod->flags2,
 1910                           RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
 1911                 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
 1912                                params->dif_error_addr);
 1913                 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
 1914         }
 1915 
 1916         rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
 1917         if (rc)
 1918                 return rc;
 1919 
 1920         if (fw_return_code != RDMA_RETURN_OK) {
 1921                 DP_NOTICE(p_hwfn, true, "fw_return_code = %d\n", fw_return_code);
 1922                 return ECORE_UNKNOWN_ERROR;
 1923         }
 1924 
 1925         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Register TID, rc = %d\n", rc);
 1926         return rc;
 1927 }
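
/*
 * Editorial usage sketch (not part of the original driver): registering a
 * plain MR over 4 KiB pages. example_register_mr() is hypothetical; note
 * that page_size_log/pbl_page_size_log are given in log2 bytes and the
 * ramrod encodes them relative to 4 KiB (the "- 12" above).
 */
#if 0   /* usage sketch only; not compiled */
static enum _ecore_status_t example_register_mr(void *rdma_cxt, u32 itid,
                                                u16 pd, u64 vaddr, u64 length,
                                                dma_addr_t pbl_phys)
{
        struct ecore_rdma_register_tid_in_params p = {0};

        p.itid              = itid;
        p.tid_type          = ECORE_RDMA_TID_REGISTERED_MR;
        p.pd                = pd;
        p.vaddr             = vaddr;
        p.length            = length;
        p.pbl_ptr           = pbl_phys;
        p.page_size_log     = 12;       /* 4 KiB data pages */
        p.pbl_page_size_log = 12;       /* 4 KiB PBL pages */
        p.local_read        = 1;
        p.local_write       = 1;
        p.remote_read       = 1;        /* grant remote RDMA READ */

        return ecore_rdma_register_tid(rdma_cxt, &p);
}
#endif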
 1928 
 1929 static OSAL_INLINE int ecore_rdma_send_deregister_tid_ramrod(
 1930                 struct ecore_hwfn *p_hwfn,
 1931                 u32 itid,
 1932                 u8 *fw_return_code)
 1933 {
 1934         struct ecore_sp_init_data              init_data;
 1935         struct rdma_deregister_tid_ramrod_data *p_ramrod;
 1936         struct ecore_spq_entry                 *p_ent;
 1937         enum _ecore_status_t                   rc;
 1938 
 1939         /* Get SPQ entry */
 1940         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 1941         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 1942         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 1943 
 1944         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 1945                                    RDMA_RAMROD_DEREGISTER_MR,
 1946                                    p_hwfn->p_rdma_info->proto, &init_data);
 1947         if (rc != ECORE_SUCCESS) {
 1948                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 1949                 return rc;
 1950         }
 1951 
 1952         p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
 1953         p_ramrod->itid = OSAL_CPU_TO_LE32(itid);
 1954 
 1955         rc = ecore_spq_post(p_hwfn, p_ent, fw_return_code);
 1956         if (rc != ECORE_SUCCESS)
 1957         {
 1958                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 1959                 return rc;
 1960         }
 1961 
 1962         return rc;
 1963 }
 1964 
 1965 #define ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC      (1)
 1966 
 1967 enum _ecore_status_t ecore_rdma_deregister_tid(void     *rdma_cxt,
 1968                                                u32      itid)
 1969 {
 1970         enum _ecore_status_t                   rc;
 1971         u8                                     fw_ret_code;
 1972         struct ecore_ptt                       *p_ptt;
 1973         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 1974 
 1975         /* First attempt */
 1976         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
 1977         if (rc != ECORE_SUCCESS)
 1978                 return rc;
 1979 
 1980         if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
 1981                 goto done;
 1982 
 1983         /* Second attempt, after 1msec, if device still holds data.
 1984          * This can occur since 'destroy QP' returns to the caller rather fast.
 1985          * The synchronous part of it returns after freeing a few of the
 1986          * resources but not all of them, allowing the consumer to continue its
 1987          * flow. All of the resources will be freed after the asynchronous part
 1988          * of the destroy QP is complete.
 1989          */
 1990         OSAL_MSLEEP(ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC);
 1991         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
 1992         if (rc != ECORE_SUCCESS)
 1993                 return rc;
 1994 
 1995         if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
 1996                 goto done;
 1997 
 1998         /* Third and last attempt, perform NIG drain and resend the ramrod */
 1999         p_ptt = ecore_ptt_acquire(p_hwfn);
 2000         if (!p_ptt)
 2001                 return ECORE_TIMEOUT;
 2002 
 2003         rc = ecore_mcp_drain(p_hwfn, p_ptt);
 2004         if (rc != ECORE_SUCCESS) {
 2005                 ecore_ptt_release(p_hwfn, p_ptt);
 2006                 return rc;
 2007         }
 2008 
 2009         ecore_ptt_release(p_hwfn, p_ptt);
 2010 
 2011         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
 2012         if (rc != ECORE_SUCCESS)
 2013                 return rc;
 2014 
 2015 done:
 2016         if (fw_ret_code == RDMA_RETURN_OK) {
 2017                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "De-registered itid=%d\n",
 2018                            itid);
 2019                 return ECORE_SUCCESS;
 2020         } else if (fw_ret_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
 2021                 /* INTERNAL: This error is returned when trying to deregister
 2022                  * an MR that is not allocated. We define "allocated" as either:
 2023                  * 1. Registered.
 2024                  * 2. An FMR MR type, which is not currently registered but
 2025                  *    can accept FMR WQEs on the SQ.
 2026                  */
 2027                 DP_NOTICE(p_hwfn, false, "itid=%d, fw_ret_code=%d\n", itid,
 2028                           fw_ret_code);
 2029                 return ECORE_INVAL;
 2030         } else { /* fw_ret_code == RDMA_RETURN_NIG_DRAIN_REQ */
 2031                 DP_NOTICE(p_hwfn, true,
 2032                           "deregister failed after three attempts. itid=%d, fw_ret_code=%d\n",
 2033                           itid, fw_ret_code);
 2034                 return ECORE_UNKNOWN_ERROR;
 2035         }
 2036 }
 2037 
 2038 static struct ecore_bmap *ecore_rdma_get_srq_bmap(struct ecore_hwfn *p_hwfn, bool is_xrc)
 2039 {
 2040         if (is_xrc)
 2041                 return &p_hwfn->p_rdma_info->xrc_srq_map;
 2042 
 2043         return &p_hwfn->p_rdma_info->srq_map;
 2044 }
 2045 
 2046 u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc)
 2047 {
 2048         if (is_xrc)
 2049                 return id;
 2050 
 2051         return id + p_hwfn->p_rdma_info->srq_id_offset;
 2052 }
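
/*
 * Worked example (editorial, with a hypothetical srq_id_offset of 16):
 * ordinary SRQ id 3 maps to FW id 19, while an XRC SRQ id is passed
 * through unchanged, presumably because XRC SRQs occupy the low range
 * of the FW SRQ id space.
 */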
 2053 
 2054 enum _ecore_status_t
 2055 ecore_rdma_modify_srq(void *rdma_cxt,
 2056                       struct ecore_rdma_modify_srq_in_params *in_params)
 2057 {
 2058         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2059         struct rdma_srq_modify_ramrod_data *p_ramrod;
 2060         struct ecore_sp_init_data init_data;
 2061         struct ecore_spq_entry *p_ent;
 2062         u16 opaque_fid, fw_srq_id;
 2063         enum _ecore_status_t rc;
 2064 
 2065         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 2066         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 2067         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 2068         /* Send modify SRQ ramrod */
 2069         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 2070                                    RDMA_RAMROD_MODIFY_SRQ,
 2071                                    p_hwfn->p_rdma_info->proto, &init_data);
 2072         if (rc != ECORE_SUCCESS)
 2073                 return rc;
 2074 
 2075         p_ramrod = &p_ent->ramrod.rdma_modify_srq;
 2076 
 2077         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
 2078                                              in_params->is_xrc);
 2079         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
 2080         opaque_fid = p_hwfn->hw_info.opaque_fid;
 2081         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
 2082         p_ramrod->wqe_limit = OSAL_CPU_TO_LE16(in_params->wqe_limit);
 2083 
 2084         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 2085         if (rc != ECORE_SUCCESS)
 2086                 return rc;
 2087 
 2088         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
 2089                    in_params->srq_id, in_params->is_xrc);
 2090 
 2091         return rc;
 2092 }
 2093 
 2094 enum _ecore_status_t
 2095 ecore_rdma_destroy_srq(void *rdma_cxt,
 2096                        struct ecore_rdma_destroy_srq_in_params *in_params)
 2097 {
 2098         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2099         struct rdma_srq_destroy_ramrod_data *p_ramrod;
 2100         struct ecore_sp_init_data init_data;
 2101         struct ecore_spq_entry *p_ent;
 2102         u16 opaque_fid, fw_srq_id;
 2103         struct ecore_bmap *bmap;
 2104         enum _ecore_status_t rc;
 2105 
 2106         opaque_fid = p_hwfn->hw_info.opaque_fid;
 2107 
 2108         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 2109         init_data.opaque_fid = opaque_fid;
 2110         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 2111 
 2112         /* Send destroy SRQ ramrod */
 2113         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 2114                                    RDMA_RAMROD_DESTROY_SRQ,
 2115                                    p_hwfn->p_rdma_info->proto, &init_data);
 2116         if (rc != ECORE_SUCCESS)
 2117                 return rc;
 2118 
 2119         p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
 2120 
 2121         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
 2122                                              in_params->is_xrc);
 2123         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
 2124         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
 2125 
 2126         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 2127 
 2128         if (rc != ECORE_SUCCESS)
 2129                 return rc;
 2130 
 2131         bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
 2132 
 2133         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 2134         ecore_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
 2135         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 2136 
 2137         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
 2138                    "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
 2139                    in_params->srq_id, in_params->is_xrc);
 2140 
 2141         return rc;
 2142 }
 2143 
 2144 enum _ecore_status_t
 2145 ecore_rdma_create_srq(void *rdma_cxt,
 2146                       struct ecore_rdma_create_srq_in_params *in_params,
 2147                       struct ecore_rdma_create_srq_out_params *out_params)
 2148 {
 2149         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2150         struct rdma_srq_create_ramrod_data *p_ramrod;
 2151         struct ecore_sp_init_data init_data;
 2152         enum ecore_cxt_elem_type elem_type;
 2153         struct ecore_spq_entry *p_ent;
 2154         u16 opaque_fid, fw_srq_id;
 2155         struct ecore_bmap *bmap;
 2156         u32 returned_id;
 2157         enum _ecore_status_t rc;
 2158 
 2159         /* Allocate XRC/SRQ ID */
 2160         bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
 2161         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 2162         rc = ecore_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
 2163         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 2164 
 2165         if (rc != ECORE_SUCCESS) {
 2166                 DP_NOTICE(p_hwfn, false,
 2167                           "failed to allocate xrc/srq id (is_xrc=%u)\n",
 2168                           in_params->is_xrc);
 2169                 return rc;
 2170         }
 2171         /* Allocate XRC/SRQ ILT page */
 2172         elem_type = (in_params->is_xrc) ? (ECORE_ELEM_XRC_SRQ) : (ECORE_ELEM_SRQ);
 2173         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
 2174         if (rc != ECORE_SUCCESS)
 2175                 goto err;
 2176 
 2177         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 2178         opaque_fid = p_hwfn->hw_info.opaque_fid;
 2179         init_data.opaque_fid = opaque_fid;
 2180         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 2181 
 2182         /* Create XRC/SRQ ramrod */
 2183         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 2184                                    RDMA_RAMROD_CREATE_SRQ,
 2185                                    p_hwfn->p_rdma_info->proto, &init_data);
 2186         if (rc != ECORE_SUCCESS)
 2187                 goto err;
 2188 
 2189         p_ramrod = &p_ent->ramrod.rdma_create_srq;
 2190 
 2191         p_ramrod->pbl_base_addr.hi = DMA_HI_LE(in_params->pbl_base_addr);
 2192         p_ramrod->pbl_base_addr.lo = DMA_LO_LE(in_params->pbl_base_addr);
 2193         p_ramrod->pages_in_srq_pbl = OSAL_CPU_TO_LE16(in_params->num_pages);
 2194         p_ramrod->pd_id = OSAL_CPU_TO_LE16(in_params->pd_id);
 2195         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
 2196         p_ramrod->page_size = OSAL_CPU_TO_LE16(in_params->page_size);
 2197         p_ramrod->producers_addr.hi = DMA_HI_LE(in_params->prod_pair_addr);
 2198         p_ramrod->producers_addr.lo = DMA_LO_LE(in_params->prod_pair_addr);
 2199         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, (u16) returned_id,
 2200                                              in_params->is_xrc);
 2201         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
 2202 
 2203         if (in_params->is_xrc) {
 2204                 SET_FIELD(p_ramrod->flags,
 2205                           RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG,
 2206                           1);
 2207                 SET_FIELD(p_ramrod->flags,
 2208                           RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
 2209                           in_params->reserved_key_en);
 2210                 p_ramrod->xrc_srq_cq_cid = OSAL_CPU_TO_LE32(in_params->cq_cid);
 2211                 p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(in_params->xrcd_id);
 2212         }
 2213 
 2214         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 2215 
 2216         if (rc != ECORE_SUCCESS)
 2217                 goto err;
 2218 
 2219         out_params->srq_id = (u16)returned_id;
 2220 
 2221         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "XRC/SRQ created Id = %x (is_xrc=%u)\n",
 2222                    out_params->srq_id, in_params->is_xrc);
 2223         return rc;
 2224 
 2225 err:
 2226         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 2227         ecore_bmap_release_id(p_hwfn, bmap, returned_id);
 2228         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 2229 
 2230         return rc;
 2231 }
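
/*
 * Editorial usage sketch (not part of the original driver): creating an
 * ordinary (non-XRC) SRQ. example_create_srq() and the literal values are
 * hypothetical; prod_phys is the DMA address of the producer pair the FW
 * updates.
 */
#if 0   /* usage sketch only; not compiled */
static enum _ecore_status_t example_create_srq(void *rdma_cxt, u16 pd_id,
                                               dma_addr_t pbl_phys,
                                               dma_addr_t prod_phys,
                                               u16 *srq_id)
{
        struct ecore_rdma_create_srq_in_params in = {0};
        struct ecore_rdma_create_srq_out_params out;
        enum _ecore_status_t rc;

        in.pd_id          = pd_id;
        in.pbl_base_addr  = pbl_phys;
        in.num_pages      = 1;
        in.page_size      = 4096;       /* hypothetical */
        in.prod_pair_addr = prod_phys;
        in.is_xrc         = false;

        rc = ecore_rdma_create_srq(rdma_cxt, &in, &out);
        if (rc == ECORE_SUCCESS)
                *srq_id = out.srq_id;   /* relative id; FW id adds the offset */
        return rc;
}
#endif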
 2232 
 2233 bool ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn)
 2234 {
 2235         bool result;
 2236 
 2237         /* if rdma info has not been allocated, naturally there are no qps */
 2238         if (!p_hwfn->p_rdma_info)
 2239                 return false;
 2240 
 2241         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 2242         if (!p_hwfn->p_rdma_info->qp_map.bitmap)
 2243                 result = false;
 2244         else
 2245                 result = !ecore_bmap_is_empty(&p_hwfn->p_rdma_info->qp_map);
 2246         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 2247         return result;
 2248 }
 2249 
 2250 enum _ecore_status_t ecore_rdma_resize_cq(void                  *rdma_cxt,
 2251                         struct ecore_rdma_resize_cq_in_params   *in_params,
 2252                         struct ecore_rdma_resize_cq_out_params  *out_params)
 2253 {
 2254         enum _ecore_status_t                    rc;
 2255         enum ecore_rdma_toggle_bit              toggle_bit;
 2256         struct ecore_spq_entry                  *p_ent;
 2257         struct rdma_resize_cq_ramrod_data       *p_ramrod;
 2258         u8                                      fw_return_code;
 2259         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2260         dma_addr_t                              ramrod_res_phys;
 2261         struct rdma_resize_cq_output_params     *p_ramrod_res;
 2262         struct ecore_sp_init_data               init_data;
 2263 
 2264         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);
 2265 
 2266         /* Send resize CQ ramrod */
 2267 
 2268         p_ramrod_res = (struct rdma_resize_cq_output_params *)
 2269                         OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
 2270                                 sizeof(*p_ramrod_res));
 2271         if (!p_ramrod_res)
 2272         {
 2273                 rc = ECORE_NOMEM;
 2274                 DP_NOTICE(p_hwfn, false,
 2275                           "ecore resize cq failed: cannot allocate memory (ramrod). rc = %d\n",
 2276                           rc);
 2277                 return rc;
 2278         }
 2279 
 2280         /* Get SPQ entry */
 2281         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 2282         init_data.cid = in_params->icid;
 2283         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 2284         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 2285 
 2286         rc = ecore_sp_init_request(p_hwfn, &p_ent,
 2287                                    RDMA_RAMROD_RESIZE_CQ,
 2288                                    p_hwfn->p_rdma_info->proto, &init_data);
 2289         if (rc != ECORE_SUCCESS)
 2290                 goto err;
 2291 
 2292         p_ramrod = &p_ent->ramrod.rdma_resize_cq;
 2293 
 2294         p_ramrod->flags = 0;
 2295 
 2296         /* toggle the bit for every resize or create cq for a given icid */
 2297         toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn,
 2298                                                             in_params->icid);
 2299 
 2300         SET_FIELD(p_ramrod->flags,
 2301                   RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT,
 2302                   toggle_bit);
 2303 
 2304         SET_FIELD(p_ramrod->flags,
 2305                   RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
 2306                   in_params->pbl_two_level);
 2307 
 2308         p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
 2309         p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(in_params->pbl_num_pages);
 2310         p_ramrod->max_cqes = OSAL_CPU_TO_LE32(in_params->cq_size);
 2311         p_ramrod->pbl_addr.hi = DMA_HI_LE(in_params->pbl_ptr);
 2312         p_ramrod->pbl_addr.lo = DMA_LO_LE(in_params->pbl_ptr);
 2313 
 2314         p_ramrod->output_params_addr.hi = DMA_HI_LE(ramrod_res_phys);
 2315         p_ramrod->output_params_addr.lo = DMA_LO_LE(ramrod_res_phys);
 2316 
 2317         rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
 2318         if (rc != ECORE_SUCCESS)
 2319                 goto err;
 2320 
 2321         if (fw_return_code != RDMA_RETURN_OK)
 2322         {
 2323                 /* A RESIZE_CQ_ERR return is an anticipated failure mode,
 2324                  * so the notice is only escalated for other codes. */
 2325                 DP_NOTICE(p_hwfn, fw_return_code != RDMA_RETURN_RESIZE_CQ_ERR,
 2326                           "fw_return_code = %d\n", fw_return_code);
 2327                 rc = ECORE_UNKNOWN_ERROR;
 2328                 goto err;
 2329         }
 2330 
 2331         out_params->prod = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_prod);
 2332         out_params->cons = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_cons);
 2333 
 2334         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 2335                                sizeof(*p_ramrod_res));
 2336 
 2337         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 2338 
 2339         return rc;
 2340 
 2341 err:
 2342         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
 2343                                sizeof(*p_ramrod_res));
 2344         DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
 2345 
 2346         return rc;
 2347 }
 2348 
 2349 enum _ecore_status_t ecore_rdma_start(void *rdma_cxt,
 2350                                 struct ecore_rdma_start_in_params *params)
 2351 {
 2352         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2353         struct ecore_ptt *p_ptt;
 2354         enum _ecore_status_t rc = ECORE_TIMEOUT;
 2355 
 2356         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
 2357                    "desired_cnq = %08x\n", params->desired_cnq);
 2358 
 2359         p_ptt = ecore_ptt_acquire(p_hwfn);
 2360         if (!p_ptt)
 2361                 goto err;
 2362 
 2363         rc = ecore_rdma_alloc(p_hwfn);
 2364         if (rc)
 2365                 goto err1;
 2366 
 2367         rc = ecore_rdma_setup(p_hwfn, p_ptt, params);
 2368         if (rc)
 2369                 goto err2;
 2370 
 2371         ecore_ptt_release(p_hwfn, p_ptt);
 2372 
 2373         ecore_rdma_activate(p_hwfn);
 2374         return rc;
 2375 
 2376 err2:
 2377         ecore_rdma_free(p_hwfn);
 2378 err1:
 2379         ecore_ptt_release(p_hwfn, p_ptt);
 2380 err:
 2381         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
 2382         return rc;
 2383 }
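
/*
 * Editorial usage sketch (not part of the original driver): the minimal
 * start sequence. example_start_rdma() is hypothetical, and a real caller
 * would also populate the CNQ PBL addresses and event callbacks carried
 * by ecore_rdma_start_in_params.
 */
#if 0   /* usage sketch only; not compiled */
static enum _ecore_status_t example_start_rdma(void *rdma_cxt)
{
        struct ecore_rdma_start_in_params params = {0};

        params.desired_cnq = 1;         /* hypothetical CNQ count */

        /* Allocates rdma_info, runs setup and activates the engine */
        return ecore_rdma_start(rdma_cxt, &params);
}
#endif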
 2384 
 2385 enum _ecore_status_t ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
 2386                                 struct ecore_rdma_stats_out_params *out_params)
 2387 {
 2388         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2389         u8 abs_stats_queue, max_stats_queues;
 2390         u32 pstats_addr, tstats_addr, addr;
 2391         struct ecore_rdma_info *info;
 2392         struct ecore_ptt *p_ptt;
 2393 #ifdef CONFIG_ECORE_IWARP
 2394         u32 xstats_addr;
 2395 #endif
 2396         enum _ecore_status_t rc = ECORE_SUCCESS;
 2397 
 2398         if (!p_hwfn)
 2399                 return ECORE_INVAL;
 2400 
 2401         if (!p_hwfn->p_rdma_info) {
 2402                 DP_INFO(p_hwfn->p_dev, "ecore rdma query stats failed due to NULL rdma_info\n");
 2403                 return ECORE_INVAL;
 2404         }
 2405 
 2406         info = p_hwfn->p_rdma_info;
 2407 
 2408         rc = ecore_rdma_inc_ref_cnt(p_hwfn);
 2409         if (rc != ECORE_SUCCESS)
 2410                 return rc;
 2411 
 2412         max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
 2413         if (stats_queue >= max_stats_queues) {
 2414                 DP_ERR(p_hwfn->p_dev,
 2415                        "ecore rdma query stats failed due to invalid statistics queue %d. maximum is %d\n",
 2416                        stats_queue, max_stats_queues);
 2417                 rc = ECORE_INVAL;
 2418                 goto err;
 2419         }
 2420 
 2421         /* Statistics collected in statistics queues (for PF/VF) */
 2422         abs_stats_queue = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
 2423                             stats_queue;
 2424         pstats_addr = BAR0_MAP_REG_PSDM_RAM +
 2425                       PSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
 2426         tstats_addr = BAR0_MAP_REG_TSDM_RAM +
 2427                       TSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
 2428 
 2429 #ifdef CONFIG_ECORE_IWARP
 2430         /* Statistics per PF ID */
 2431         xstats_addr = BAR0_MAP_REG_XSDM_RAM +
 2432                       XSTORM_IWARP_RXMIT_STATS_OFFSET(p_hwfn->rel_pf_id);
 2433 #endif
 2434 
 2435         OSAL_MEMSET(&info->rdma_sent_pstats, 0, sizeof(info->rdma_sent_pstats));
 2436         OSAL_MEMSET(&info->rdma_rcv_tstats, 0, sizeof(info->rdma_rcv_tstats));
 2437         OSAL_MEMSET(&info->roce.event_stats, 0, sizeof(info->roce.event_stats));
 2438         OSAL_MEMSET(&info->roce.dcqcn_rx_stats, 0, sizeof(info->roce.dcqcn_rx_stats));
 2439         OSAL_MEMSET(&info->roce.dcqcn_tx_stats, 0, sizeof(info->roce.dcqcn_tx_stats));
 2440 #ifdef CONFIG_ECORE_IWARP
 2441         OSAL_MEMSET(&info->iwarp.stats, 0, sizeof(info->iwarp.stats));
 2442 #endif
 2443 
 2444         p_ptt = ecore_ptt_acquire(p_hwfn);
 2445 
 2446         if (!p_ptt) {
 2447                 rc = ECORE_TIMEOUT;
 2448                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
 2449                 goto err;
 2450         }
 2451 
 2452         ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_sent_pstats,
 2453                           pstats_addr, sizeof(struct rdma_sent_stats));
 2454 
 2455         ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_rcv_tstats,
 2456                           tstats_addr, sizeof(struct rdma_rcv_stats));
 2457 
 2458         addr = BAR0_MAP_REG_TSDM_RAM +
 2459                TSTORM_ROCE_EVENTS_STAT_OFFSET(p_hwfn->rel_pf_id);
 2460         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.event_stats, addr,
 2461                           sizeof(struct roce_events_stats));
 2462 
 2463         addr = BAR0_MAP_REG_YSDM_RAM +
 2464                 YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(p_hwfn->rel_pf_id);
 2465         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_rx_stats, addr,
 2466                           sizeof(struct roce_dcqcn_received_stats));
 2467 
 2468         addr = BAR0_MAP_REG_PSDM_RAM +
 2469                PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(p_hwfn->rel_pf_id);
 2470         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_tx_stats, addr,
 2471                           sizeof(struct roce_dcqcn_sent_stats));
 2472 
 2473 #ifdef CONFIG_ECORE_IWARP
 2474         ecore_memcpy_from(p_hwfn, p_ptt, &info->iwarp.stats,
 2475                           xstats_addr, sizeof(struct iwarp_rxmit_stats_drv));
 2476 #endif
 2477 
 2478         ecore_ptt_release(p_hwfn, p_ptt);
 2479 
 2480         OSAL_MEMSET(out_params, 0, sizeof(*out_params));
 2481 
 2482         out_params->sent_bytes =
 2483                 HILO_64_REGPAIR(info->rdma_sent_pstats.sent_bytes);
 2484         out_params->sent_pkts =
 2485                 HILO_64_REGPAIR(info->rdma_sent_pstats.sent_pkts);
 2486         out_params->rcv_bytes =
 2487                 HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_bytes);
 2488         out_params->rcv_pkts =
 2489                 HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_pkts);
 2490 
 2491         out_params->silent_drops =
 2492                 OSAL_LE16_TO_CPU(info->roce.event_stats.silent_drops);
 2493         out_params->rnr_nacks_sent =
 2494                 OSAL_LE16_TO_CPU(info->roce.event_stats.rnr_naks_sent);
 2495         out_params->icrc_errors =
 2496                 OSAL_LE32_TO_CPU(info->roce.event_stats.icrc_error_count);
 2497         out_params->retransmit_events =
 2498                 OSAL_LE32_TO_CPU(info->roce.event_stats.retransmit_count);
 2499         out_params->ecn_pkt_rcv =
 2500                 HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.ecn_pkt_rcv);
 2501         out_params->cnp_pkt_rcv =
 2502                 HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.cnp_pkt_rcv);
 2503         out_params->cnp_pkt_sent =
 2504                 HILO_64_REGPAIR(info->roce.dcqcn_tx_stats.cnp_pkt_sent);
 2505 
 2506 #ifdef CONFIG_ECORE_IWARP
 2507         out_params->iwarp_tx_fast_rxmit_cnt =
 2508                 HILO_64_REGPAIR(info->iwarp.stats.tx_fast_retransmit_event_cnt);
 2509         out_params->iwarp_tx_slow_start_cnt =
 2510                 HILO_64_REGPAIR(
 2511                         info->iwarp.stats.tx_go_to_slow_start_event_cnt);
 2512         out_params->unalign_rx_comp = info->iwarp.unalign_rx_comp;
 2513 #endif
 2514 
 2515 err:
 2516         ecore_rdma_dec_ref_cnt(p_hwfn);
 2517 
 2518         return rc;
 2519 }
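
/*
 * Editorial usage sketch (not part of the original driver): reading the
 * per-queue RDMA counters. example_dump_stats() is hypothetical; the
 * 64-bit fields are already assembled from their HI/LO register pairs
 * by the time the call returns.
 */
#if 0   /* usage sketch only; not compiled */
static void example_dump_stats(struct ecore_hwfn *p_hwfn)
{
        struct ecore_rdma_stats_out_params stats;

        if (ecore_rdma_query_stats(p_hwfn, 0, &stats) != ECORE_SUCCESS)
                return;

        DP_INFO(p_hwfn->p_dev, "sent %llu pkts, rcv %llu pkts\n",
                (unsigned long long)stats.sent_pkts,
                (unsigned long long)stats.rcv_pkts);
}
#endif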
 2520 
 2521 enum _ecore_status_t
 2522 ecore_rdma_query_counters(void *rdma_cxt,
 2523                           struct ecore_rdma_counters_out_params *out_params)
 2524 {
 2525         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2526         unsigned long *bitmap;
 2527         unsigned int nbits;
 2528 
 2529         if (!p_hwfn->p_rdma_info)
 2530                 return ECORE_INVAL;
 2531 
 2532         OSAL_MEMSET(out_params, 0, sizeof(*out_params));
 2533 
 2534         bitmap = p_hwfn->p_rdma_info->pd_map.bitmap;
 2535         nbits = p_hwfn->p_rdma_info->pd_map.max_count;
 2536         out_params->pd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2537         out_params->max_pd = nbits;
 2538 
 2539         bitmap = p_hwfn->p_rdma_info->dpi_map.bitmap;
 2540         nbits = p_hwfn->p_rdma_info->dpi_map.max_count;
 2541         out_params->dpi_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2542         out_params->max_dpi = nbits;
 2543 
 2544         bitmap = p_hwfn->p_rdma_info->cq_map.bitmap;
 2545         nbits = p_hwfn->p_rdma_info->cq_map.max_count;
 2546         out_params->cq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2547         out_params->max_cq = nbits;
 2548 
 2549         bitmap = p_hwfn->p_rdma_info->qp_map.bitmap;
 2550         nbits = p_hwfn->p_rdma_info->qp_map.max_count;
 2551         out_params->qp_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2552         out_params->max_qp = nbits;
 2553 
 2554         bitmap = p_hwfn->p_rdma_info->tid_map.bitmap;
 2555         nbits = p_hwfn->p_rdma_info->tid_map.max_count;
 2556         out_params->tid_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2557         out_params->max_tid = nbits;
 2558 
 2559         bitmap = p_hwfn->p_rdma_info->srq_map.bitmap;
 2560         nbits = p_hwfn->p_rdma_info->srq_map.max_count;
 2561         out_params->srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2562         out_params->max_srq = nbits;
 2563 
 2564         bitmap = p_hwfn->p_rdma_info->xrc_srq_map.bitmap;
 2565         nbits = p_hwfn->p_rdma_info->xrc_srq_map.max_count;
 2566         out_params->xrc_srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2567         out_params->max_xrc_srq = nbits;
 2568 
 2569         bitmap = p_hwfn->p_rdma_info->xrcd_map.bitmap;
 2570         nbits = p_hwfn->p_rdma_info->xrcd_map.max_count;
 2571         out_params->xrcd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
 2572         out_params->max_xrcd = nbits;
 2573 
 2574         return ECORE_SUCCESS;
 2575 }
 2576 
 2577 enum _ecore_status_t ecore_rdma_resize_cnq(void                       *rdma_cxt,
 2578                                 struct ecore_rdma_resize_cnq_in_params *params)
 2579 {
 2580         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2581 
 2582         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cnq_id = %08x\n", params->cnq_id);
 2583 
 2584         /* @@@TBD: waiting for fw (there is no ramrod yet) */
 2585         return ECORE_NOTIMPL;
 2586 }
 2587 
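      /*
       * Return a DPI previously handed out to a user to the dpi_map
       * allocator; the rdma_info lock serializes this against other
       * bitmap operations.
       */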
 2588 void ecore_rdma_remove_user(void        *rdma_cxt,
 2589                             u16         dpi)
 2590 {
 2591         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
 2592 
 2593         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "dpi = %08x\n", dpi);
 2594 
 2595         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 2596         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
 2597         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 2598 }
 2599 
 2600 #ifndef LINUX_REMOVE
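      /*
       * Read-modify-write of the cached global RDMA configuration: only
       * the fields selected by glob_cfg_bits are range-checked and
       * applied, so unselected fields keep their current values.
       */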
 2601 enum _ecore_status_t
 2602 ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
 2603                         struct ecore_rdma_glob_cfg *in_params,
 2604                         u32 glob_cfg_bits)
 2605 {
 2606         struct ecore_rdma_glob_cfg glob_cfg;
 2607         enum _ecore_status_t rc = ECORE_SUCCESS;
 2608 
 2609         DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_RDMA,
 2610                    "dscp %d dscp_en %d ecn %d ecn_en %d vlan_pri %d vlan_pri_en %d\n",
 2611                    in_params->dscp, in_params->dscp_en,
 2612                    in_params->ecn, in_params->ecn_en, in_params->vlan_pri,
 2613                    in_params->vlan_pri_en);
 2614 
 2615         /* Read global cfg to local */
 2616         OSAL_MEMCPY(&glob_cfg, &p_hwfn->p_rdma_info->glob_cfg,
 2617                     sizeof(glob_cfg));
 2618 
 2619         if (glob_cfg_bits & ECORE_RDMA_DCSP_BIT_MASK) {
 2620                 if (in_params->dscp > MAX_DSCP) {
 2621                         DP_ERR(p_hwfn->p_dev, "invalid glob dscp %d\n",
 2622                                in_params->dscp);
 2623                         return ECORE_INVAL;
 2624                 }
 2625                 glob_cfg.dscp = in_params->dscp;
 2626         }
 2627 
 2628         if (glob_cfg_bits & ECORE_RDMA_DCSP_EN_BIT_MASK) {
 2629                 if (in_params->dscp_en > 1) {
 2630                 DP_ERR(p_hwfn->p_dev, "invalid glob dscp en %d\n",
 2631                                in_params->dscp_en);
 2632                         return ECORE_INVAL;
 2633                 }
 2634                 glob_cfg.dscp_en = in_params->dscp_en;
 2635         }
 2636 
 2637         if (glob_cfg_bits & ECORE_RDMA_ECN_BIT_MASK) {
 2638                 if (in_params->ecn > INET_ECN_ECT_0) {
 2639                         DP_ERR(p_hwfn->p_dev, "invalid glob ecn %d\n",
 2640                                in_params->ecn);
 2641                         return ECORE_INVAL;
 2642                 }
 2643                 glob_cfg.ecn = in_params->ecn;
 2644         }
 2645 
 2646         if (glob_cfg_bits & ECORE_RDMA_ECN_EN_BIT_MASK) {
 2647                 if (in_params->ecn_en > 1) {
 2648                         DP_ERR(p_hwfn->p_dev, "invalid glob ecn en %d\n",
 2649                                in_params->ecn_en);
 2650                         return ECORE_INVAL;
 2651                 }
 2652                 glob_cfg.ecn_en = in_params->ecn_en;
 2653         }
 2654 
 2655         if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_BIT_MASK) {
 2656                 if (in_params->vlan_pri > MAX_VLAN_PRIO) {
 2657                         DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri %d\n",
 2658                                in_params->vlan_pri);
 2659                         return ECORE_INVAL;
 2660                 }
 2661                 glob_cfg.vlan_pri = in_params->vlan_pri;
 2662         }
 2663 
 2664         if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK) {
 2665                 if (in_params->vlan_pri_en > 1) {
 2666                         DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri en %d\n",
 2667                                in_params->vlan_pri_en);
 2668                         return ECORE_INVAL;
 2669                 }
 2670                 glob_cfg.vlan_pri_en = in_params->vlan_pri_en;
 2671         }
 2672 
 2673         /* Write back local cfg to global */
 2674         OSAL_MEMCPY(&p_hwfn->p_rdma_info->glob_cfg, &glob_cfg,
 2675                     sizeof(glob_cfg));
 2676 
 2677         return rc;
 2678 }
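
      /*
       * Illustrative usage sketch (hypothetical caller, kept out of the
       * build with #if 0): request DSCP marking with code point 24 by
       * selecting just the dscp and dscp_en fields via their masks.
       * "example_enable_dscp" is not part of this driver.
       */
      #if 0
      static enum _ecore_status_t example_enable_dscp(struct ecore_hwfn *p_hwfn)
      {
              struct ecore_rdma_glob_cfg cfg = { 0 };

              cfg.dscp = 24;
              cfg.dscp_en = 1;

              return ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
                                             ECORE_RDMA_DCSP_BIT_MASK |
                                             ECORE_RDMA_DCSP_EN_BIT_MASK);
      }
      #endif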
 2679 
 2680 enum _ecore_status_t
 2681 ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
 2682                         struct ecore_rdma_glob_cfg *out_params)
 2683 {
 2684         OSAL_MEMCPY(out_params, &p_hwfn->p_rdma_info->glob_cfg,
 2685                     sizeof(struct ecore_rdma_glob_cfg));
 2686 
 2687         return ECORE_SUCCESS;
 2688 }
 2689 #endif /* LINUX_REMOVE */
