FreeBSD/Linux Kernel Cross Reference
sys/dev/irdma/irdma_verbs.c


    1 /*-
    2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
    3  *
    4  * Copyright (c) 2015 - 2022 Intel Corporation
    5  *
    6  * This software is available to you under a choice of one of two
    7  * licenses.  You may choose to be licensed under the terms of the GNU
    8  * General Public License (GPL) Version 2, available from the file
    9  * COPYING in the main directory of this source tree, or the
   10  * OpenFabrics.org BSD license below:
   11  *
   12  *   Redistribution and use in source and binary forms, with or
   13  *   without modification, are permitted provided that the following
   14  *   conditions are met:
   15  *
   16  *    - Redistributions of source code must retain the above
   17  *      copyright notice, this list of conditions and the following
   18  *      disclaimer.
   19  *
   20  *    - Redistributions in binary form must reproduce the above
   21  *      copyright notice, this list of conditions and the following
   22  *      disclaimer in the documentation and/or other materials
   23  *      provided with the distribution.
   24  *
   25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   32  * SOFTWARE.
   33  */
   34 /*$FreeBSD$*/
   35 
   36 #include "irdma_main.h"
   37 
   38 /**
   39  * irdma_query_device - get device attributes
   40  * @ibdev: device pointer from stack
    41  * @props: device attributes to return
   42  * @udata: user data
   43  */
   44 static int
   45 irdma_query_device(struct ib_device *ibdev,
   46                    struct ib_device_attr *props,
   47                    struct ib_udata *udata)
   48 {
   49         struct irdma_device *iwdev = to_iwdev(ibdev);
   50         struct irdma_pci_f *rf = iwdev->rf;
   51         struct pci_dev *pcidev = iwdev->rf->pcidev;
   52         struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
   53 
   54         if (udata->inlen || udata->outlen)
   55                 return -EINVAL;
   56 
   57         memset(props, 0, sizeof(*props));
   58         ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
   59         props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
   60             irdma_fw_minor_ver(&rf->sc_dev);
   61         props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
   62             IB_DEVICE_MEM_MGT_EXTENSIONS;
   63         props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
   64         props->vendor_id = pcidev->vendor;
   65         props->vendor_part_id = pcidev->device;
   66         props->hw_ver = pcidev->revision;
   67         props->page_size_cap = hw_attrs->page_size_cap;
   68         props->max_mr_size = hw_attrs->max_mr_size;
   69         props->max_qp = rf->max_qp - rf->used_qps;
   70         props->max_qp_wr = hw_attrs->max_qp_wr;
   71         set_max_sge(props, rf);
   72         props->max_cq = rf->max_cq - rf->used_cqs;
   73         props->max_cqe = rf->max_cqe - 1;
   74         props->max_mr = rf->max_mr - rf->used_mrs;
   75         props->max_mw = props->max_mr;
   76         props->max_pd = rf->max_pd - rf->used_pds;
   77         props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
   78         props->max_qp_rd_atom = hw_attrs->max_hw_ird;
   79         props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
   80         if (rdma_protocol_roce(ibdev, 1)) {
   81                 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
   82                 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
   83                 props->max_ah = rf->max_ah;
   84                 if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
   85                         props->max_mcast_grp = rf->max_mcg;
   86                         props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
   87                         props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
   88                 }
   89         }
   90         props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
   91         if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
   92                 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
   93 
   94         return 0;
   95 }
   96 
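       /**
        * irdma_mmap_legacy - map the legacy hard-coded doorbell page
        * @ucontext: user context created during alloc
        * @vma: kernel info for user memory map
        *
        * Legacy path for libi40iw: only offset 0 with a length of one page is
        * accepted, and the doorbell BAR page is mapped non-cached.
        */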
   97 static int
   98 irdma_mmap_legacy(struct irdma_ucontext *ucontext,
   99                   struct vm_area_struct *vma)
  100 {
  101         u64 pfn;
  102 
  103         if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
  104                 return -EINVAL;
  105 
  106         vma->vm_private_data = ucontext;
  107         pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
  108                pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
  109 
  110         return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
  111                                  pgprot_noncached(vma->vm_page_prot), NULL);
  112 }
  113 
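       /**
        * irdma_mmap_free - free a user mmap entry
        * @rdma_entry: mmap entry being released by the rdma core
        */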
  114 static void
  115 irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
  116 {
  117         struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
  118 
  119         kfree(entry);
  120 }
  121 
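       /**
        * irdma_user_mmap_entry_insert - insert a BAR offset mmap entry
        * @ucontext: user context to attach the entry to
        * @bar_offset: offset into the device BAR to map
        * @mmap_flag: caching attribute for the mapping (IRDMA_MMAP_IO_NC/IO_WC)
        * @mmap_offset: returned mmap offset for user space to pass to mmap()
        */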
  122 struct rdma_user_mmap_entry *
  123 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
  124                              enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
  125 {
  126         struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  127         int ret;
  128 
  129         if (!entry)
  130                 return NULL;
  131 
  132         entry->bar_offset = bar_offset;
  133         entry->mmap_flag = mmap_flag;
  134 
  135         ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
  136                                           &entry->rdma_entry, PAGE_SIZE);
  137         if (ret) {
  138                 kfree(entry);
  139                 return NULL;
  140         }
  141         *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
  142 
  143         return &entry->rdma_entry;
  144 }
  145 
  146 /**
  147  * irdma_mmap - user memory map
  148  * @context: context created during alloc
  149  * @vma: kernel info for user memory map
  150  */
  151 static int
  152 irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
  153 {
  154         struct rdma_user_mmap_entry *rdma_entry;
  155         struct irdma_user_mmap_entry *entry;
  156         struct irdma_ucontext *ucontext;
  157         u64 pfn;
  158         int ret;
  159 
  160         ucontext = to_ucontext(context);
  161 
  162         /* Legacy support for libi40iw with hard-coded mmap key */
  163         if (ucontext->legacy_mode)
  164                 return irdma_mmap_legacy(ucontext, vma);
  165 
  166         rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
  167         if (!rdma_entry) {
  168                 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
  169                             "pgoff[0x%lx] does not have valid entry\n",
  170                             vma->vm_pgoff);
  171                 return -EINVAL;
  172         }
  173 
  174         entry = to_irdma_mmap_entry(rdma_entry);
  175         irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
  176                     "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
  177                     entry->mmap_flag);
  178 
  179         pfn = (entry->bar_offset +
  180                pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
  181 
  182         switch (entry->mmap_flag) {
  183         case IRDMA_MMAP_IO_NC:
  184                 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
  185                                         pgprot_noncached(vma->vm_page_prot),
  186                                         rdma_entry);
  187                 break;
  188         case IRDMA_MMAP_IO_WC:
  189                 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
  190                                         pgprot_writecombine(vma->vm_page_prot),
  191                                         rdma_entry);
  192                 break;
  193         default:
  194                 ret = -EINVAL;
  195         }
  196 
  197         if (ret)
  198                 irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
  199                             "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
  200                             entry->bar_offset, entry->mmap_flag, ret);
  201         rdma_user_mmap_entry_put(rdma_entry);
  202 
  203         return ret;
  204 }
  205 
  206 /**
  207  * irdma_alloc_push_page - allocate a push page for qp
  208  * @iwqp: qp pointer
  209  */
  210 static void
  211 irdma_alloc_push_page(struct irdma_qp *iwqp)
  212 {
  213         struct irdma_cqp_request *cqp_request;
  214         struct cqp_cmds_info *cqp_info;
  215         struct irdma_device *iwdev = iwqp->iwdev;
  216         struct irdma_sc_qp *qp = &iwqp->sc_qp;
  217         int status;
  218 
  219         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
  220         if (!cqp_request)
  221                 return;
  222 
  223         cqp_info = &cqp_request->info;
  224         cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
  225         cqp_info->post_sq = 1;
  226         cqp_info->in.u.manage_push_page.info.push_idx = 0;
  227         cqp_info->in.u.manage_push_page.info.qs_handle =
  228             qp->vsi->qos[qp->user_pri].qs_handle;
  229         cqp_info->in.u.manage_push_page.info.free_page = 0;
  230         cqp_info->in.u.manage_push_page.info.push_page_type = 0;
  231         cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
  232         cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
  233 
  234         status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
  235         if (!status && cqp_request->compl_info.op_ret_val <
  236             iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
  237                 qp->push_idx = cqp_request->compl_info.op_ret_val;
  238                 qp->push_offset = 0;
  239         }
  240 
  241         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
  242 }
  243 
  244 /**
  245  * irdma_get_pbl - Retrieve pbl from a list given a virtual
  246  * address
  247  * @va: user virtual address
  248  * @pbl_list: pbl list to search in (QP's or CQ's)
  249  */
  250 struct irdma_pbl *
  251 irdma_get_pbl(unsigned long va,
  252               struct list_head *pbl_list)
  253 {
  254         struct irdma_pbl *iwpbl;
  255 
  256         list_for_each_entry(iwpbl, pbl_list, list) {
  257                 if (iwpbl->user_base == va) {
  258                         list_del(&iwpbl->list);
  259                         iwpbl->on_list = false;
  260                         return iwpbl;
  261                 }
  262         }
  263 
  264         return NULL;
  265 }
  266 
  267 /**
  268  * irdma_clean_cqes - clean cq entries for qp
  269  * @iwqp: qp ptr (user or kernel)
  270  * @iwcq: cq ptr
  271  */
  272 void
  273 irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
  274 {
  275         struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
  276         unsigned long flags;
  277 
  278         spin_lock_irqsave(&iwcq->lock, flags);
  279         irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
  280         spin_unlock_irqrestore(&iwcq->lock, flags);
  281 }
  282 
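       /**
        * irdma_compute_push_wqe_offset - compute BAR offset of a push WQE page
        * @iwdev: irdma device
        * @page_idx: index of the push page
        *
        * On GEN_2 hardware the doorbell page and a reserved region precede
        * the push WQE pages in the BAR.
        */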
   283 static u64 irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx) {
  284         u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
  285 
  286         if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
  287                 /* skip over db page */
  288                 bar_off += IRDMA_HW_PAGE_SIZE;
  289                 /* skip over reserved space */
  290                 bar_off += IRDMA_PF_BAR_RSVD;
  291         }
  292 
  293         /* push wqe page */
  294         bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;
  295 
  296         return bar_off;
  297 }
  298 
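       /**
        * irdma_remove_push_mmap_entries - remove push WQE and doorbell mmap entries
        * @iwqp: qp ptr
        */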
  299 void
  300 irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
  301 {
  302         if (iwqp->push_db_mmap_entry) {
  303                 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
  304                 iwqp->push_db_mmap_entry = NULL;
  305         }
  306         if (iwqp->push_wqe_mmap_entry) {
  307                 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
  308                 iwqp->push_wqe_mmap_entry = NULL;
  309         }
  310 }
  311 
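       /**
        * irdma_setup_push_mmap_entries - setup mmap entries for the push pages
        * @ucontext: user context
        * @iwqp: qp ptr
        * @push_wqe_mmap_key: returned key for the write-combined push WQE page
        * @push_db_mmap_key: returned key for the non-cached push doorbell page
        */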
  312 static int
  313 irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
  314                               struct irdma_qp *iwqp,
  315                               u64 *push_wqe_mmap_key,
  316                               u64 *push_db_mmap_key)
  317 {
  318         struct irdma_device *iwdev = ucontext->iwdev;
  319         u64 bar_off;
  320 
  321         WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
  322 
  323         bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
  324 
  325         iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
  326                                                                  bar_off, IRDMA_MMAP_IO_WC,
  327                                                                  push_wqe_mmap_key);
  328         if (!iwqp->push_wqe_mmap_entry)
  329                 return -ENOMEM;
  330 
  331         /* push doorbell page */
  332         bar_off += IRDMA_HW_PAGE_SIZE;
  333         iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
  334                                                                 bar_off, IRDMA_MMAP_IO_NC,
  335                                                                 push_db_mmap_key);
  336         if (!iwqp->push_db_mmap_entry) {
  337                 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
  338                 return -ENOMEM;
  339         }
  340 
  341         return 0;
  342 }
  343 
  344 /**
  345  * irdma_setup_virt_qp - setup for allocation of virtual qp
  346  * @iwdev: irdma device
  347  * @iwqp: qp ptr
   348  * @init_info: initialization info to return
  349  */
  350 void
  351 irdma_setup_virt_qp(struct irdma_device *iwdev,
  352                     struct irdma_qp *iwqp,
  353                     struct irdma_qp_init_info *init_info)
  354 {
  355         struct irdma_pbl *iwpbl = iwqp->iwpbl;
  356         struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
  357 
  358         iwqp->page = qpmr->sq_page;
  359         init_info->shadow_area_pa = qpmr->shadow;
  360         if (iwpbl->pbl_allocated) {
  361                 init_info->virtual_map = true;
  362                 init_info->sq_pa = qpmr->sq_pbl.idx;
  363                 init_info->rq_pa = qpmr->rq_pbl.idx;
  364         } else {
  365                 init_info->sq_pa = qpmr->sq_pbl.addr;
  366                 init_info->rq_pa = qpmr->rq_pbl.addr;
  367         }
  368 }
  369 
  370 /**
  371  * irdma_setup_umode_qp - setup sq and rq size in user mode qp
  372  * @udata: user data
  373  * @iwdev: iwarp device
  374  * @iwqp: qp ptr (user or kernel)
   375  * @info: initialization info to return
  376  * @init_attr: Initial QP create attributes
  377  */
  378 int
  379 irdma_setup_umode_qp(struct ib_udata *udata,
  380                      struct irdma_device *iwdev,
  381                      struct irdma_qp *iwqp,
  382                      struct irdma_qp_init_info *info,
  383                      struct ib_qp_init_attr *init_attr)
  384 {
  385         struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
  386         struct irdma_create_qp_req req = {0};
  387         unsigned long flags;
  388         int ret;
  389 
  390         ret = ib_copy_from_udata(&req, udata,
  391                                  min(sizeof(req), udata->inlen));
  392         if (ret) {
  393                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
   394                             "ib_copy_from_udata fail\n");
  395                 return ret;
  396         }
  397 
  398         iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
  399         iwqp->user_mode = 1;
  400         if (req.user_wqe_bufs) {
  401                 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
  402 
  403                 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
  404                 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
  405                 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
  406                                             &ucontext->qp_reg_mem_list);
  407                 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
  408 
  409                 if (!iwqp->iwpbl) {
  410                         ret = -ENODATA;
  411                         irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
  412                                     "no pbl info\n");
  413                         return ret;
  414                 }
  415         }
  416 
  417         if (ukinfo->abi_ver <= 5) {
   418                 /*
   419                  * ABI versions less than 6 pass the raw SQ and RQ
   420                  * quanta in cap.max_send_wr and cap.max_recv_wr.
   421                  */
  422                 iwqp->max_send_wr = init_attr->cap.max_send_wr;
  423                 iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
  424                 ukinfo->sq_size = init_attr->cap.max_send_wr;
  425                 ukinfo->rq_size = init_attr->cap.max_recv_wr;
  426                 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
  427         } else {
  428                 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
  429                                                    &ukinfo->sq_shift);
  430                 if (ret)
  431                         return ret;
  432 
  433                 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
  434                                                    &ukinfo->rq_shift);
  435                 if (ret)
  436                         return ret;
  437 
  438                 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
  439                 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
  440                 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
  441                 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
  442         }
  443         irdma_setup_virt_qp(iwdev, iwqp, info);
  444 
  445         return 0;
  446 }
  447 
  448 /**
  449  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
  450  * @iwdev: iwarp device
  451  * @iwqp: qp ptr (user or kernel)
   452  * @info: initialization info to return
  453  * @init_attr: Initial QP create attributes
  454  */
  455 int
  456 irdma_setup_kmode_qp(struct irdma_device *iwdev,
  457                      struct irdma_qp *iwqp,
  458                      struct irdma_qp_init_info *info,
  459                      struct ib_qp_init_attr *init_attr)
  460 {
  461         struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
  462         u32 size;
  463         int status;
  464         struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
  465 
  466         status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
  467                                               &ukinfo->sq_shift);
  468         if (status)
  469                 return status;
  470 
  471         status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
  472                                               &ukinfo->rq_shift);
  473         if (status)
  474                 return status;
  475 
  476         iwqp->kqp.sq_wrid_mem =
  477             kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
  478         if (!iwqp->kqp.sq_wrid_mem)
  479                 return -ENOMEM;
  480 
  481         iwqp->kqp.rq_wrid_mem =
  482             kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
  483         if (!iwqp->kqp.rq_wrid_mem) {
  484                 kfree(iwqp->kqp.sq_wrid_mem);
  485                 iwqp->kqp.sq_wrid_mem = NULL;
  486                 return -ENOMEM;
  487         }
  488 
  489         iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
   490         /* kcalloc() already zero-fills; the allocation is checked below */
  491         if (!iwqp->kqp.sig_trk_mem) {
  492                 kfree(iwqp->kqp.sq_wrid_mem);
  493                 iwqp->kqp.sq_wrid_mem = NULL;
  494                 kfree(iwqp->kqp.rq_wrid_mem);
  495                 iwqp->kqp.rq_wrid_mem = NULL;
  496                 return -ENOMEM;
  497         }
  498         ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
  499         ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
  500         ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
  501 
  502         size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
  503         size += (IRDMA_SHADOW_AREA_SIZE << 3);
  504 
  505         mem->size = size;
  506         mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
  507                                          256);
  508         if (!mem->va) {
  509                 kfree(iwqp->kqp.sq_wrid_mem);
  510                 iwqp->kqp.sq_wrid_mem = NULL;
  511                 kfree(iwqp->kqp.rq_wrid_mem);
  512                 iwqp->kqp.rq_wrid_mem = NULL;
  513                 return -ENOMEM;
  514         }
  515 
  516         ukinfo->sq = mem->va;
  517         info->sq_pa = mem->pa;
  518         ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
  519         info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
  520         ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
  521         info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
  522         ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
  523         ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
  524         ukinfo->qp_id = iwqp->ibqp.qp_num;
  525 
  526         iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
  527         iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
  528         init_attr->cap.max_send_wr = iwqp->max_send_wr;
  529         init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
  530 
  531         return 0;
  532 }
  533 
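       /**
        * irdma_cqp_create_qp_cmd - issue a CQP command to create the qp
        * @iwqp: qp ptr
        */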
  534 int
  535 irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
  536 {
  537         struct irdma_pci_f *rf = iwqp->iwdev->rf;
  538         struct irdma_cqp_request *cqp_request;
  539         struct cqp_cmds_info *cqp_info;
  540         struct irdma_create_qp_info *qp_info;
  541         int status;
  542 
  543         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
  544         if (!cqp_request)
  545                 return -ENOMEM;
  546 
  547         cqp_info = &cqp_request->info;
  548         qp_info = &cqp_request->info.in.u.qp_create.info;
  549         memset(qp_info, 0, sizeof(*qp_info));
  550         qp_info->mac_valid = true;
  551         qp_info->cq_num_valid = true;
  552         qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
  553 
  554         cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
  555         cqp_info->post_sq = 1;
  556         cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
  557         cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
  558         status = irdma_handle_cqp_op(rf, cqp_request);
  559         irdma_put_cqp_request(&rf->cqp, cqp_request);
  560 
  561         return status;
  562 }
  563 
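       /**
        * irdma_roce_fill_and_set_qpctx_info - fill RoCE offload info and set context
        * @iwqp: qp ptr
        * @ctx_info: qp host context info to populate
        */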
  564 void
  565 irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
  566                                    struct irdma_qp_host_ctx_info *ctx_info)
  567 {
  568         struct irdma_device *iwdev = iwqp->iwdev;
  569         struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
  570         struct irdma_roce_offload_info *roce_info;
  571         struct irdma_udp_offload_info *udp_info;
  572 
  573         udp_info = &iwqp->udp_info;
  574         udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
  575         udp_info->cwnd = iwdev->roce_cwnd;
  576         udp_info->rexmit_thresh = 2;
  577         udp_info->rnr_nak_thresh = 2;
  578         udp_info->src_port = 0xc000;
  579         udp_info->dst_port = ROCE_V2_UDP_DPORT;
  580         roce_info = &iwqp->roce_info;
  581         ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));
  582 
  583         roce_info->rd_en = true;
  584         roce_info->wr_rdresp_en = true;
  585         roce_info->bind_en = true;
  586         roce_info->dcqcn_en = false;
  587         roce_info->rtomin = 5;
  588 
  589         roce_info->ack_credits = iwdev->roce_ackcreds;
  590         roce_info->ird_size = dev->hw_attrs.max_hw_ird;
  591         roce_info->ord_size = dev->hw_attrs.max_hw_ord;
  592 
  593         if (!iwqp->user_mode) {
  594                 roce_info->priv_mode_en = true;
  595                 roce_info->fast_reg_en = true;
  596                 roce_info->udprivcq_en = true;
  597         }
  598         roce_info->roce_tver = 0;
  599 
  600         ctx_info->roce_info = &iwqp->roce_info;
  601         ctx_info->udp_info = &iwqp->udp_info;
  602         irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
  603 }
  604 
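       /**
        * irdma_iw_fill_and_set_qpctx_info - fill iWARP offload info and set context
        * @iwqp: qp ptr
        * @ctx_info: qp host context info to populate
        */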
  605 void
  606 irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
  607                                  struct irdma_qp_host_ctx_info *ctx_info)
  608 {
  609         struct irdma_device *iwdev = iwqp->iwdev;
  610         struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
  611         struct irdma_iwarp_offload_info *iwarp_info;
  612 
  613         iwarp_info = &iwqp->iwarp_info;
  614         ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
  615         iwarp_info->rd_en = true;
  616         iwarp_info->wr_rdresp_en = true;
  617         iwarp_info->bind_en = true;
  618         iwarp_info->ecn_en = true;
  619         iwarp_info->rtomin = 5;
  620 
  621         if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  622                 iwarp_info->ib_rd_en = true;
  623         if (!iwqp->user_mode) {
  624                 iwarp_info->priv_mode_en = true;
  625                 iwarp_info->fast_reg_en = true;
  626         }
  627         iwarp_info->ddp_ver = 1;
  628         iwarp_info->rdmap_ver = 1;
  629 
  630         ctx_info->iwarp_info = &iwqp->iwarp_info;
  631         ctx_info->iwarp_info_valid = true;
  632         irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
  633         ctx_info->iwarp_info_valid = false;
  634 }
  635 
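       /**
        * irdma_validate_qp_attrs - validate create qp attributes against hw limits
        * @init_attr: QP create attributes from the stack
        * @iwdev: irdma device
        */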
  636 int
  637 irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
  638                         struct irdma_device *iwdev)
  639 {
  640         struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
  641         struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
  642 
  643         if (init_attr->create_flags)
  644                 return -EOPNOTSUPP;
  645 
  646         if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
  647             init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
  648             init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
  649                 return -EINVAL;
  650 
  651         if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
  652                 if (init_attr->qp_type != IB_QPT_RC &&
  653                     init_attr->qp_type != IB_QPT_UD &&
  654                     init_attr->qp_type != IB_QPT_GSI)
  655                         return -EOPNOTSUPP;
  656         } else {
  657                 if (init_attr->qp_type != IB_QPT_RC)
  658                         return -EOPNOTSUPP;
  659         }
  660 
  661         return 0;
  662 }
  663 
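       /**
        * irdma_sched_qp_flush_work - schedule deferred flush work for the qp
        * @iwqp: qp ptr
        *
        * A QP reference is held while the flush work is pending.
        */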
  664 void
  665 irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
  666 {
  667         irdma_qp_add_ref(&iwqp->ibqp);
  668         if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
  669                              msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
  670                 irdma_qp_rem_ref(&iwqp->ibqp);
  671 }
  672 
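       /**
        * irdma_flush_worker - delayed work handler that generates flush completions
        * @work: work_struct embedded in the qp's dwork_flush
        */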
  673 void
  674 irdma_flush_worker(struct work_struct *work)
  675 {
  676         struct delayed_work *dwork = to_delayed_work(work);
  677         struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
  678 
  679         irdma_generate_flush_completions(iwqp);
   680         /* Drop the reference taken in irdma_sched_qp_flush_work */
  681         irdma_qp_rem_ref(&iwqp->ibqp);
  682 }
  683 
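       /**
        * irdma_get_ib_acc_flags - convert qp offload info into IB access flags
        * @iwqp: qp ptr
        */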
  684 static int
  685 irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
  686 {
  687         int acc_flags = 0;
  688 
  689         if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
  690                 if (iwqp->roce_info.wr_rdresp_en) {
  691                         acc_flags |= IB_ACCESS_LOCAL_WRITE;
  692                         acc_flags |= IB_ACCESS_REMOTE_WRITE;
  693                 }
  694                 if (iwqp->roce_info.rd_en)
  695                         acc_flags |= IB_ACCESS_REMOTE_READ;
  696                 if (iwqp->roce_info.bind_en)
  697                         acc_flags |= IB_ACCESS_MW_BIND;
  698         } else {
  699                 if (iwqp->iwarp_info.wr_rdresp_en) {
  700                         acc_flags |= IB_ACCESS_LOCAL_WRITE;
  701                         acc_flags |= IB_ACCESS_REMOTE_WRITE;
  702                 }
  703                 if (iwqp->iwarp_info.rd_en)
  704                         acc_flags |= IB_ACCESS_REMOTE_READ;
  705                 if (iwqp->iwarp_info.bind_en)
  706                         acc_flags |= IB_ACCESS_MW_BIND;
  707         }
  708         return acc_flags;
  709 }
  710 
  711 /**
  712  * irdma_query_qp - query qp attributes
  713  * @ibqp: qp pointer
  714  * @attr: attributes pointer
  715  * @attr_mask: Not used
  716  * @init_attr: qp attributes to return
  717  */
  718 static int
  719 irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  720                int attr_mask, struct ib_qp_init_attr *init_attr)
  721 {
  722         struct irdma_qp *iwqp = to_iwqp(ibqp);
  723         struct irdma_sc_qp *qp = &iwqp->sc_qp;
  724 
  725         memset(attr, 0, sizeof(*attr));
  726         memset(init_attr, 0, sizeof(*init_attr));
  727 
  728         attr->qp_state = iwqp->ibqp_state;
  729         attr->cur_qp_state = iwqp->ibqp_state;
  730         attr->cap.max_send_wr = iwqp->max_send_wr;
  731         attr->cap.max_recv_wr = iwqp->max_recv_wr;
  732         attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
  733         attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
  734         attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
  735         attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
  736         attr->port_num = 1;
  737         if (rdma_protocol_roce(ibqp->device, 1)) {
  738                 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
  739                 attr->qkey = iwqp->roce_info.qkey;
  740                 attr->rq_psn = iwqp->udp_info.epsn;
  741                 attr->sq_psn = iwqp->udp_info.psn_nxt;
  742                 attr->dest_qp_num = iwqp->roce_info.dest_qp;
  743                 attr->pkey_index = iwqp->roce_info.p_key;
  744                 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
  745                 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
  746                 attr->max_rd_atomic = iwqp->roce_info.ord_size;
  747                 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
  748         }
  749 
  750         init_attr->event_handler = iwqp->ibqp.event_handler;
  751         init_attr->qp_context = iwqp->ibqp.qp_context;
  752         init_attr->send_cq = iwqp->ibqp.send_cq;
  753         init_attr->recv_cq = iwqp->ibqp.recv_cq;
  754         init_attr->cap = attr->cap;
  755 
  756         return 0;
  757 }
  758 
  759 /**
  760  * irdma_modify_qp_roce - modify qp request
  761  * @ibqp: qp's pointer for modify
  762  * @attr: access attributes
  763  * @attr_mask: state mask
  764  * @udata: user data
  765  */
  766 int
  767 irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  768                      int attr_mask, struct ib_udata *udata)
  769 {
  770 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
  771 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
  772         struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
  773         struct irdma_qp *iwqp = to_iwqp(ibqp);
  774         struct irdma_device *iwdev = iwqp->iwdev;
  775         struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
  776         struct irdma_qp_host_ctx_info *ctx_info;
  777         struct irdma_roce_offload_info *roce_info;
  778         struct irdma_udp_offload_info *udp_info;
  779         struct irdma_modify_qp_info info = {0};
  780         struct irdma_modify_qp_resp uresp = {};
  781         struct irdma_modify_qp_req ureq;
  782         unsigned long flags;
  783         u8 issue_modify_qp = 0;
  784         int ret = 0;
  785 
  786         ctx_info = &iwqp->ctx_info;
  787         roce_info = &iwqp->roce_info;
  788         udp_info = &iwqp->udp_info;
  789 
  790         if (udata) {
  791                 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
  792                     (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
  793                         return -EINVAL;
  794         }
  795 
  796         if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
  797                 return -EOPNOTSUPP;
  798 
  799         if (attr_mask & IB_QP_DEST_QPN)
  800                 roce_info->dest_qp = attr->dest_qp_num;
  801 
  802         if (attr_mask & IB_QP_PKEY_INDEX) {
  803                 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
  804                                        &roce_info->p_key);
  805                 if (ret)
  806                         return ret;
  807         }
  808 
  809         if (attr_mask & IB_QP_QKEY)
  810                 roce_info->qkey = attr->qkey;
  811 
  812         if (attr_mask & IB_QP_PATH_MTU)
  813                 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
  814 
  815         if (attr_mask & IB_QP_SQ_PSN) {
  816                 udp_info->psn_nxt = attr->sq_psn;
  817                 udp_info->lsn = 0xffff;
  818                 udp_info->psn_una = attr->sq_psn;
  819                 udp_info->psn_max = attr->sq_psn;
  820         }
  821 
  822         if (attr_mask & IB_QP_RQ_PSN)
  823                 udp_info->epsn = attr->rq_psn;
  824 
  825         if (attr_mask & IB_QP_RNR_RETRY)
  826                 udp_info->rnr_nak_thresh = attr->rnr_retry;
  827 
  828         if (attr_mask & IB_QP_RETRY_CNT)
  829                 udp_info->rexmit_thresh = attr->retry_cnt;
  830 
  831         ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
  832 
  833         if (attr_mask & IB_QP_AV) {
  834                 struct irdma_av *av = &iwqp->roce_ah.av;
  835                 u16 vlan_id = VLAN_N_VID;
  836                 u32 local_ip[4] = {};
  837 
  838                 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
  839                 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
  840                         udp_info->ttl = attr->ah_attr.grh.hop_limit;
  841                         udp_info->flow_label = attr->ah_attr.grh.flow_label;
  842                         udp_info->tos = attr->ah_attr.grh.traffic_class;
  843 
  844                         udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
  845                                                                    ibqp->qp_num,
  846                                                                    roce_info->dest_qp);
  847 
  848                         irdma_qp_rem_qos(&iwqp->sc_qp);
  849                         dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
  850                         if (iwqp->sc_qp.vsi->dscp_mode)
  851                                 ctx_info->user_pri =
  852                                     iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
  853                         else
  854                                 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
  855                 }
  856                 ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
  857                 if (ret)
  858                         return ret;
  859                 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
  860                         return -ENOMEM;
  861                 iwqp->sc_qp.user_pri = ctx_info->user_pri;
  862                 irdma_qp_add_qos(&iwqp->sc_qp);
  863 
  864                 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
  865                         vlan_id = 0;
  866                 if (vlan_id < VLAN_N_VID) {
  867                         udp_info->insert_vlan_tag = true;
  868                         udp_info->vlan_tag = vlan_id |
  869                             ctx_info->user_pri << VLAN_PRIO_SHIFT;
  870                 } else {
  871                         udp_info->insert_vlan_tag = false;
  872                 }
  873 
  874                 av->attrs = attr->ah_attr;
  875                 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
  876                 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
  877                         __be32 *daddr =
  878                         av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
  879                         __be32 *saddr =
  880                         av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
  881 
  882                         irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
  883                         irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
  884 
  885                         udp_info->ipv4 = false;
  886                         irdma_copy_ip_ntohl(local_ip, daddr);
  887 
  888                         udp_info->arp_idx = irdma_arp_table(iwdev->rf, local_ip,
  889                                                             NULL, IRDMA_ARP_RESOLVE);
  890                 } else {
  891                         __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
  892                         __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
  893 
  894                         local_ip[0] = ntohl(daddr);
  895 
  896                         udp_info->ipv4 = true;
  897                         udp_info->dest_ip_addr[0] = 0;
  898                         udp_info->dest_ip_addr[1] = 0;
  899                         udp_info->dest_ip_addr[2] = 0;
  900                         udp_info->dest_ip_addr[3] = local_ip[0];
  901 
  902                         udp_info->local_ipaddr[0] = 0;
  903                         udp_info->local_ipaddr[1] = 0;
  904                         udp_info->local_ipaddr[2] = 0;
  905                         udp_info->local_ipaddr[3] = ntohl(saddr);
  906                 }
  907                 udp_info->arp_idx =
  908                     irdma_add_arp(iwdev->rf, local_ip,
  909                                   ah_attr_to_dmac(attr->ah_attr));
  910         }
  911 
  912         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
  913                 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
  914                         ibdev_err(&iwdev->ibdev,
  915                                   "rd_atomic = %d, above max_hw_ord=%d\n",
  916                                   attr->max_rd_atomic,
  917                                   dev->hw_attrs.max_hw_ord);
  918                         return -EINVAL;
  919                 }
  920                 if (attr->max_rd_atomic)
  921                         roce_info->ord_size = attr->max_rd_atomic;
  922                 info.ord_valid = true;
  923         }
  924 
  925         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
  926                 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
  927                         ibdev_err(&iwdev->ibdev,
   928                                   "dest_rd_atomic = %d, above max_hw_ird=%d\n",
   929                                   attr->max_dest_rd_atomic,
  930                                   dev->hw_attrs.max_hw_ird);
  931                         return -EINVAL;
  932                 }
  933                 if (attr->max_dest_rd_atomic)
  934                         roce_info->ird_size = attr->max_dest_rd_atomic;
  935         }
  936 
  937         if (attr_mask & IB_QP_ACCESS_FLAGS) {
  938                 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
  939                         roce_info->wr_rdresp_en = true;
  940                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
  941                         roce_info->wr_rdresp_en = true;
  942                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
  943                         roce_info->rd_en = true;
  944         }
  945 
  946         wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
  947 
  948         irdma_debug(dev, IRDMA_DEBUG_VERBS,
  949                     "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
  950                     __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state,
  951                     iwqp->iwarp_state, attr_mask);
  952 
  953         spin_lock_irqsave(&iwqp->lock, flags);
  954         if (attr_mask & IB_QP_STATE) {
  955                 if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
  956                                            iwqp->ibqp.qp_type, attr_mask,
  957                                            IB_LINK_LAYER_ETHERNET)) {
  958                         irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
  959                                     iwqp->ibqp.qp_num, iwqp->ibqp_state,
  960                                     attr->qp_state);
  961                         ret = -EINVAL;
  962                         goto exit;
  963                 }
  964                 info.curr_iwarp_state = iwqp->iwarp_state;
  965 
  966                 switch (attr->qp_state) {
  967                 case IB_QPS_INIT:
  968                         if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
  969                                 ret = -EINVAL;
  970                                 goto exit;
  971                         }
  972 
  973                         if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
  974                                 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
  975                                 issue_modify_qp = 1;
  976                         }
  977                         break;
  978                 case IB_QPS_RTR:
  979                         if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
  980                                 ret = -EINVAL;
  981                                 goto exit;
  982                         }
  983                         info.arp_cache_idx_valid = true;
  984                         info.cq_num_valid = true;
  985                         info.next_iwarp_state = IRDMA_QP_STATE_RTR;
  986                         issue_modify_qp = 1;
  987                         break;
  988                 case IB_QPS_RTS:
  989                         if (iwqp->ibqp_state < IB_QPS_RTR ||
  990                             iwqp->ibqp_state == IB_QPS_ERR) {
  991                                 ret = -EINVAL;
  992                                 goto exit;
  993                         }
  994 
  995                         info.arp_cache_idx_valid = true;
  996                         info.cq_num_valid = true;
  997                         info.ord_valid = true;
  998                         info.next_iwarp_state = IRDMA_QP_STATE_RTS;
  999                         issue_modify_qp = 1;
 1000                         if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
 1001                                 iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
 1002                         udp_info->cwnd = iwdev->roce_cwnd;
 1003                         roce_info->ack_credits = iwdev->roce_ackcreds;
 1004                         if (iwdev->push_mode && udata &&
 1005                             iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
 1006                             dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1007                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1008                                 irdma_alloc_push_page(iwqp);
 1009                                 spin_lock_irqsave(&iwqp->lock, flags);
 1010                         }
 1011                         break;
 1012                 case IB_QPS_SQD:
 1013                         if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
 1014                                 goto exit;
 1015 
 1016                         if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
 1017                                 ret = -EINVAL;
 1018                                 goto exit;
 1019                         }
 1020 
 1021                         info.next_iwarp_state = IRDMA_QP_STATE_SQD;
 1022                         issue_modify_qp = 1;
 1023                         break;
 1024                 case IB_QPS_SQE:
 1025                 case IB_QPS_ERR:
 1026                 case IB_QPS_RESET:
 1027                         if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
 1028                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1029                                 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
 1030                                 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
 1031                                 spin_lock_irqsave(&iwqp->lock, flags);
 1032                         }
 1033 
 1034                         if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 1035                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1036                                 if (udata && udata->inlen) {
 1037                                         if (ib_copy_from_udata(&ureq, udata,
 1038                                                                min(sizeof(ureq), udata->inlen)))
 1039                                                 return -EINVAL;
 1040 
 1041                                         irdma_flush_wqes(iwqp,
 1042                                                          (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
 1043                                                          (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
 1044                                                          IRDMA_REFLUSH);
 1045                                 }
 1046                                 return 0;
 1047                         }
 1048 
 1049                         info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
 1050                         issue_modify_qp = 1;
 1051                         break;
 1052                 default:
 1053                         ret = -EINVAL;
 1054                         goto exit;
 1055                 }
 1056 
 1057                 iwqp->ibqp_state = attr->qp_state;
 1058         }
 1059 
 1060         ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
 1061         ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
 1062         irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
 1063         spin_unlock_irqrestore(&iwqp->lock, flags);
 1064 
 1065         if (attr_mask & IB_QP_STATE) {
 1066                 if (issue_modify_qp) {
 1067                         ctx_info->rem_endpoint_idx = udp_info->arp_idx;
 1068                         if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
 1069                                 return -EINVAL;
 1070                         spin_lock_irqsave(&iwqp->lock, flags);
 1071                         if (iwqp->iwarp_state == info.curr_iwarp_state) {
 1072                                 iwqp->iwarp_state = info.next_iwarp_state;
 1073                                 iwqp->ibqp_state = attr->qp_state;
 1074                         }
 1075                         if (iwqp->ibqp_state > IB_QPS_RTS &&
 1076                             !iwqp->flush_issued) {
 1077                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1078                                 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
 1079                                                  IRDMA_FLUSH_RQ |
 1080                                                  IRDMA_FLUSH_WAIT);
 1081                                 iwqp->flush_issued = 1;
 1082 
 1083                         } else {
 1084                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1085                         }
 1086                 } else {
 1087                         iwqp->ibqp_state = attr->qp_state;
 1088                 }
 1089                 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1090                         struct irdma_ucontext *ucontext;
 1091 
 1092                         ucontext = rdma_udata_to_drv_context(udata,
 1093                                                              struct irdma_ucontext, ibucontext);
 1094                         if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
 1095                             !iwqp->push_wqe_mmap_entry &&
 1096                             !irdma_setup_push_mmap_entries(ucontext, iwqp,
 1097                                                            &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
 1098                                 uresp.push_valid = 1;
 1099                                 uresp.push_offset = iwqp->sc_qp.push_offset;
 1100                         }
 1101                         uresp.rd_fence_rate = iwdev->rd_fence_rate;
 1102                         ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
 1103                                                                   udata->outlen));
 1104                         if (ret) {
 1105                                 irdma_remove_push_mmap_entries(iwqp);
 1106                                 irdma_debug(iwdev_to_idev(iwdev),
 1107                                             IRDMA_DEBUG_VERBS,
 1108                                             "copy_to_udata failed\n");
 1109                                 return ret;
 1110                         }
 1111                 }
 1112         }
 1113 
 1114         return 0;
 1115 exit:
 1116         spin_unlock_irqrestore(&iwqp->lock, flags);
 1117 
 1118         return ret;
 1119 }
 1120 
 1121 /**
 1122  * irdma_modify_qp - modify qp request
 1123  * @ibqp: qp's pointer for modify
 1124  * @attr: access attributes
 1125  * @attr_mask: state mask
 1126  * @udata: user data
 1127  */
 1128 int
 1129 irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 1130                 struct ib_udata *udata)
 1131 {
 1132 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
 1133 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 1134         struct irdma_qp *iwqp = to_iwqp(ibqp);
 1135         struct irdma_device *iwdev = iwqp->iwdev;
 1136         struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
 1137         struct irdma_qp_host_ctx_info *ctx_info;
 1138         struct irdma_tcp_offload_info *tcp_info;
 1139         struct irdma_iwarp_offload_info *offload_info;
 1140         struct irdma_modify_qp_info info = {0};
 1141         struct irdma_modify_qp_resp uresp = {};
 1142         struct irdma_modify_qp_req ureq = {};
 1143         u8 issue_modify_qp = 0;
 1144         u8 dont_wait = 0;
 1145         int err;
 1146         unsigned long flags;
 1147 
 1148         if (udata) {
 1149                 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
 1150                     (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
 1151                         return -EINVAL;
 1152         }
 1153 
 1154         if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 1155                 return -EOPNOTSUPP;
 1156 
 1157         ctx_info = &iwqp->ctx_info;
 1158         offload_info = &iwqp->iwarp_info;
 1159         tcp_info = &iwqp->tcp_info;
 1160         wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
 1161         irdma_debug(dev, IRDMA_DEBUG_VERBS,
 1162                     "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
 1163                     __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state,
 1164                     iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
 1165 
 1166         spin_lock_irqsave(&iwqp->lock, flags);
 1167         if (attr_mask & IB_QP_STATE) {
 1168                 info.curr_iwarp_state = iwqp->iwarp_state;
 1169                 switch (attr->qp_state) {
 1170                 case IB_QPS_INIT:
 1171                 case IB_QPS_RTR:
 1172                         if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
 1173                                 err = -EINVAL;
 1174                                 goto exit;
 1175                         }
 1176 
 1177                         if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
 1178                                 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
 1179                                 issue_modify_qp = 1;
 1180                         }
 1181                         if (iwdev->push_mode && udata &&
 1182                             iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
 1183                             dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1184                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1185                                 irdma_alloc_push_page(iwqp);
 1186                                 spin_lock_irqsave(&iwqp->lock, flags);
 1187                         }
 1188                         break;
 1189                 case IB_QPS_RTS:
 1190                         if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
 1191                             !iwqp->cm_id) {
 1192                                 err = -EINVAL;
 1193                                 goto exit;
 1194                         }
 1195 
 1196                         issue_modify_qp = 1;
 1197                         iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
 1198                         iwqp->hte_added = 1;
 1199                         info.next_iwarp_state = IRDMA_QP_STATE_RTS;
 1200                         info.tcp_ctx_valid = true;
 1201                         info.ord_valid = true;
 1202                         info.arp_cache_idx_valid = true;
 1203                         info.cq_num_valid = true;
 1204                         break;
 1205                 case IB_QPS_SQD:
 1206                         if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
 1207                                 err = 0;
 1208                                 goto exit;
 1209                         }
 1210 
 1211                         if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
 1212                             iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
 1213                                 err = 0;
 1214                                 goto exit;
 1215                         }
 1216 
 1217                         if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
 1218                                 err = -EINVAL;
 1219                                 goto exit;
 1220                         }
 1221 
 1222                         info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
 1223                         issue_modify_qp = 1;
 1224                         break;
 1225                 case IB_QPS_SQE:
 1226                         if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
 1227                                 err = -EINVAL;
 1228                                 goto exit;
 1229                         }
 1230 
 1231                         info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
 1232                         issue_modify_qp = 1;
 1233                         break;
 1234                 case IB_QPS_ERR:
 1235                 case IB_QPS_RESET:
 1236                         if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 1237                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1238                                 if (udata && udata->inlen) {
 1239                                         if (ib_copy_from_udata(&ureq, udata,
 1240                                                                min(sizeof(ureq), udata->inlen)))
 1241                                                 return -EINVAL;
 1242 
 1243                                         irdma_flush_wqes(iwqp,
 1244                                                          (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
 1245                                                          (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
 1246                                                          IRDMA_REFLUSH);
 1247                                 }
 1248                                 return 0;
 1249                         }
 1250 
 1251                         if (iwqp->sc_qp.term_flags) {
 1252                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1253                                 irdma_terminate_del_timer(&iwqp->sc_qp);
 1254                                 spin_lock_irqsave(&iwqp->lock, flags);
 1255                         }
 1256                         info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
 1257                         if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
 1258                             iwdev->iw_status &&
 1259                             iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
 1260                                 info.reset_tcp_conn = true;
 1261                         else
 1262                                 dont_wait = 1;
 1263 
 1264                         issue_modify_qp = 1;
 1265                         info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
 1266                         break;
 1267                 default:
 1268                         err = -EINVAL;
 1269                         goto exit;
 1270                 }
 1271 
 1272                 iwqp->ibqp_state = attr->qp_state;
 1273         }
 1274         if (attr_mask & IB_QP_ACCESS_FLAGS) {
 1275                 ctx_info->iwarp_info_valid = true;
 1276                 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
 1277                         offload_info->wr_rdresp_en = true;
 1278                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
 1279                         offload_info->wr_rdresp_en = true;
 1280                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
 1281                         offload_info->rd_en = true;
 1282         }
 1283 
 1284         if (ctx_info->iwarp_info_valid) {
 1285                 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
 1286                 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
 1287                 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
 1288         }
 1289         spin_unlock_irqrestore(&iwqp->lock, flags);
 1290 
 1291         if (attr_mask & IB_QP_STATE) {
 1292                 if (issue_modify_qp) {
 1293                         ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
 1294                         if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
 1295                                 return -EINVAL;
 1296                 }
 1297 
 1298                 spin_lock_irqsave(&iwqp->lock, flags);
 1299                 if (iwqp->iwarp_state == info.curr_iwarp_state) {
 1300                         iwqp->iwarp_state = info.next_iwarp_state;
 1301                         iwqp->ibqp_state = attr->qp_state;
 1302                 }
 1303                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1304         }
 1305 
 1306         if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
 1307                 if (dont_wait) {
 1308                         if (iwqp->hw_tcp_state) {
 1309                                 spin_lock_irqsave(&iwqp->lock, flags);
 1310                                 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
 1311                                 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
 1312                                 spin_unlock_irqrestore(&iwqp->lock, flags);
 1313                         }
 1314                         irdma_cm_disconn(iwqp);
 1315                 } else {
 1316                         int close_timer_started;
 1317 
 1318                         spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
 1319 
 1320                         if (iwqp->cm_node) {
 1321                                 atomic_inc(&iwqp->cm_node->refcnt);
 1322                                 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
 1323                                 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
 1324                                 if (iwqp->cm_id && close_timer_started == 1)
 1325                                         irdma_schedule_cm_timer(iwqp->cm_node,
 1326                                                                 (struct irdma_puda_buf *)iwqp,
 1327                                                                 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
 1328 
 1329                                 irdma_rem_ref_cm_node(iwqp->cm_node);
 1330                         } else {
 1331                                 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
 1332                         }
 1333                 }
 1334         }
 1335         if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
 1336             dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 1337                 struct irdma_ucontext *ucontext;
 1338 
 1339                 ucontext = rdma_udata_to_drv_context(udata,
 1340                                                      struct irdma_ucontext, ibucontext);
 1341                 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
 1342                     !iwqp->push_wqe_mmap_entry &&
 1343                     !irdma_setup_push_mmap_entries(ucontext, iwqp,
 1344                                                    &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
 1345                         uresp.push_valid = 1;
 1346                         uresp.push_offset = iwqp->sc_qp.push_offset;
 1347                 }
 1348                 uresp.rd_fence_rate = iwdev->rd_fence_rate;
 1349 
 1350                 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
 1351                                                           udata->outlen));
 1352                 if (err) {
 1353                         irdma_remove_push_mmap_entries(iwqp);
 1354                         irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 1355                                     "copy_to_udata failed\n");
 1356                         return err;
 1357                 }
 1358         }
 1359 
 1360         return 0;
 1361 exit:
 1362         spin_unlock_irqrestore(&iwqp->lock, flags);
 1363 
 1364         return err;
 1365 }
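
/*
 * [Editor's illustration -- not part of irdma_verbs.c] A minimal user-space
 * sketch of the call that ultimately reaches the modify-QP verb above through
 * libibverbs.  Moving the QP to the error state is what drives the flush and
 * disconnect handling seen in the IB_QPS_ERR/IB_QPS_RESET case.  The function
 * name is hypothetical; ibv_modify_qp() and the attribute names are the
 * standard libibverbs API.
 */
#include <infiniband/verbs.h>

static int example_move_qp_to_error(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = { .qp_state = IBV_QPS_ERR };

	/* Only the state attribute is masked in, i.e. attr_mask & IB_QP_STATE. */
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}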
 1366 
 1367 /**
 1368  * irdma_cq_free_rsrc - free up resources for cq
 1369  * @rf: RDMA PCI function
 1370  * @iwcq: cq ptr
 1371  */
 1372 void
 1373 irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
 1374 {
 1375         struct irdma_sc_cq *cq = &iwcq->sc_cq;
 1376 
 1377         if (!iwcq->user_mode) {
 1378                 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
 1379                 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
 1380         }
 1381 
 1382         irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
 1383 }
 1384 
 1385 /**
 1386  * irdma_free_cqbuf - worker to free a cq buffer
 1387  * @work: provides access to the cq buffer to free
 1388  */
 1389 static void
 1390 irdma_free_cqbuf(struct work_struct *work)
 1391 {
 1392         struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
 1393 
 1394         irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
 1395         kfree(cq_buf);
 1396 }
 1397 
 1398 /**
 1399  * irdma_process_resize_list - remove resized cq buffers from the resize_list
 1400  * @iwcq: cq which owns the resize_list
 1401  * @iwdev: irdma device
 1402  * @lcqe_buf: the buffer where the last cqe is received
 1403  */
 1404 int
 1405 irdma_process_resize_list(struct irdma_cq *iwcq,
 1406                           struct irdma_device *iwdev,
 1407                           struct irdma_cq_buf *lcqe_buf)
 1408 {
 1409         struct list_head *tmp_node, *list_node;
 1410         struct irdma_cq_buf *cq_buf;
 1411         int cnt = 0;
 1412 
 1413         list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
 1414                 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
 1415                 if (cq_buf == lcqe_buf)
 1416                         return cnt;
 1417 
 1418                 list_del(&cq_buf->list);
 1419                 queue_work(iwdev->cleanup_wq, &cq_buf->work);
 1420                 cnt++;
 1421         }
 1422 
 1423         return cnt;
 1424 }
 1425 
 1426 /**
 1427  * irdma_resize_cq - resize cq
 1428  * @ibcq: cq to be resized
 1429  * @entries: desired cq size
 1430  * @udata: user data
 1431  */
 1432 static int
 1433 irdma_resize_cq(struct ib_cq *ibcq, int entries,
 1434                 struct ib_udata *udata)
 1435 {
 1436 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
 1437         struct irdma_cq *iwcq = to_iwcq(ibcq);
 1438         struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
 1439         struct irdma_cqp_request *cqp_request;
 1440         struct cqp_cmds_info *cqp_info;
 1441         struct irdma_modify_cq_info *m_info;
 1442         struct irdma_modify_cq_info info = {0};
 1443         struct irdma_dma_mem kmem_buf;
 1444         struct irdma_cq_mr *cqmr_buf;
 1445         struct irdma_pbl *iwpbl_buf;
 1446         struct irdma_device *iwdev;
 1447         struct irdma_pci_f *rf;
 1448         struct irdma_cq_buf *cq_buf = NULL;
 1449         unsigned long flags;
 1450         int ret;
 1451 
 1452         iwdev = to_iwdev(ibcq->device);
 1453         rf = iwdev->rf;
 1454 
 1455         if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
 1456               IRDMA_FEATURE_CQ_RESIZE))
 1457                 return -EOPNOTSUPP;
 1458 
 1459         if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
 1460                 return -EINVAL;
 1461 
 1462         if (entries > rf->max_cqe)
 1463                 return -EINVAL;
 1464 
 1465         if (!iwcq->user_mode) {
 1466                 entries++;
 1467                 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 1468                         entries *= 2;
 1469         }
 1470 
 1471         info.cq_size = max(entries, 4);
 1472 
 1473         if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
 1474                 return 0;
 1475 
 1476         if (udata) {
 1477                 struct irdma_resize_cq_req req = {};
 1478                 struct irdma_ucontext *ucontext =
 1479                 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
 1480                                           ibucontext);
 1481 
 1482                 /* CQ resize not supported with legacy GEN_1 libi40iw */
 1483                 if (ucontext->legacy_mode)
 1484                         return -EOPNOTSUPP;
 1485 
 1486                 if (ib_copy_from_udata(&req, udata,
 1487                                        min(sizeof(req), udata->inlen)))
 1488                         return -EINVAL;
 1489 
 1490                 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 1491                 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
 1492                                           &ucontext->cq_reg_mem_list);
 1493                 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 1494 
 1495                 if (!iwpbl_buf)
 1496                         return -ENOMEM;
 1497 
 1498                 cqmr_buf = &iwpbl_buf->cq_mr;
 1499                 if (iwpbl_buf->pbl_allocated) {
 1500                         info.virtual_map = true;
 1501                         info.pbl_chunk_size = 1;
 1502                         info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
 1503                 } else {
 1504                         info.cq_pa = cqmr_buf->cq_pbl.addr;
 1505                 }
 1506         } else {
 1507                 /* Kmode CQ resize */
 1508                 int rsize;
 1509 
 1510                 rsize = info.cq_size * sizeof(struct irdma_cqe);
 1511                 kmem_buf.size = round_up(rsize, 256);
 1512                 kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
 1513                                                      kmem_buf.size, 256);
 1514                 if (!kmem_buf.va)
 1515                         return -ENOMEM;
 1516 
 1517                 info.cq_base = kmem_buf.va;
 1518                 info.cq_pa = kmem_buf.pa;
 1519                 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
 1520                 if (!cq_buf) {
 1521                         ret = -ENOMEM;
 1522                         goto error;
 1523                 }
 1524         }
 1525 
 1526         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 1527         if (!cqp_request) {
 1528                 ret = -ENOMEM;
 1529                 goto error;
 1530         }
 1531 
 1532         info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
 1533         info.cq_resize = true;
 1534 
 1535         cqp_info = &cqp_request->info;
 1536         m_info = &cqp_info->in.u.cq_modify.info;
 1537         memcpy(m_info, &info, sizeof(*m_info));
 1538 
 1539         cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
 1540         cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
 1541         cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
 1542         cqp_info->post_sq = 1;
 1543         ret = irdma_handle_cqp_op(rf, cqp_request);
 1544         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1545         if (ret)
 1546                 goto error;
 1547 
 1548         spin_lock_irqsave(&iwcq->lock, flags);
 1549         if (cq_buf) {
 1550                 cq_buf->kmem_buf = iwcq->kmem;
 1551                 cq_buf->hw = dev->hw;
 1552                 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
 1553                 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
 1554                 list_add_tail(&cq_buf->list, &iwcq->resize_list);
 1555                 iwcq->kmem = kmem_buf;
 1556         }
 1557 
 1558         irdma_sc_cq_resize(&iwcq->sc_cq, &info);
 1559         ibcq->cqe = info.cq_size - 1;
 1560         spin_unlock_irqrestore(&iwcq->lock, flags);
 1561 
 1562         return 0;
 1563 error:
 1564         if (!udata)
 1565                 irdma_free_dma_mem(dev->hw, &kmem_buf);
 1566         kfree(cq_buf);
 1567 
 1568         return ret;
 1569 }
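
/*
 * [Editor's illustration -- not part of irdma_verbs.c] Stand-alone sketch of
 * the kernel-mode CQ sizing math used above: the requested entry count gets
 * one extra entry, is doubled on GEN_2 hardware, clamped to a minimum of 4,
 * and the DMA buffer is rounded up to a 256-byte multiple.  The 64-byte CQE
 * size is an assumption for the example; the driver uses
 * sizeof(struct irdma_cqe).
 */
#include <stddef.h>
#include <stdio.h>

#define EX_CQE_SIZE 64u	/* assumed CQE size, for illustration only */

static size_t ex_cq_buf_size(unsigned int entries, int gen2)
{
	unsigned int cq_size;
	size_t rsize;

	entries++;			/* extra entry, as in the kernel path */
	if (gen2)
		entries *= 2;		/* GEN_2 doubles the ring */
	cq_size = entries > 4 ? entries : 4;
	rsize = (size_t)cq_size * EX_CQE_SIZE;
	return (rsize + 255) & ~(size_t)255;	/* round_up(rsize, 256) */
}

int main(void)
{
	printf("%zu\n", ex_cq_buf_size(128, 1));	/* 128 entries on GEN_2 -> 258 CQEs */
	return 0;
}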
 1570 
 1571 /**
 1572  * irdma_get_mr_access - get hw MR access permissions from IB access flags
 1573  * @access: IB access flags
 1574  */
 1575 static inline u16 irdma_get_mr_access(int access) {
 1576         u16 hw_access = 0;
 1577 
 1578         hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
 1579             IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
 1580         hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
 1581             IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
 1582         hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
 1583             IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
 1584         hw_access |= (access & IB_ACCESS_MW_BIND) ?
 1585             IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
 1586         hw_access |= (access & IB_ZERO_BASED) ?
 1587             IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
 1588         hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
 1589 
 1590         return hw_access;
 1591 }
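
/*
 * [Editor's illustration -- not part of irdma_verbs.c] Stand-alone sketch of
 * the IB-to-HW access translation performed by irdma_get_mr_access() above:
 * each requested IB right sets the matching HW bit and local read is always
 * granted (bind-window and zero-based handling omitted for brevity).  The bit
 * values are assumed stand-ins; the driver uses the IB_ACCESS_* and
 * IRDMA_ACCESS_FLAGS_* definitions.
 */
#include <stdint.h>

#define EX_IB_LOCAL_WRITE	0x1	/* assumed stand-in for IB_ACCESS_LOCAL_WRITE */
#define EX_IB_REMOTE_WRITE	0x2	/* assumed stand-in for IB_ACCESS_REMOTE_WRITE */
#define EX_IB_REMOTE_READ	0x4	/* assumed stand-in for IB_ACCESS_REMOTE_READ */
#define EX_HW_LOCALREAD		0x01	/* assumed stand-ins for the HW bits */
#define EX_HW_LOCALWRITE	0x02
#define EX_HW_REMOTEREAD	0x04
#define EX_HW_REMOTEWRITE	0x08

static uint16_t ex_get_mr_access(int access)
{
	uint16_t hw = EX_HW_LOCALREAD;	/* local read is always granted */

	if (access & EX_IB_LOCAL_WRITE)
		hw |= EX_HW_LOCALWRITE;
	if (access & EX_IB_REMOTE_WRITE)
		hw |= EX_HW_REMOTEWRITE;
	if (access & EX_IB_REMOTE_READ)
		hw |= EX_HW_REMOTEREAD;
	return hw;
}

/* e.g. ex_get_mr_access(EX_IB_LOCAL_WRITE | EX_IB_REMOTE_READ) == 0x07 */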
 1592 
 1593 /**
 1594  * irdma_free_stag - free stag resource
 1595  * @iwdev: irdma device
 1596  * @stag: stag to free
 1597  */
 1598 void
 1599 irdma_free_stag(struct irdma_device *iwdev, u32 stag)
 1600 {
 1601         u32 stag_idx;
 1602 
 1603         stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
 1604         irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
 1605 }
 1606 
 1607 /**
 1608  * irdma_create_stag - create random stag
 1609  * @iwdev: irdma device
 1610  */
 1611 u32
 1612 irdma_create_stag(struct irdma_device *iwdev)
 1613 {
 1614         u32 stag;
 1615         u32 stag_index = 0;
 1616         u32 next_stag_index;
 1617         u32 driver_key;
 1618         u32 random;
 1619         u8 consumer_key;
 1620         int ret;
 1621 
 1622         get_random_bytes(&random, sizeof(random));
 1623         consumer_key = (u8)random;
 1624 
 1625         driver_key = random & ~iwdev->rf->mr_stagmask;
 1626         next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
 1627         next_stag_index %= iwdev->rf->max_mr;
 1628 
 1629         ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
 1630                                iwdev->rf->max_mr, &stag_index,
 1631                                &next_stag_index);
 1632         if (ret)
 1633                 return 0;
 1634         stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
 1635         stag |= driver_key;
 1636         stag += (u32)consumer_key;
 1637 
 1638         return stag;
 1639 }
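
/*
 * [Editor's illustration -- not part of irdma_verbs.c] How the 32-bit stag is
 * assembled above: the allocated index lands in the bits selected by the MR
 * stag mask, the random driver key fills the bits outside that mask, and the
 * random consumer key occupies the low byte.  The shift and mask values below
 * are assumptions for the example; the driver takes them from
 * IRDMA_CQPSQ_STAG_IDX_S and rf->mr_stagmask.
 */
#include <stdint.h>

#define EX_STAG_IDX_SHIFT	8u		/* assumed IRDMA_CQPSQ_STAG_IDX_S */
#define EX_MR_STAGMASK		0x00ffff00u	/* assumed rf->mr_stagmask */

static uint32_t ex_compose_stag(uint32_t stag_index, uint32_t random)
{
	uint32_t driver_key = random & ~EX_MR_STAGMASK;
	uint8_t consumer_key = (uint8_t)random;
	uint32_t stag = stag_index << EX_STAG_IDX_SHIFT;

	stag |= driver_key;		/* randomize the bits outside the index */
	stag += consumer_key;		/* low-byte key handed to the consumer */
	return stag;
}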
 1640 
 1641 /**
 1642  * irdma_check_mem_contiguous - check if page addresses stored in arr are contiguous
 1643  * @arr: lvl1 pbl array
 1644  * @npages: page count
 1645  * @pg_size: page size
 1646  *
 1647  */
 1648 static bool
 1649 irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
 1650 {
 1651         u32 pg_idx;
 1652 
 1653         for (pg_idx = 0; pg_idx < npages; pg_idx++) {
 1654                 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
 1655                         return false;
 1656         }
 1657 
 1658         return true;
 1659 }
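
/*
 * [Editor's illustration -- not part of irdma_verbs.c] Self-contained version
 * of the contiguity test above: every entry must equal the first address plus
 * pg_size times its index, i.e. the pages form one physically contiguous run.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ex_check_mem_contiguous(const uint64_t *arr, uint32_t npages,
				    uint32_t pg_size)
{
	for (uint32_t i = 0; i < npages; i++)
		if (arr[0] + (uint64_t)pg_size * i != arr[i])
			return false;

	return true;
}

/*
 * Example with 4 KiB pages: {0x10000, 0x11000, 0x12000} is contiguous,
 * {0x10000, 0x11000, 0x13000} is not.
 */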
 1660 
 1661 /**
 1662  * irdma_check_mr_contiguous - check if MR is physically contiguous
 1663  * @palloc: pbl allocation struct
 1664  * @pg_size: page size
 1665  */
 1666 static bool
 1667 irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
 1668                           u32 pg_size)
 1669 {
 1670         struct irdma_pble_level2 *lvl2 = &palloc->level2;
 1671         struct irdma_pble_info *leaf = lvl2->leaf;
 1672         u64 *arr = NULL;
 1673         u64 *start_addr = NULL;
 1674         int i;
 1675         bool ret;
 1676 
 1677         if (palloc->level == PBLE_LEVEL_1) {
 1678                 arr = palloc->level1.addr;
 1679                 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
 1680                                                  pg_size);
 1681                 return ret;
 1682         }
 1683 
 1684         start_addr = leaf->addr;
 1685 
 1686         for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
 1687                 arr = leaf->addr;
 1688                 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
 1689                         return false;
 1690                 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
 1691                 if (!ret)
 1692                         return false;
 1693         }
 1694 
 1695         return true;
 1696 }
 1697 
 1698 /**
 1699  * irdma_setup_pbles - copy user page addresses to pbles
 1700  * @rf: RDMA PCI function
 1701  * @iwmr: mr pointer for this memory registration
 1702  * @use_pbles: flag to use pbles
 1703  * @lvl_1_only: request only level 1 pble if true
 1704  */
 1705 static int
 1706 irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
 1707                   bool use_pbles, bool lvl_1_only)
 1708 {
 1709         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 1710         struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 1711         struct irdma_pble_info *pinfo;
 1712         u64 *pbl;
 1713         int status;
 1714         enum irdma_pble_level level = PBLE_LEVEL_1;
 1715 
 1716         if (use_pbles) {
 1717                 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
 1718                                         lvl_1_only);
 1719                 if (status)
 1720                         return status;
 1721 
 1722                 iwpbl->pbl_allocated = true;
 1723                 level = palloc->level;
 1724                 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
 1725                     palloc->level2.leaf;
 1726                 pbl = pinfo->addr;
 1727         } else {
 1728                 pbl = iwmr->pgaddrmem;
 1729         }
 1730 
 1731         irdma_copy_user_pgaddrs(iwmr, pbl, level);
 1732 
 1733         if (use_pbles)
 1734                 iwmr->pgaddrmem[0] = *pbl;
 1735 
 1736         return 0;
 1737 }
 1738 
 1739 /**
 1740  * irdma_handle_q_mem - handle memory for qp and cq
 1741  * @iwdev: irdma device
 1742  * @req: information for q memory management
 1743  * @iwpbl: pble struct
 1744  * @use_pbles: flag to use pble
 1745  */
 1746 static int
 1747 irdma_handle_q_mem(struct irdma_device *iwdev,
 1748                    struct irdma_mem_reg_req *req,
 1749                    struct irdma_pbl *iwpbl, bool use_pbles)
 1750 {
 1751         struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 1752         struct irdma_mr *iwmr = iwpbl->iwmr;
 1753         struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
 1754         struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
 1755         struct irdma_hmc_pble *hmc_p;
 1756         u64 *arr = iwmr->pgaddrmem;
 1757         u32 pg_size, total;
 1758         int err = 0;
 1759         bool ret = true;
 1760 
 1761         pg_size = iwmr->page_size;
 1762         err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
 1763         if (err)
 1764                 return err;
 1765 
 1766         if (use_pbles)
 1767                 arr = palloc->level1.addr;
 1768 
 1769         switch (iwmr->type) {
 1770         case IRDMA_MEMREG_TYPE_QP:
 1771                 total = req->sq_pages + req->rq_pages;
 1772                 hmc_p = &qpmr->sq_pbl;
 1773                 qpmr->shadow = (dma_addr_t) arr[total];
 1774                 if (use_pbles) {
 1775                         ret = irdma_check_mem_contiguous(arr, req->sq_pages,
 1776                                                          pg_size);
 1777                         if (ret)
 1778                                 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
 1779                                                                  req->rq_pages,
 1780                                                                  pg_size);
 1781                 }
 1782 
 1783                 if (!ret) {
 1784                         hmc_p->idx = palloc->level1.idx;
 1785                         hmc_p = &qpmr->rq_pbl;
 1786                         hmc_p->idx = palloc->level1.idx + req->sq_pages;
 1787                 } else {
 1788                         hmc_p->addr = arr[0];
 1789                         hmc_p = &qpmr->rq_pbl;
 1790                         hmc_p->addr = arr[req->sq_pages];
 1791                 }
 1792                 break;
 1793         case IRDMA_MEMREG_TYPE_CQ:
 1794                 hmc_p = &cqmr->cq_pbl;
 1795 
 1796                 if (!cqmr->split)
 1797                         cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
 1798 
 1799                 if (use_pbles)
 1800                         ret = irdma_check_mem_contiguous(arr, req->cq_pages,
 1801                                                          pg_size);
 1802 
 1803                 if (!ret)
 1804                         hmc_p->idx = palloc->level1.idx;
 1805                 else
 1806                         hmc_p->addr = arr[0];
 1807                 break;
 1808         default:
 1809                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 1810                             "MR type error\n");
 1811                 err = -EINVAL;
 1812         }
 1813 
 1814         if (use_pbles && ret) {
 1815                 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 1816                 iwpbl->pbl_allocated = false;
 1817         }
 1818 
 1819         return err;
 1820 }
 1821 
 1822 /**
 1823  * irdma_hw_alloc_mw - create the hw memory window
 1824  * @iwdev: irdma device
 1825  * @iwmr: pointer to memory window info
 1826  */
 1827 int
 1828 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
 1829 {
 1830         struct irdma_mw_alloc_info *info;
 1831         struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
 1832         struct irdma_cqp_request *cqp_request;
 1833         struct cqp_cmds_info *cqp_info;
 1834         int status;
 1835 
 1836         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 1837         if (!cqp_request)
 1838                 return -ENOMEM;
 1839 
 1840         cqp_info = &cqp_request->info;
 1841         info = &cqp_info->in.u.mw_alloc.info;
 1842         memset(info, 0, sizeof(*info));
 1843         if (iwmr->ibmw.type == IB_MW_TYPE_1)
 1844                 info->mw_wide = true;
 1845 
 1846         info->page_size = PAGE_SIZE;
 1847         info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 1848         info->pd_id = iwpd->sc_pd.pd_id;
 1849         info->remote_access = true;
 1850         cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
 1851         cqp_info->post_sq = 1;
 1852         cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
 1853         cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
 1854         status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 1855         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 1856 
 1857         return status;
 1858 }
 1859 
 1860 /**
 1861  * irdma_dealloc_mw - Dealloc memory window
 1862  * @ibmw: memory window structure.
 1863  */
 1864 static int
 1865 irdma_dealloc_mw(struct ib_mw *ibmw)
 1866 {
 1867         struct ib_pd *ibpd = ibmw->pd;
 1868         struct irdma_pd *iwpd = to_iwpd(ibpd);
 1869         struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
 1870         struct irdma_device *iwdev = to_iwdev(ibmw->device);
 1871         struct irdma_cqp_request *cqp_request;
 1872         struct cqp_cmds_info *cqp_info;
 1873         struct irdma_dealloc_stag_info *info;
 1874 
 1875         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 1876         if (!cqp_request)
 1877                 return -ENOMEM;
 1878 
 1879         cqp_info = &cqp_request->info;
 1880         info = &cqp_info->in.u.dealloc_stag.info;
 1881         memset(info, 0, sizeof(*info));
 1882         info->pd_id = iwpd->sc_pd.pd_id;
 1883         info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
 1884         info->mr = false;
 1885         cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
 1886         cqp_info->post_sq = 1;
 1887         cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
 1888         cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
 1889         irdma_handle_cqp_op(iwdev->rf, cqp_request);
 1890         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 1891         irdma_free_stag(iwdev, iwmr->stag);
 1892         kfree(iwmr);
 1893 
 1894         return 0;
 1895 }
 1896 
 1897 /**
 1898  * irdma_hw_alloc_stag - cqp command to allocate stag
 1899  * @iwdev: irdma device
 1900  * @iwmr: irdma mr pointer
 1901  */
 1902 int
 1903 irdma_hw_alloc_stag(struct irdma_device *iwdev,
 1904                     struct irdma_mr *iwmr)
 1905 {
 1906         struct irdma_allocate_stag_info *info;
 1907         struct ib_pd *pd = iwmr->ibmr.pd;
 1908         struct irdma_pd *iwpd = to_iwpd(pd);
 1909         struct irdma_cqp_request *cqp_request;
 1910         struct cqp_cmds_info *cqp_info;
 1911         int status;
 1912 
 1913         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 1914         if (!cqp_request)
 1915                 return -ENOMEM;
 1916 
 1917         cqp_info = &cqp_request->info;
 1918         info = &cqp_info->in.u.alloc_stag.info;
 1919         memset(info, 0, sizeof(*info));
 1920         info->page_size = PAGE_SIZE;
 1921         info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 1922         info->pd_id = iwpd->sc_pd.pd_id;
 1923         info->total_len = iwmr->len;
 1924         info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
 1925         info->remote_access = true;
 1926         cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
 1927         cqp_info->post_sq = 1;
 1928         cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
 1929         cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
 1930         status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 1931         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 1932         if (!status)
 1933                 iwmr->is_hwreg = 1;
 1934 
 1935         return status;
 1936 }
 1937 
 1938 /**
 1939  * irdma_set_page - populate pbl list for fmr
 1940  * @ibmr: ib mem to access iwarp mr pointer
 1941  * @addr: page dma address for pbl list
 1942  */
 1943 static int
 1944 irdma_set_page(struct ib_mr *ibmr, u64 addr)
 1945 {
 1946         struct irdma_mr *iwmr = to_iwmr(ibmr);
 1947         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 1948         struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 1949         u64 *pbl;
 1950 
 1951         if (unlikely(iwmr->npages == iwmr->page_cnt))
 1952                 return -ENOMEM;
 1953 
 1954         if (palloc->level == PBLE_LEVEL_2) {
 1955                 struct irdma_pble_info *palloc_info =
 1956                 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
 1957 
 1958                 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
 1959         } else {
 1960                 pbl = palloc->level1.addr;
 1961                 pbl[iwmr->npages] = addr;
 1962         }
 1963 
 1964         iwmr->npages++;
 1965         return 0;
 1966 }
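
/*
 * [Editor's illustration -- not part of irdma_verbs.c] The level-2 indexing
 * arithmetic used above, written out with plain integers: the upper bits of
 * npages select the leaf PBLE page and the low bits select the slot inside
 * it.  The 512-entries-per-page figure matches the PBLE_512_SHIFT name but is
 * an assumption here; the driver uses PBLE_PER_PAGE.
 */
#include <stdint.h>

#define EX_PBLE_PER_PAGE	512u	/* assumed PBLE_PER_PAGE */
#define EX_PBLE_512_SHIFT	9u	/* log2(512), assumed PBLE_512_SHIFT */

static void ex_level2_slot(uint32_t npages, uint32_t *leaf, uint32_t *slot)
{
	*leaf = npages >> EX_PBLE_512_SHIFT;		/* which leaf page */
	*slot = npages & (EX_PBLE_PER_PAGE - 1);	/* index inside that page */
}

/* e.g. npages = 1030 lands in leaf 2, slot 6. */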
 1967 
 1968 /**
 1969  * irdma_map_mr_sg - map sg list for fmr
 1970  * @ibmr: ib mem to access iwarp mr pointer
 1971  * @sg: scatter gather list
 1972  * @sg_nents: number of sg pages
 1973  * @sg_offset: offset into the first sg element
 1974  */
 1975 static int
 1976 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 1977                 int sg_nents, unsigned int *sg_offset)
 1978 {
 1979         struct irdma_mr *iwmr = to_iwmr(ibmr);
 1980 
 1981         iwmr->npages = 0;
 1982 
 1983         return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
 1984 }
 1985 
 1986 /**
 1987  * irdma_hwreg_mr - send cqp command for memory registration
 1988  * @iwdev: irdma device
 1989  * @iwmr: irdma mr pointer
 1990  * @access: access for MR
 1991  */
 1992 int
 1993 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 1994                u16 access)
 1995 {
 1996         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 1997         struct irdma_reg_ns_stag_info *stag_info;
 1998         struct ib_pd *pd = iwmr->ibmr.pd;
 1999         struct irdma_pd *iwpd = to_iwpd(pd);
 2000         struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 2001         struct irdma_cqp_request *cqp_request;
 2002         struct cqp_cmds_info *cqp_info;
 2003         int ret;
 2004 
 2005         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 2006         if (!cqp_request)
 2007                 return -ENOMEM;
 2008 
 2009         cqp_info = &cqp_request->info;
 2010         stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
 2011         memset(stag_info, 0, sizeof(*stag_info));
 2012         stag_info->va = iwpbl->user_base;
 2013         stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 2014         stag_info->stag_key = (u8)iwmr->stag;
 2015         stag_info->total_len = iwmr->len;
 2016         stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
 2017         stag_info->access_rights = irdma_get_mr_access(access);
 2018         stag_info->pd_id = iwpd->sc_pd.pd_id;
 2019         if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
 2020                 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
 2021         else
 2022                 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
 2023         stag_info->page_size = iwmr->page_size;
 2024 
 2025         if (iwpbl->pbl_allocated) {
 2026                 if (palloc->level == PBLE_LEVEL_1) {
 2027                         stag_info->first_pm_pbl_index = palloc->level1.idx;
 2028                         stag_info->chunk_size = 1;
 2029                 } else {
 2030                         stag_info->first_pm_pbl_index = palloc->level2.root.idx;
 2031                         stag_info->chunk_size = 3;
 2032                 }
 2033         } else {
 2034                 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
 2035         }
 2036 
 2037         cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
 2038         cqp_info->post_sq = 1;
 2039         cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
 2040         cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
 2041         ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 2042         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 2043 
 2044         if (!ret)
 2045                 iwmr->is_hwreg = 1;
 2046 
 2047         return ret;
 2048 }
 2049 
 2050 /**
 2051  * irdma_reg_user_mr - Register a user memory region
 2052  * @pd: ptr of pd
 2053  * @start: virtual start address
 2054  * @len: length of mr
 2055  * @virt: virtual address
 2056  * @access: access of mr
 2057  * @udata: user data
 2058  */
 2059 static struct ib_mr *
 2060 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 2061                   u64 virt, int access,
 2062                   struct ib_udata *udata)
 2063 {
 2064 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 2065         struct irdma_device *iwdev = to_iwdev(pd->device);
 2066         struct irdma_ucontext *ucontext;
 2067         struct irdma_pble_alloc *palloc;
 2068         struct irdma_pbl *iwpbl;
 2069         struct irdma_mr *iwmr;
 2070         struct ib_umem *region;
 2071         struct irdma_mem_reg_req req = {};
 2072         u32 total, stag = 0;
 2073         u8 shadow_pgcnt = 1;
 2074         bool use_pbles = false;
 2075         unsigned long flags;
 2076         int err = -EINVAL;
 2077         int ret;
 2078 
 2079         if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 2080                 return ERR_PTR(-EINVAL);
 2081 
 2082         if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
 2083                 return ERR_PTR(-EINVAL);
 2084 
 2085         region = ib_umem_get(pd->uobject->context, start, len, access, 0);
 2086 
 2087         if (IS_ERR(region)) {
 2088                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 2089                             "Failed to create ib_umem region\n");
 2090                 return (struct ib_mr *)region;
 2091         }
 2092 
 2093         if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
 2094                 ib_umem_release(region);
 2095                 return ERR_PTR(-EFAULT);
 2096         }
 2097 
 2098         iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
 2099         if (!iwmr) {
 2100                 ib_umem_release(region);
 2101                 return ERR_PTR(-ENOMEM);
 2102         }
 2103 
 2104         iwpbl = &iwmr->iwpbl;
 2105         iwpbl->iwmr = iwmr;
 2106         iwmr->region = region;
 2107         iwmr->ibmr.pd = pd;
 2108         iwmr->ibmr.device = pd->device;
 2109         iwmr->ibmr.iova = virt;
 2110         iwmr->page_size = IRDMA_HW_PAGE_SIZE;
 2111         iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
 2112 
 2113         iwmr->len = region->length;
 2114         iwpbl->user_base = virt;
 2115         palloc = &iwpbl->pble_alloc;
 2116         iwmr->type = req.reg_type;
 2117         iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
 2118 
 2119         switch (req.reg_type) {
 2120         case IRDMA_MEMREG_TYPE_QP:
 2121                 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
 2122                 if (total > iwmr->page_cnt) {
 2123                         err = -EINVAL;
 2124                         goto error;
 2125                 }
 2126                 total = req.sq_pages + req.rq_pages;
 2127                 use_pbles = (total > 2);
 2128                 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 2129                 if (err)
 2130                         goto error;
 2131 
 2132                 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
 2133                                                      ibucontext);
 2134                 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
 2135                 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
 2136                 iwpbl->on_list = true;
 2137                 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 2138                 break;
 2139         case IRDMA_MEMREG_TYPE_CQ:
 2140                 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
 2141                         shadow_pgcnt = 0;
 2142                 total = req.cq_pages + shadow_pgcnt;
 2143                 if (total > iwmr->page_cnt) {
 2144                         err = -EINVAL;
 2145                         goto error;
 2146                 }
 2147 
 2148                 use_pbles = (req.cq_pages > 1);
 2149                 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 2150                 if (err)
 2151                         goto error;
 2152 
 2153                 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
 2154                                                      ibucontext);
 2155                 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 2156                 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
 2157                 iwpbl->on_list = true;
 2158                 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 2159                 break;
 2160         case IRDMA_MEMREG_TYPE_MEM:
 2161                 use_pbles = (iwmr->page_cnt != 1);
 2162 
 2163                 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
 2164                 if (err)
 2165                         goto error;
 2166 
 2167                 if (use_pbles) {
 2168                         ret = irdma_check_mr_contiguous(palloc,
 2169                                                         iwmr->page_size);
 2170                         if (ret) {
 2171                                 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 2172                                 iwpbl->pbl_allocated = false;
 2173                         }
 2174                 }
 2175 
 2176                 stag = irdma_create_stag(iwdev);
 2177                 if (!stag) {
 2178                         err = -ENOMEM;
 2179                         goto error;
 2180                 }
 2181 
 2182                 iwmr->stag = stag;
 2183                 iwmr->ibmr.rkey = stag;
 2184                 iwmr->ibmr.lkey = stag;
 2185                 iwmr->access = access;
 2186                 err = irdma_hwreg_mr(iwdev, iwmr, access);
 2187                 if (err) {
 2188                         irdma_free_stag(iwdev, stag);
 2189                         goto error;
 2190                 }
 2191 
 2192                 break;
 2193         default:
 2194                 goto error;
 2195         }
 2196 
 2197         iwmr->type = req.reg_type;
 2198 
 2199         return &iwmr->ibmr;
 2200 
 2201 error:
 2202         if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
 2203                 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 2204         ib_umem_release(region);
 2205         kfree(iwmr);
 2206 
 2207         return ERR_PTR(err);
 2208 }
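
/*
 * [Editor's illustration -- not part of irdma_verbs.c] A minimal user-space
 * sketch of the registration that reaches the IRDMA_MEMREG_TYPE_MEM path
 * above through libibverbs.  The buffer size and access flags are arbitrary
 * example values; ibv_reg_mr() is the standard libibverbs API.
 */
#include <stdlib.h>
#include <infiniband/verbs.h>

static struct ibv_mr *example_reg_mr(struct ibv_pd *pd)
{
	size_t len = 1 << 20;		/* 1 MiB example buffer */
	void *buf = malloc(len);

	if (!buf)
		return NULL;

	/* The access bits are translated by irdma_get_mr_access(). */
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
}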
 2209 
 2210 int
 2211 irdma_hwdereg_mr(struct ib_mr *ib_mr)
 2212 {
 2213         struct irdma_device *iwdev = to_iwdev(ib_mr->device);
 2214         struct irdma_mr *iwmr = to_iwmr(ib_mr);
 2215         struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
 2216         struct irdma_dealloc_stag_info *info;
 2217         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 2218         struct irdma_cqp_request *cqp_request;
 2219         struct cqp_cmds_info *cqp_info;
 2220         int status;
 2221 
 2222         /*
 2223          * Skip the HW MR de-register when it was already de-registered
 2224          * during an MR re-register and the re-registration failed.
 2225          */
 2226         if (!iwmr->is_hwreg)
 2227                 return 0;
 2228 
 2229         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 2230         if (!cqp_request)
 2231                 return -ENOMEM;
 2232 
 2233         cqp_info = &cqp_request->info;
 2234         info = &cqp_info->in.u.dealloc_stag.info;
 2235         memset(info, 0, sizeof(*info));
 2236         info->pd_id = iwpd->sc_pd.pd_id;
 2237         info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
 2238         info->mr = true;
 2239         if (iwpbl->pbl_allocated)
 2240                 info->dealloc_pbl = true;
 2241 
 2242         cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
 2243         cqp_info->post_sq = 1;
 2244         cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
 2245         cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
 2246         status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 2247         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 2248 
 2249         if (!status)
 2250                 iwmr->is_hwreg = 0;
 2251 
 2252         return status;
 2253 }
 2254 
 2255 /**
 2256  * irdma_rereg_mr_trans - Re-register a user MR for a change of translation
 2257  * @iwmr: ptr of iwmr @start: virtual start address @len: length of mr
 2258  * @virt: virtual address @udata: user data
 2259  *
 2260  * Re-registers the region, reusing the stag from the original registration.
 2261  */
 2262 struct ib_mr *
 2263 irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
 2264                      u64 virt, struct ib_udata *udata)
 2265 {
 2266         struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
 2267         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 2268         struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 2269         struct ib_pd *pd = iwmr->ibmr.pd;
 2270         struct ib_umem *region;
 2271         bool use_pbles;
 2272         int err;
 2273 
 2274         region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
 2275 
 2276         if (IS_ERR(region)) {
 2277                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 2278                             "Failed to create ib_umem region\n");
 2279                 return (struct ib_mr *)region;
 2280         }
 2281 
 2282         iwmr->region = region;
 2283         iwmr->ibmr.iova = virt;
 2284         iwmr->ibmr.pd = pd;
 2285         iwmr->page_size = PAGE_SIZE;
 2286 
 2287         iwmr->len = region->length;
 2288         iwpbl->user_base = virt;
 2289         iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
 2290                                                       virt);
 2291 
 2292         use_pbles = (iwmr->page_cnt != 1);
 2293 
 2294         err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
 2295         if (err)
 2296                 goto error;
 2297 
 2298         if (use_pbles) {
 2299                 err = irdma_check_mr_contiguous(palloc,
 2300                                                 iwmr->page_size);
 2301                 if (err) {
 2302                         irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 2303                         iwpbl->pbl_allocated = false;
 2304                 }
 2305         }
 2306 
 2307         err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
 2308         if (err)
 2309                 goto error;
 2310 
 2311         return &iwmr->ibmr;
 2312 
 2313 error:
 2314         if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
 2315                 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 2316                 iwpbl->pbl_allocated = false;
 2317         }
 2318         ib_umem_release(region);
 2319         iwmr->region = NULL;
 2320 
 2321         return ERR_PTR(err);
 2322 }
 2323 
 2324 /**
 2325  * irdma_reg_phys_mr - register kernel physical memory
 2326  * @pd: ibpd pointer
 2327  * @addr: physical address of memory to register
 2328  * @size: size of memory to register
 2329  * @access: Access rights
 2330  * @iova_start: start of virtual address for physical buffers
 2331  */
 2332 struct ib_mr *
 2333 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
 2334                   u64 *iova_start)
 2335 {
 2336         struct irdma_device *iwdev = to_iwdev(pd->device);
 2337         struct irdma_pbl *iwpbl;
 2338         struct irdma_mr *iwmr;
 2339         u32 stag;
 2340         int ret;
 2341 
 2342         iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
 2343         if (!iwmr)
 2344                 return ERR_PTR(-ENOMEM);
 2345 
 2346         iwmr->ibmr.pd = pd;
 2347         iwmr->ibmr.device = pd->device;
 2348         iwpbl = &iwmr->iwpbl;
 2349         iwpbl->iwmr = iwmr;
 2350         iwmr->type = IRDMA_MEMREG_TYPE_MEM;
 2351         iwpbl->user_base = *iova_start;
 2352         stag = irdma_create_stag(iwdev);
 2353         if (!stag) {
 2354                 ret = -ENOMEM;
 2355                 goto err;
 2356         }
 2357 
 2358         iwmr->stag = stag;
 2359         iwmr->ibmr.iova = *iova_start;
 2360         iwmr->ibmr.rkey = stag;
 2361         iwmr->ibmr.lkey = stag;
 2362         iwmr->page_cnt = 1;
 2363         iwmr->pgaddrmem[0] = addr;
 2364         iwmr->len = size;
 2365         iwmr->page_size = SZ_4K;
 2366         ret = irdma_hwreg_mr(iwdev, iwmr, access);
 2367         if (ret) {
 2368                 irdma_free_stag(iwdev, stag);
 2369                 goto err;
 2370         }
 2371 
 2372         return &iwmr->ibmr;
 2373 
 2374 err:
 2375         kfree(iwmr);
 2376 
 2377         return ERR_PTR(ret);
 2378 }
 2379 
 2380 /**
 2381  * irdma_get_dma_mr - register physical mem
 2382  * @pd: ptr of pd
 2383  * @acc: access for memory
 2384  */
 2385 static struct ib_mr *
 2386 irdma_get_dma_mr(struct ib_pd *pd, int acc)
 2387 {
 2388         u64 kva = 0;
 2389 
 2390         return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
 2391 }
 2392 
 2393 /**
 2394  * irdma_del_memlist - Deleting pbl list entries for CQ/QP
 2395  * @iwmr: iwmr for IB's user page addresses
 2396  * @ucontext: ptr to user context
 2397  */
 2398 void
 2399 irdma_del_memlist(struct irdma_mr *iwmr,
 2400                   struct irdma_ucontext *ucontext)
 2401 {
 2402         struct irdma_pbl *iwpbl = &iwmr->iwpbl;
 2403         unsigned long flags;
 2404 
 2405         switch (iwmr->type) {
 2406         case IRDMA_MEMREG_TYPE_CQ:
 2407                 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 2408                 if (iwpbl->on_list) {
 2409                         iwpbl->on_list = false;
 2410                         list_del(&iwpbl->list);
 2411                 }
 2412                 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 2413                 break;
 2414         case IRDMA_MEMREG_TYPE_QP:
 2415                 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
 2416                 if (iwpbl->on_list) {
 2417                         iwpbl->on_list = false;
 2418                         list_del(&iwpbl->list);
 2419                 }
 2420                 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 2421                 break;
 2422         default:
 2423                 break;
 2424         }
 2425 }
 2426 
 2427 /**
 2428  * irdma_copy_sg_list - copy sg list for qp
 2429  * @sg_list: destination sg list (irdma format)
 2430  * @sgl: source sg list (ib format)
 2431  * @num_sges: count of sg entries
 2432  */
 2433 static void
 2434 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
 2435                    int num_sges)
 2436 {
 2437         unsigned int i;
 2438 
 2439         for (i = 0; i < num_sges; i++) {
 2440                 sg_list[i].tag_off = sgl[i].addr;
 2441                 sg_list[i].len = sgl[i].length;
 2442                 sg_list[i].stag = sgl[i].lkey;
 2443         }
 2444 }
 2445 
 2446 /**
 2447  * irdma_post_send - post send wr for kernel application
 2448  * @ibqp: qp ptr for wr
 2449  * @ib_wr: work request ptr
 2450  * @bad_wr: return of bad wr if err
 2451  */
 2452 static int
 2453 irdma_post_send(struct ib_qp *ibqp,
 2454                 const struct ib_send_wr *ib_wr,
 2455                 const struct ib_send_wr **bad_wr)
 2456 {
 2457         struct irdma_qp *iwqp;
 2458         struct irdma_qp_uk *ukqp;
 2459         struct irdma_sc_dev *dev;
 2460         struct irdma_post_sq_info info;
 2461         int err = 0;
 2462         unsigned long flags;
 2463         bool inv_stag;
 2464         struct irdma_ah *ah;
 2465 
 2466         iwqp = to_iwqp(ibqp);
 2467         ukqp = &iwqp->sc_qp.qp_uk;
 2468         dev = &iwqp->iwdev->rf->sc_dev;
 2469 
 2470         spin_lock_irqsave(&iwqp->lock, flags);
 2471         while (ib_wr) {
 2472                 memset(&info, 0, sizeof(info));
 2473                 inv_stag = false;
 2474                 info.wr_id = (ib_wr->wr_id);
 2475                 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
 2476                         info.signaled = true;
 2477                 if (ib_wr->send_flags & IB_SEND_FENCE)
 2478                         info.read_fence = true;
 2479                 switch (ib_wr->opcode) {
 2480                 case IB_WR_SEND_WITH_IMM:
 2481                         if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
 2482                                 info.imm_data_valid = true;
 2483                                 info.imm_data = ntohl(ib_wr->ex.imm_data);
 2484                         } else {
 2485                                 err = -EINVAL;
 2486                                 break;
 2487                         }
 2488                         /* fallthrough */
 2489                 case IB_WR_SEND:
 2490                 case IB_WR_SEND_WITH_INV:
 2491                         if (ib_wr->opcode == IB_WR_SEND ||
 2492                             ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
 2493                                 if (ib_wr->send_flags & IB_SEND_SOLICITED)
 2494                                         info.op_type = IRDMA_OP_TYPE_SEND_SOL;
 2495                                 else
 2496                                         info.op_type = IRDMA_OP_TYPE_SEND;
 2497                         } else {
 2498                                 if (ib_wr->send_flags & IB_SEND_SOLICITED)
 2499                                         info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
 2500                                 else
 2501                                         info.op_type = IRDMA_OP_TYPE_SEND_INV;
 2502                                 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
 2503                         }
 2504 
 2505                         info.op.send.num_sges = ib_wr->num_sge;
 2506                         info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
 2507                         if (iwqp->ibqp.qp_type == IB_QPT_UD ||
 2508                             iwqp->ibqp.qp_type == IB_QPT_GSI) {
 2509                                 ah = to_iwah(ud_wr(ib_wr)->ah);
 2510                                 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
 2511                                 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
 2512                                 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
 2513                         }
 2514 
 2515                         if (ib_wr->send_flags & IB_SEND_INLINE)
 2516                                 err = irdma_uk_inline_send(ukqp, &info, false);
 2517                         else
 2518                                 err = irdma_uk_send(ukqp, &info, false);
 2519                         break;
 2520                 case IB_WR_RDMA_WRITE_WITH_IMM:
 2521                         if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
 2522                                 info.imm_data_valid = true;
 2523                                 info.imm_data = ntohl(ib_wr->ex.imm_data);
 2524                         } else {
 2525                                 err = -EINVAL;
 2526                                 break;
 2527                         }
 2528                         /* fallthrough */
 2529                 case IB_WR_RDMA_WRITE:
 2530                         if (ib_wr->send_flags & IB_SEND_SOLICITED)
 2531                                 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
 2532                         else
 2533                                 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 2534 
 2535                         info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
 2536                         info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
 2537                         info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
 2538                         info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
 2539                         if (ib_wr->send_flags & IB_SEND_INLINE)
 2540                                 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
 2541                         else
 2542                                 err = irdma_uk_rdma_write(ukqp, &info, false);
 2543                         break;
 2544                 case IB_WR_RDMA_READ_WITH_INV:
 2545                         inv_stag = true;
 2546                         /* fallthrough */
 2547                 case IB_WR_RDMA_READ:
 2548                         if (ib_wr->num_sge >
 2549                             dev->hw_attrs.uk_attrs.max_hw_read_sges) {
 2550                                 err = -EINVAL;
 2551                                 break;
 2552                         }
 2553                         info.op_type = IRDMA_OP_TYPE_RDMA_READ;
 2554                         info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
 2555                         info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
 2556                         info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
 2557                         info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
 2558                         err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
 2559                         break;
 2560                 case IB_WR_LOCAL_INV:
 2561                         info.op_type = IRDMA_OP_TYPE_INV_STAG;
 2562                         info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 2563                         err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
 2564                         break;
 2565                 case IB_WR_REG_MR:{
 2566                                 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
 2567                                 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
 2568                                 struct irdma_fast_reg_stag_info stag_info = {0};
 2569 
 2570                                 stag_info.signaled = info.signaled;
 2571                                 stag_info.read_fence = info.read_fence;
 2572                                 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
 2573                                 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
 2574                                 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
 2575                                 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
 2576                                 stag_info.wr_id = ib_wr->wr_id;
 2577                                 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
 2578                                 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
 2579                                 stag_info.total_len = iwmr->ibmr.length;
 2580                                 if (palloc->level == PBLE_LEVEL_2) {
 2581                                         stag_info.chunk_size = 3;
 2582                                         stag_info.first_pm_pbl_index = palloc->level2.root.idx;
 2583                                 } else {
 2584                                         stag_info.chunk_size = 1;
 2585                                         stag_info.first_pm_pbl_index = palloc->level1.idx;
 2586                                 }
 2587                                 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
 2588                                 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
 2589                                                                 true);
 2590                                 break;
 2591                         }
 2592                 default:
 2593                         err = -EINVAL;
 2594                         irdma_debug(iwdev_to_idev(iwqp->iwdev),
 2595                                     IRDMA_DEBUG_VERBS,
 2596                                     "upost_send bad opcode = 0x%x\n",
 2597                                     ib_wr->opcode);
 2598                         break;
 2599                 }
 2600 
 2601                 if (err)
 2602                         break;
 2603                 ib_wr = ib_wr->next;
 2604         }
 2605 
 2606         if (!iwqp->flush_issued) {
 2607                 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
 2608                         irdma_uk_qp_post_wr(ukqp);
 2609                 spin_unlock_irqrestore(&iwqp->lock, flags);
 2610         } else {
 2611                 spin_unlock_irqrestore(&iwqp->lock, flags);
 2612                 irdma_sched_qp_flush_work(iwqp);
 2613         }
 2614         if (err)
 2615                 *bad_wr = ib_wr;
 2616 
 2617         return err;
 2618 }
 2619 
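/*
 * Illustrative sketch only (not part of this driver): how a kernel
 * consumer might build the signaled RDMA WRITE work request that the
 * switch above translates into an irdma SQ WQE.  The local buffer is
 * assumed to be covered by a registered MR (lkey) and the remote buffer
 * by an rkey advertised by the peer; the helper name and parameters are
 * hypothetical.
 */
static int
example_post_rdma_write(struct ib_qp *qp, u64 laddr, u32 len, u32 lkey,
                        u64 raddr, u32 rkey, u64 wr_id)
{
        struct ib_sge sge = {0};
        struct ib_rdma_wr wr = {0};
        const struct ib_send_wr *bad_wr;

        sge.addr = laddr;
        sge.length = len;
        sge.lkey = lkey;

        wr.wr.wr_id = wr_id;
        wr.wr.sg_list = &sge;
        wr.wr.num_sge = 1;
        wr.wr.opcode = IB_WR_RDMA_WRITE;
        wr.wr.send_flags = IB_SEND_SIGNALED;    /* request a CQE */
        wr.remote_addr = raddr;
        wr.rkey = rkey;

        /* dispatches to irdma_post_send() through the device ops */
        return ib_post_send(qp, &wr.wr, &bad_wr);
}
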
 2620 /**
 2621  * irdma_post_recv - post receive wr for kernel application
 2622  * @ibqp: ib qp pointer
 2623  * @ib_wr: work request for receive
 2624  * @bad_wr: returns the first bad wr on error
 2625  */
 2626 static int
 2627 irdma_post_recv(struct ib_qp *ibqp,
 2628                 const struct ib_recv_wr *ib_wr,
 2629                 const struct ib_recv_wr **bad_wr)
 2630 {
 2631         struct irdma_qp *iwqp = to_iwqp(ibqp);
 2632         struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
 2633         struct irdma_post_rq_info post_recv = {0};
 2634         struct irdma_sge *sg_list = iwqp->sg_list;
 2635         unsigned long flags;
 2636         int err = 0;
 2637 
 2638         spin_lock_irqsave(&iwqp->lock, flags);
 2639 
 2640         while (ib_wr) {
 2641                 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) {
 2642                         err = -EINVAL;
 2643                         goto out;
 2644                 }
 2645                 post_recv.num_sges = ib_wr->num_sge;
 2646                 post_recv.wr_id = ib_wr->wr_id;
 2647                 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
 2648                 post_recv.sg_list = sg_list;
 2649                 err = irdma_uk_post_receive(ukqp, &post_recv);
 2650                 if (err) {
 2651                         irdma_debug(iwdev_to_idev(iwqp->iwdev),
 2652                                     IRDMA_DEBUG_VERBS, "post_recv err %d\n",
 2653                                     err);
 2654                         goto out;
 2655                 }
 2656 
 2657                 ib_wr = ib_wr->next;
 2658         }
 2659 
 2660 out:
 2661         spin_unlock_irqrestore(&iwqp->lock, flags);
 2662         if (iwqp->flush_issued)
 2663                 irdma_sched_qp_flush_work(iwqp);
 2664         if (err)
 2665                 *bad_wr = ib_wr;
 2666 
 2667         return err;
 2668 }
 2669 
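/*
 * Illustrative sketch only (not part of this driver): posting a single
 * receive buffer through the entry point above.  The buffer is assumed
 * to belong to a registered MR; the helper name and parameters are
 * hypothetical.
 */
static int
example_post_recv_buf(struct ib_qp *qp, u64 addr, u32 len, u32 lkey,
                      u64 wr_id)
{
        struct ib_sge sge = {0};
        struct ib_recv_wr wr = {0};
        const struct ib_recv_wr *bad_wr;

        sge.addr = addr;
        sge.length = len;
        sge.lkey = lkey;

        wr.wr_id = wr_id;       /* echoed back in the work completion */
        wr.sg_list = &sge;
        wr.num_sge = 1;         /* must not exceed max_rq_frag_cnt */

        /* dispatches to irdma_post_recv() through the device ops */
        return ib_post_recv(qp, &wr, &bad_wr);
}
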
 2670 /**
 2671  * irdma_flush_err_to_ib_wc_status - map a flush error code to an IB wc status
 2672  * @opcode: iwarp flush code
 2673  */
 2674 static enum ib_wc_status
 2675 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
 2676 {
 2677         switch (opcode) {
 2678         case FLUSH_PROT_ERR:
 2679                 return IB_WC_LOC_PROT_ERR;
 2680         case FLUSH_REM_ACCESS_ERR:
 2681                 return IB_WC_REM_ACCESS_ERR;
 2682         case FLUSH_LOC_QP_OP_ERR:
 2683                 return IB_WC_LOC_QP_OP_ERR;
 2684         case FLUSH_REM_OP_ERR:
 2685                 return IB_WC_REM_OP_ERR;
 2686         case FLUSH_LOC_LEN_ERR:
 2687                 return IB_WC_LOC_LEN_ERR;
 2688         case FLUSH_GENERAL_ERR:
 2689                 return IB_WC_WR_FLUSH_ERR;
 2690         case FLUSH_MW_BIND_ERR:
 2691                 return IB_WC_MW_BIND_ERR;
 2692         case FLUSH_REM_INV_REQ_ERR:
 2693                 return IB_WC_REM_INV_REQ_ERR;
 2694         case FLUSH_RETRY_EXC_ERR:
 2695                 return IB_WC_RETRY_EXC_ERR;
 2696         case FLUSH_FATAL_ERR:
 2697         default:
 2698                 return IB_WC_FATAL_ERR;
 2699         }
 2700 }
 2701 
 2702 static inline void
 2703 set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
 2704                 struct ib_wc *entry)
 2705 {
 2706         struct irdma_sc_qp *qp;
 2707 
 2708         switch (cq_poll_info->op_type) {
 2709         case IRDMA_OP_TYPE_RDMA_WRITE:
 2710         case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
 2711                 entry->opcode = IB_WC_RDMA_WRITE;
 2712                 break;
 2713         case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
 2714         case IRDMA_OP_TYPE_RDMA_READ:
 2715                 entry->opcode = IB_WC_RDMA_READ;
 2716                 break;
 2717         case IRDMA_OP_TYPE_SEND_SOL:
 2718         case IRDMA_OP_TYPE_SEND_SOL_INV:
 2719         case IRDMA_OP_TYPE_SEND_INV:
 2720         case IRDMA_OP_TYPE_SEND:
 2721                 entry->opcode = IB_WC_SEND;
 2722                 break;
 2723         case IRDMA_OP_TYPE_FAST_REG_NSMR:
 2724                 entry->opcode = IB_WC_REG_MR;
 2725                 break;
 2726         case IRDMA_OP_TYPE_INV_STAG:
 2727                 entry->opcode = IB_WC_LOCAL_INV;
 2728                 break;
 2729         default:
 2730                 qp = cq_poll_info->qp_handle;
 2731                 ibdev_err(irdma_get_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
 2732                           cq_poll_info->op_type);
 2733                 entry->status = IB_WC_GENERAL_ERR;
 2734         }
 2735 }
 2736 
 2737 static inline void
 2738 set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
 2739                 struct ib_wc *entry, bool send_imm_support)
 2740 {
 2741         /*
 2742          * iWARP does not support Send with Immediate, so any immediate
 2743          * data present must come from an RDMA Write with Immediate.
 2744          */
 2745         if (!send_imm_support) {
 2746                 entry->opcode = cq_poll_info->imm_valid ?
 2747                     IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
 2748                 return;
 2749         }
 2750         switch (cq_poll_info->op_type) {
 2751         case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
 2752         case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
 2753                 entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
 2754                 break;
 2755         default:
 2756                 entry->opcode = IB_WC_RECV;
 2757         }
 2758 }
 2759 
 2760 /**
 2761  * irdma_process_cqe - process cqe info
 2762  * @entry: processed cqe
 2763  * @cq_poll_info: cqe info
 2764  */
 2765 static void
 2766 irdma_process_cqe(struct ib_wc *entry,
 2767                   struct irdma_cq_poll_info *cq_poll_info)
 2768 {
 2769         struct irdma_sc_qp *qp;
 2770 
 2771         entry->wc_flags = 0;
 2772         entry->pkey_index = 0;
 2773         entry->wr_id = cq_poll_info->wr_id;
 2774 
 2775         qp = cq_poll_info->qp_handle;
 2776         entry->qp = qp->qp_uk.back_qp;
 2777 
 2778         if (cq_poll_info->error) {
 2779                 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
 2780                     irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
 2781 
 2782                 entry->vendor_err = cq_poll_info->major_err << 16 |
 2783                     cq_poll_info->minor_err;
 2784         } else {
 2785                 entry->status = IB_WC_SUCCESS;
 2786                 if (cq_poll_info->imm_valid) {
 2787                         entry->ex.imm_data = htonl(cq_poll_info->imm_data);
 2788                         entry->wc_flags |= IB_WC_WITH_IMM;
 2789                 }
 2790                 if (cq_poll_info->ud_smac_valid) {
 2791                         ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
 2792                         entry->wc_flags |= IB_WC_WITH_SMAC;
 2793                 }
 2794 
 2795                 if (cq_poll_info->ud_vlan_valid) {
 2796                         u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK;
 2797 
 2798                         entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
 2799                         if (vlan) {
 2800                                 entry->vlan_id = vlan;
 2801                                 entry->wc_flags |= IB_WC_WITH_VLAN;
 2802                         }
 2803                 } else {
 2804                         entry->sl = 0;
 2805                 }
 2806         }
 2807 
 2808         if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
 2809                 set_ib_wc_op_sq(cq_poll_info, entry);
 2810         } else {
 2811                 set_ib_wc_op_rq(cq_poll_info, entry,
 2812                                 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
 2813                                 true : false);
 2814                 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
 2815                     cq_poll_info->stag_invalid_set) {
 2816                         entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
 2817                         entry->wc_flags |= IB_WC_WITH_INVALIDATE;
 2818                 }
 2819         }
 2820 
 2821         if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
 2822                 entry->src_qp = cq_poll_info->ud_src_qpn;
 2823                 entry->slid = 0;
 2824                 entry->wc_flags |=
 2825                     (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
 2826                 entry->network_hdr_type = cq_poll_info->ipv4 ?
 2827                     RDMA_NETWORK_IPV4 :
 2828                     RDMA_NETWORK_IPV6;
 2829         } else {
 2830                 entry->src_qp = cq_poll_info->qp_id;
 2831         }
 2832 
 2833         entry->byte_len = cq_poll_info->bytes_xfered;
 2834 }
 2835 
 2836 /**
 2837  * irdma_poll_one - poll one entry of the CQ
 2838  * @ukcq: ukcq to poll
 2839  * @cur_cqe: current CQE info to be filled in
 2840  * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
 2841  *
 2842  * Returns the internal irdma device error code or 0 on success
 2843  */
 2844 static inline int
 2845 irdma_poll_one(struct irdma_cq_uk *ukcq,
 2846                struct irdma_cq_poll_info *cur_cqe,
 2847                struct ib_wc *entry)
 2848 {
 2849         int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
 2850 
 2851         if (ret)
 2852                 return ret;
 2853 
 2854         irdma_process_cqe(entry, cur_cqe);
 2855 
 2856         return 0;
 2857 }
 2858 
 2859 /**
 2860  * __irdma_poll_cq - poll cq for completion (kernel apps)
 2861  * @iwcq: cq to poll
 2862  * @num_entries: number of entries to poll
 2863  * @entry: array of work completions to fill
 2864  */
 2865 static int
 2866 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
 2867 {
 2868         struct list_head *tmp_node, *list_node;
 2869         struct irdma_cq_buf *last_buf = NULL;
 2870         struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
 2871         struct irdma_cq_buf *cq_buf;
 2872         int ret;
 2873         struct irdma_device *iwdev;
 2874         struct irdma_cq_uk *ukcq;
 2875         bool cq_new_cqe = false;
 2876         int resized_bufs = 0;
 2877         int npolled = 0;
 2878 
 2879         iwdev = to_iwdev(iwcq->ibcq.device);
 2880         ukcq = &iwcq->sc_cq.cq_uk;
 2881 
 2882         /* go through the list of previously resized CQ buffers */
 2883         list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
 2884                 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
 2885                 while (npolled < num_entries) {
 2886                         ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
 2887                         if (!ret) {
 2888                                 ++npolled;
 2889                                 cq_new_cqe = true;
 2890                                 continue;
 2891                         }
 2892                         if (ret == -ENOENT)
 2893                                 break;
 2894                         /* QP using the CQ is destroyed. Skip reporting this CQE */
 2895                         if (ret == -EFAULT) {
 2896                                 cq_new_cqe = true;
 2897                                 continue;
 2898                         }
 2899                         goto error;
 2900                 }
 2901 
 2902                 /* save the resized CQ buffer which received the last cqe */
 2903                 if (cq_new_cqe)
 2904                         last_buf = cq_buf;
 2905                 cq_new_cqe = false;
 2906         }
 2907 
 2908         /* check the current CQ for new cqes */
 2909         while (npolled < num_entries) {
 2910                 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
 2911                 if (ret == -ENOENT) {
 2912                         ret = irdma_generated_cmpls(iwcq, cur_cqe);
 2913                         if (!ret)
 2914                                 irdma_process_cqe(entry + npolled, cur_cqe);
 2915                 }
 2916                 if (!ret) {
 2917                         ++npolled;
 2918                         cq_new_cqe = true;
 2919                         continue;
 2920                 }
 2921 
 2922                 if (ret == -ENOENT)
 2923                         break;
 2924                 /* QP using the CQ is destroyed. Skip reporting this CQE */
 2925                 if (ret == -EFAULT) {
 2926                         cq_new_cqe = true;
 2927                         continue;
 2928                 }
 2929                 goto error;
 2930         }
 2931 
 2932         if (cq_new_cqe)
 2933                 /* all previous CQ resizes are complete */
 2934                 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
 2935         else if (last_buf)
 2936                 /* only CQ resizes up to the last_buf are complete */
 2937                 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
 2938         if (resized_bufs)
 2939                 /* report to the HW the number of complete CQ resizes */
 2940                 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
 2941 
 2942         return npolled;
 2943 error:
 2944         irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 2945                     "%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
 2946 
 2947         return ret;
 2948 }
 2949 
 2950 /**
 2951  * irdma_poll_cq - poll cq for completion (kernel apps)
 2952  * @ibcq: cq to poll
 2953  * @num_entries: number of entries to poll
 2954  * @entry: array of work completions to fill
 2955  */
 2956 static int
 2957 irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
 2958               struct ib_wc *entry)
 2959 {
 2960         struct irdma_cq *iwcq;
 2961         unsigned long flags;
 2962         int ret;
 2963 
 2964         iwcq = to_iwcq(ibcq);
 2965 
 2966         spin_lock_irqsave(&iwcq->lock, flags);
 2967         ret = __irdma_poll_cq(iwcq, num_entries, entry);
 2968         spin_unlock_irqrestore(&iwcq->lock, flags);
 2969 
 2970         return ret;
 2971 }
 2972 
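/*
 * Illustrative sketch only (not part of this driver): a consumer
 * draining completions through the entry point above and inspecting
 * the fields filled in by irdma_process_cqe().  The helper name is
 * hypothetical.
 */
static void
example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[8];
        int n, i;

        while ((n = ib_poll_cq(cq, 8, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                continue;       /* flushed or failed WR */
                        /*
                         * wc[i].wr_id identifies the completed request;
                         * wc[i].ex.imm_data (network byte order) is valid
                         * only when IB_WC_WITH_IMM is set in wc[i].wc_flags.
                         */
                }
        }
}
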
 2973 /**
 2974  * irdma_req_notify_cq - arm cq for a kernel application
 2975  * @ibcq: cq to arm
 2976  * @notify_flags: notification flags
 2977  */
 2978 static int
 2979 irdma_req_notify_cq(struct ib_cq *ibcq,
 2980                     enum ib_cq_notify_flags notify_flags)
 2981 {
 2982         struct irdma_cq *iwcq;
 2983         struct irdma_cq_uk *ukcq;
 2984         unsigned long flags;
 2985         enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
 2986         bool promo_event = false;
 2987         int ret = 0;
 2988 
 2989         iwcq = to_iwcq(ibcq);
 2990         ukcq = &iwcq->sc_cq.cq_uk;
 2991 
 2992         spin_lock_irqsave(&iwcq->lock, flags);
 2993         if (notify_flags == IB_CQ_SOLICITED) {
 2994                 cq_notify = IRDMA_CQ_COMPL_SOLICITED;
 2995         } else {
 2996                 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED)
 2997                         promo_event = true;
 2998         }
 2999 
 3000         if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
 3001                 iwcq->last_notify = cq_notify;
 3002                 irdma_uk_cq_request_notification(ukcq, cq_notify);
 3003         }
 3004 
 3005         if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
 3006             (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
 3007                 ret = 1;
 3008         spin_unlock_irqrestore(&iwcq->lock, flags);
 3009 
 3010         return ret;
 3011 }
 3012 
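/*
 * Illustrative sketch only (not part of this driver): the usual
 * arm-then-repoll pattern that irdma_req_notify_cq() supports.  With
 * IB_CQ_REPORT_MISSED_EVENTS the call returns > 0 if completions are
 * already pending, in which case the consumer polls again instead of
 * waiting for the next completion event.  example_arm_cq() and
 * example_drain_cq() are hypothetical helpers; the latter is sketched
 * above.
 */
static void
example_arm_cq(struct ib_cq *cq)
{
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                             IB_CQ_REPORT_MISSED_EVENTS) > 0)
                example_drain_cq(cq);
}
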
 3013 /**
 3014  * mcast_list_add -  Add a new mcast item to list
 3015  * @rf: RDMA PCI function
 3016  * @new_elem: pointer to element to add
 3017  */
 3018 static void
 3019 mcast_list_add(struct irdma_pci_f *rf,
 3020                struct mc_table_list *new_elem)
 3021 {
 3022         list_add(&new_elem->list, &rf->mc_qht_list.list);
 3023 }
 3024 
 3025 /**
 3026  * mcast_list_del - Remove an mcast item from list
 3027  * @mc_qht_elem: pointer to mcast table list element
 3028  */
 3029 static void
 3030 mcast_list_del(struct mc_table_list *mc_qht_elem)
 3031 {
 3032         if (mc_qht_elem)
 3033                 list_del(&mc_qht_elem->list);
 3034 }
 3035 
 3036 /**
 3037  * mcast_list_lookup_ip - Search mcast list for address
 3038  * @rf: RDMA PCI function
 3039  * @ip_mcast: pointer to mcast IP address
 3040  */
 3041 static struct mc_table_list *
 3042 mcast_list_lookup_ip(struct irdma_pci_f *rf,
 3043                      u32 *ip_mcast)
 3044 {
 3045         struct mc_table_list *mc_qht_el;
 3046         struct list_head *pos, *q;
 3047 
 3048         list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
 3049                 mc_qht_el = list_entry(pos, struct mc_table_list, list);
 3050                 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
 3051                             sizeof(mc_qht_el->mc_info.dest_ip)))
 3052                         return mc_qht_el;
 3053         }
 3054 
 3055         return NULL;
 3056 }
 3057 
 3058 /**
 3059  * irdma_mcast_cqp_op - perform a mcast cqp operation
 3060  * @iwdev: irdma device
 3061  * @mc_grp_ctx: mcast group info
 3062  * @op: operation
 3063  *
 3064  * returns error status
 3065  */
 3066 static int
 3067 irdma_mcast_cqp_op(struct irdma_device *iwdev,
 3068                    struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
 3069 {
 3070         struct cqp_cmds_info *cqp_info;
 3071         struct irdma_cqp_request *cqp_request;
 3072         int status;
 3073 
 3074         cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 3075         if (!cqp_request)
 3076                 return -ENOMEM;
 3077 
 3078         cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
 3079         cqp_info = &cqp_request->info;
 3080         cqp_info->cqp_cmd = op;
 3081         cqp_info->post_sq = 1;
 3082         cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
 3083         cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
 3084         status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 3085         irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 3086 
 3087         return status;
 3088 }
 3089 
 3090 /**
 3091  * irdma_attach_mcast - attach a qp to a multicast group
 3092  * @ibqp: ptr to qp
 3093  * @ibgid: pointer to global ID
 3094  * @lid: local ID
 3095  *
 3096  * returns error status
 3097  */
 3098 static int
 3099 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 3100 {
 3101         struct irdma_qp *iwqp = to_iwqp(ibqp);
 3102         struct irdma_device *iwdev = iwqp->iwdev;
 3103         struct irdma_pci_f *rf = iwdev->rf;
 3104         struct mc_table_list *mc_qht_elem;
 3105         struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
 3106         unsigned long flags;
 3107         u32 ip_addr[4] = {0};
 3108         u32 mgn;
 3109         u32 no_mgs;
 3110         int ret = 0;
 3111         bool ipv4;
 3112         u16 vlan_id;
 3113         union {
 3114                 struct sockaddr saddr;
 3115                 struct sockaddr_in saddr_in;
 3116                 struct sockaddr_in6 saddr_in6;
 3117         } sgid_addr;
 3118         unsigned char dmac[ETH_ALEN];
 3119 
 3120         rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
 3121 
 3122         if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
 3123                 irdma_copy_ip_ntohl(ip_addr,
 3124                                     sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
 3125                 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
 3126                 ipv4 = false;
 3127                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 3128                             "qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
 3129                             ip_addr);
 3130                 irdma_mcast_mac_v6(ip_addr, dmac);
 3131         } else {
 3132                 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
 3133                 ipv4 = true;
 3134                 vlan_id = irdma_get_vlan_ipv4(ip_addr);
 3135                 irdma_mcast_mac_v4(ip_addr, dmac);
 3136                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 3137                             "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
 3138                             ibqp->qp_num, ip_addr, dmac);
 3139         }
 3140 
 3141         spin_lock_irqsave(&rf->qh_list_lock, flags);
 3142         mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
 3143         if (!mc_qht_elem) {
 3144                 struct irdma_dma_mem *dma_mem_mc;
 3145 
 3146                 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3147                 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
 3148                 if (!mc_qht_elem)
 3149                         return -ENOMEM;
 3150 
 3151                 mc_qht_elem->mc_info.ipv4_valid = ipv4;
 3152                 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
 3153                        sizeof(mc_qht_elem->mc_info.dest_ip));
 3154                 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
 3155                                        &mgn, &rf->next_mcg);
 3156                 if (ret) {
 3157                         kfree(mc_qht_elem);
 3158                         return -ENOMEM;
 3159                 }
 3160 
 3161                 mc_qht_elem->mc_info.mgn = mgn;
 3162                 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
 3163                 dma_mem_mc->size = sizeof(u64) * IRDMA_MAX_MGS_PER_CTX;
 3164                 dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc,
 3165                                                         dma_mem_mc->size,
 3166                                                         IRDMA_HW_PAGE_SIZE);
 3167                 if (!dma_mem_mc->va) {
 3168                         irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
 3169                         kfree(mc_qht_elem);
 3170                         return -ENOMEM;
 3171                 }
 3172 
 3173                 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
 3174                 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
 3175                        sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
 3176                 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
 3177                 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
 3178                 if (vlan_id < VLAN_N_VID)
 3179                         mc_qht_elem->mc_grp_ctx.vlan_valid = true;
 3180                 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
 3181                 mc_qht_elem->mc_grp_ctx.qs_handle =
 3182                     iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
 3183                 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
 3184 
 3185                 spin_lock_irqsave(&rf->qh_list_lock, flags);
 3186                 mcast_list_add(rf, mc_qht_elem);
 3187         } else {
 3188                 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
 3189                     IRDMA_MAX_MGS_PER_CTX) {
 3190                         spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3191                         return -ENOMEM;
 3192                 }
 3193         }
 3194 
 3195         mcg_info.qp_id = iwqp->ibqp.qp_num;
 3196         no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
 3197         irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
 3198         spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3199 
 3200         /* Only if there is a change do we need to modify or create */
 3201         if (!no_mgs) {
 3202                 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
 3203                                          IRDMA_OP_MC_CREATE);
 3204         } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
 3205                 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
 3206                                          IRDMA_OP_MC_MODIFY);
 3207         } else {
 3208                 return 0;
 3209         }
 3210 
 3211         if (ret)
 3212                 goto error;
 3213 
 3214         return 0;
 3215 
 3216 error:
 3217         irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
 3218         if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
 3219                 mcast_list_del(mc_qht_elem);
 3220                 irdma_free_dma_mem(&rf->hw,
 3221                                    &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
 3222                 irdma_free_rsrc(rf, rf->allocated_mcgs,
 3223                                 mc_qht_elem->mc_grp_ctx.mg_id);
 3224                 kfree(mc_qht_elem);
 3225         }
 3226 
 3227         return ret;
 3228 }
 3229 
 3230 /**
 3231  * irdma_detach_mcast - detach a qp from a multicast group
 3232  * @ibqp: ptr to qp
 3233  * @ibgid: pointer to global ID
 3234  * @lid: local ID
 3235  *
 3236  * returns error status
 3237  */
 3238 static int
 3239 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 3240 {
 3241         struct irdma_qp *iwqp = to_iwqp(ibqp);
 3242         struct irdma_device *iwdev = iwqp->iwdev;
 3243         struct irdma_pci_f *rf = iwdev->rf;
 3244         u32 ip_addr[4] = {0};
 3245         struct mc_table_list *mc_qht_elem;
 3246         struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
 3247         int ret;
 3248         unsigned long flags;
 3249         union {
 3250                 struct sockaddr saddr;
 3251                 struct sockaddr_in saddr_in;
 3252                 struct sockaddr_in6 saddr_in6;
 3253         } sgid_addr;
 3254 
 3255         rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
 3256         if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
 3257                 irdma_copy_ip_ntohl(ip_addr,
 3258                                     sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
 3259         else
 3260                 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
 3261 
 3262         spin_lock_irqsave(&rf->qh_list_lock, flags);
 3263         mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
 3264         if (!mc_qht_elem) {
 3265                 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3266                 irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 3267                             "address not found MCG\n");
 3268                 return 0;
 3269         }
 3270 
 3271         mcg_info.qp_id = iwqp->ibqp.qp_num;
 3272         irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
 3273         if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
 3274                 mcast_list_del(mc_qht_elem);
 3275                 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3276                 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
 3277                                          IRDMA_OP_MC_DESTROY);
 3278                 if (ret) {
 3279                         irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 3280                                     "failed MC_DESTROY MCG\n");
 3281                         spin_lock_irqsave(&rf->qh_list_lock, flags);
 3282                         mcast_list_add(rf, mc_qht_elem);
 3283                         spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3284                         return -EAGAIN;
 3285                 }
 3286 
 3287                 irdma_free_dma_mem(&rf->hw,
 3288                                    &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
 3289                 irdma_free_rsrc(rf, rf->allocated_mcgs,
 3290                                 mc_qht_elem->mc_grp_ctx.mg_id);
 3291                 kfree(mc_qht_elem);
 3292         } else {
 3293                 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
 3294                 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
 3295                                          IRDMA_OP_MC_MODIFY);
 3296                 if (ret) {
 3297                         irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
 3298                                     "failed Modify MCG\n");
 3299                         return ret;
 3300                 }
 3301         }
 3302 
 3303         return 0;
 3304 }
 3305 
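/*
 * Illustrative sketch only (not part of this driver): a RoCEv2 UD
 * consumer joining and later leaving a multicast group through the
 * handlers above.  The MGID is assumed to already encode the group's
 * multicast IP address; the helper names are hypothetical.
 */
static int
example_join_mcast(struct ib_qp *qp, union ib_gid *mgid)
{
        /* ends up in irdma_attach_mcast(); the lid argument is ignored */
        return ib_attach_mcast(qp, mgid, 0);
}

static int
example_leave_mcast(struct ib_qp *qp, union ib_gid *mgid)
{
        /* ends up in irdma_detach_mcast() */
        return ib_detach_mcast(qp, mgid, 0);
}
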
 3306 /**
 3307  * irdma_query_ah - Query address handle
 3308  * @ibah: pointer to address handle
 3309  * @ah_attr: address handle attributes
 3310  */
 3311 static int
 3312 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 3313 {
 3314         struct irdma_ah *ah = to_iwah(ibah);
 3315 
 3316         memset(ah_attr, 0, sizeof(*ah_attr));
 3317         if (ah->av.attrs.ah_flags & IB_AH_GRH) {
 3318                 ah_attr->ah_flags = IB_AH_GRH;
 3319                 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
 3320                 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
 3321                 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
 3322                 ah_attr->grh.sgid_index = ah->sgid_index;
 3324                 memcpy(&ah_attr->grh.dgid, &ah->dgid,
 3325                        sizeof(ah_attr->grh.dgid));
 3326         }
 3327 
 3328         return 0;
 3329 }
 3330 
 3331 static __be64 irdma_mac_to_guid(struct ifnet *ndev) {
 3332         const unsigned char *mac = IF_LLADDR(ndev);
 3333         __be64 guid;
 3334         unsigned char *dst = (unsigned char *)&guid;
 3335 
 3336         dst[0] = mac[0] ^ 2;
 3337         dst[1] = mac[1];
 3338         dst[2] = mac[2];
 3339         dst[3] = 0xff;
 3340         dst[4] = 0xfe;
 3341         dst[5] = mac[3];
 3342         dst[6] = mac[4];
 3343         dst[7] = mac[5];
 3344 
 3345         return guid;
 3346 }
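
/*
 * The conversion above follows the modified EUI-64 convention.  For
 * example (illustrative address only), the MAC 00:1b:21:aa:bb:cc yields
 * the node GUID 02:1b:21:ff:fe:aa:bb:cc, with the universal/local bit
 * of the first octet flipped and ff:fe inserted between the OUI and the
 * device-specific bytes.
 */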
 3347 
 3348 static struct ifnet *
 3349 irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
 3350 {
 3351         struct irdma_device *iwdev = to_iwdev(ibdev);
 3352 
 3353         if (iwdev->netdev) {
 3354                 dev_hold(iwdev->netdev);
 3355                 return iwdev->netdev;
 3356         }
 3357 
 3358         return NULL;
 3359 }
 3360 
 3361 static void
 3362 irdma_set_device_ops(struct ib_device *ibdev)
 3363 {
 3364         struct ib_device *dev_ops = ibdev;
 3365 
 3366         dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
 3367         dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
 3368         dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
 3369         dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd);
 3370         dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext,
 3371                                                                 irdma_ucontext,
 3372                                                                 ibucontext);
 3373 
 3374         dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
 3375         dev_ops->alloc_mr = irdma_alloc_mr;
 3376         dev_ops->alloc_mw = irdma_alloc_mw;
 3377         dev_ops->alloc_pd = irdma_alloc_pd;
 3378         dev_ops->alloc_ucontext = irdma_alloc_ucontext;
 3379         dev_ops->create_cq = irdma_create_cq;
 3380         dev_ops->create_qp = irdma_create_qp;
 3381         dev_ops->dealloc_mw = irdma_dealloc_mw;
 3382         dev_ops->dealloc_pd = irdma_dealloc_pd;
 3383         dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
 3384         dev_ops->dereg_mr = irdma_dereg_mr;
 3385         dev_ops->destroy_cq = irdma_destroy_cq;
 3386         dev_ops->destroy_qp = irdma_destroy_qp;
 3387         dev_ops->disassociate_ucontext = irdma_disassociate_ucontext;
 3388         dev_ops->get_dev_fw_str = irdma_get_dev_fw_str;
 3389         dev_ops->get_dma_mr = irdma_get_dma_mr;
 3390         dev_ops->get_hw_stats = irdma_get_hw_stats;
 3391         dev_ops->get_netdev = irdma_get_netdev;
 3392         dev_ops->map_mr_sg = irdma_map_mr_sg;
 3393         dev_ops->mmap = irdma_mmap;
 3394         dev_ops->mmap_free = irdma_mmap_free;
 3395         dev_ops->poll_cq = irdma_poll_cq;
 3396         dev_ops->post_recv = irdma_post_recv;
 3397         dev_ops->post_send = irdma_post_send;
 3398         dev_ops->query_device = irdma_query_device;
 3399         dev_ops->query_port = irdma_query_port;
 3400         dev_ops->modify_port = irdma_modify_port;
 3401         dev_ops->query_qp = irdma_query_qp;
 3402         dev_ops->reg_user_mr = irdma_reg_user_mr;
 3403         dev_ops->rereg_user_mr = irdma_rereg_user_mr;
 3404         dev_ops->req_notify_cq = irdma_req_notify_cq;
 3405         dev_ops->resize_cq = irdma_resize_cq;
 3406 }
 3407 
 3408 static void
 3409 irdma_set_device_mcast_ops(struct ib_device *ibdev)
 3410 {
 3411         struct ib_device *dev_ops = ibdev;
 3412         dev_ops->attach_mcast = irdma_attach_mcast;
 3413         dev_ops->detach_mcast = irdma_detach_mcast;
 3414 }
 3415 
 3416 static void
 3417 irdma_set_device_roce_ops(struct ib_device *ibdev)
 3418 {
 3419         struct ib_device *dev_ops = ibdev;
 3420         dev_ops->create_ah = irdma_create_ah;
 3421         dev_ops->destroy_ah = irdma_destroy_ah;
 3422         dev_ops->get_link_layer = irdma_get_link_layer;
 3423         dev_ops->get_port_immutable = irdma_roce_port_immutable;
 3424         dev_ops->modify_qp = irdma_modify_qp_roce;
 3425         dev_ops->query_ah = irdma_query_ah;
 3426         dev_ops->query_gid = irdma_query_gid_roce;
 3427         dev_ops->query_pkey = irdma_query_pkey;
 3428         ibdev->add_gid = irdma_add_gid;
 3429         ibdev->del_gid = irdma_del_gid;
 3430 }
 3431 
 3432 static void
 3433 irdma_set_device_iw_ops(struct ib_device *ibdev)
 3434 {
 3435         struct ib_device *dev_ops = ibdev;
 3436 
 3437         ibdev->uverbs_cmd_mask |=
 3438             (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
 3439             (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
 3440 
 3441         dev_ops->create_ah = irdma_create_ah_stub;
 3442         dev_ops->destroy_ah = irdma_destroy_ah_stub;
 3443         dev_ops->get_port_immutable = irdma_iw_port_immutable;
 3444         dev_ops->modify_qp = irdma_modify_qp;
 3445         dev_ops->query_gid = irdma_query_gid;
 3446         dev_ops->query_pkey = irdma_iw_query_pkey;
 3447 }
 3448 
 3449 static inline void
 3450 irdma_set_device_gen1_ops(struct ib_device *ibdev)
 3451 {
 3452 }
 3453 
 3454 /**
 3455  * irdma_init_roce_device - initialization of roce rdma device
 3456  * @iwdev: irdma device
 3457  */
 3458 static void
 3459 irdma_init_roce_device(struct irdma_device *iwdev)
 3460 {
 3461         kc_set_roce_uverbs_cmd_mask(iwdev);
 3462         iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
 3463         iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
 3464         irdma_set_device_roce_ops(&iwdev->ibdev);
 3465         if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
 3466                 irdma_set_device_mcast_ops(&iwdev->ibdev);
 3467 }
 3468 
 3469 /**
 3470  * irdma_init_iw_device - initialization of iwarp rdma device
 3471  * @iwdev: irdma device
 3472  */
 3473 static int
 3474 irdma_init_iw_device(struct irdma_device *iwdev)
 3475 {
 3476         struct ifnet *netdev = iwdev->netdev;
 3477 
 3478         iwdev->ibdev.node_type = RDMA_NODE_RNIC;
 3479         ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, IF_LLADDR(netdev));
 3480         iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
 3481         if (!iwdev->ibdev.iwcm)
 3482                 return -ENOMEM;
 3483 
 3484         iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref;
 3485         iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref;
 3486         iwdev->ibdev.iwcm->get_qp = irdma_get_qp;
 3487         iwdev->ibdev.iwcm->connect = irdma_connect;
 3488         iwdev->ibdev.iwcm->accept = irdma_accept;
 3489         iwdev->ibdev.iwcm->reject = irdma_reject;
 3490         iwdev->ibdev.iwcm->create_listen = irdma_create_listen;
 3491         iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen;
 3492         memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev),
 3493                sizeof(iwdev->ibdev.iwcm->ifname));
 3494         irdma_set_device_iw_ops(&iwdev->ibdev);
 3495 
 3496         return 0;
 3497 }
 3498 
 3499 /**
 3500  * irdma_init_rdma_device - initialization of rdma device
 3501  * @iwdev: irdma device
 3502  */
 3503 static int
 3504 irdma_init_rdma_device(struct irdma_device *iwdev)
 3505 {
 3506         struct pci_dev *pcidev = iwdev->rf->pcidev;
 3507         int ret;
 3508 
 3509         iwdev->ibdev.owner = THIS_MODULE;
 3510         iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER;
 3511         kc_set_rdma_uverbs_cmd_mask(iwdev);
 3512 
 3513         if (iwdev->roce_mode) {
 3514                 irdma_init_roce_device(iwdev);
 3515         } else {
 3516                 ret = irdma_init_iw_device(iwdev);
 3517                 if (ret)
 3518                         return ret;
 3519         }
 3520 
 3521         iwdev->ibdev.phys_port_cnt = 1;
 3522         iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
 3523         iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
 3524         set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
 3525         irdma_set_device_ops(&iwdev->ibdev);
 3526         if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
 3527                 irdma_set_device_gen1_ops(&iwdev->ibdev);
 3528 
 3529         return 0;
 3530 }
 3531 
 3532 /**
 3533  * irdma_port_ibevent - indicate port event
 3534  * @iwdev: irdma device
 3535  */
 3536 void
 3537 irdma_port_ibevent(struct irdma_device *iwdev)
 3538 {
 3539         struct ib_event event;
 3540 
 3541         event.device = &iwdev->ibdev;
 3542         event.element.port_num = 1;
 3543         event.event =
 3544             iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
 3545         ib_dispatch_event(&event);
 3546 }
 3547 
 3548 /**
 3549  * irdma_ib_unregister_device - unregister rdma device from IB
 3550  * core
 3551  * @iwdev: irdma device
 3552  */
 3553 void
 3554 irdma_ib_unregister_device(struct irdma_device *iwdev)
 3555 {
 3556         iwdev->iw_status = 0;
 3557         irdma_port_ibevent(iwdev);
 3558         ib_unregister_device(&iwdev->ibdev);
 3559         dev_put(iwdev->netdev);
 3560         kfree(iwdev->ibdev.iwcm);
 3561         iwdev->ibdev.iwcm = NULL;
 3562 }
 3563 
 3564 /**
 3565  * irdma_ib_register_device - register irdma device to IB core
 3566  * @iwdev: irdma device
 3567  */
 3568 int
 3569 irdma_ib_register_device(struct irdma_device *iwdev)
 3570 {
 3571         int ret;
 3572 
 3573         ret = irdma_init_rdma_device(iwdev);
 3574         if (ret)
 3575                 return ret;
 3576 
 3577         dev_hold(iwdev->netdev);
 3578         sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
 3579         ret = ib_register_device(&iwdev->ibdev, NULL);
 3580         if (ret)
 3581                 goto error;
 3582 
 3583         iwdev->iw_status = 1;
 3584         irdma_port_ibevent(iwdev);
 3585 
 3586         return 0;
 3587 
 3588 error:
 3589         kfree(iwdev->ibdev.iwcm);
 3590         iwdev->ibdev.iwcm = NULL;
 3591         irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "failed to register RDMA device\n");
 3592 
 3593         return ret;
 3594 }
