FreeBSD/Linux Kernel Cross Reference
sys/dev/irdma/irdma_kcompat.c

/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2018 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "irdma_main.h"

#define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)

static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) {
        u32 fl_low = fl & 0x03FFF;
        u32 fl_high = fl & 0xFC000;

        fl_low ^= fl_high >> 14;

        return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}

#define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)

static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) {
        u64 fl = (u64)lqpn * rqpn;

        fl ^= fl >> 20;
        fl ^= fl >> 40;

        return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
}

u16
kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
        if (!fl)
                fl = kc_rdma_calc_flow_label(lqpn, rqpn);
        return kc_rdma_flow_label_to_udp_sport(fl);
}
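
/*
 * Worked example (illustrative, not part of the driver): folding a 20-bit
 * flow label into the RoCEv2 UDP source-port range [0xC000, 0xFFFF].
 * For fl = 0x12345:
 *   fl_low  = 0x12345 & 0x03FFF          = 0x2345
 *   fl_high = (0x12345 & 0xFC000) >> 14  = 0x4
 *   sport   = (0x2345 ^ 0x4) | 0xC000    = 0xE341
 * When no flow label is supplied, kc_rdma_calc_flow_label() derives one
 * from the QP numbers, so a given QP pair always hashes to the same
 * source port.
 */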

void
irdma_get_dev_fw_str(struct ib_device *dev,
                     char *str,
                     size_t str_len)
{
        struct irdma_device *iwdev = to_iwdev(dev);

        snprintf(str, str_len, "%u.%u",
                 irdma_fw_major_ver(&iwdev->rf->sc_dev),
                 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
}

int
irdma_add_gid(struct ib_device *device,
              u8 port_num,
              unsigned int index,
              const union ib_gid *gid,
              const struct ib_gid_attr *attr,
              void **context)
{
        return 0;
}

int
irdma_del_gid(struct ib_device *device,
              u8 port_num,
              unsigned int index,
              void **context)
{
        return 0;
}

/**
 * irdma_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: type of memory for stag registration
 * @max_num_sg: max number of pages
 * @udata: user data
 */
struct ib_mr *
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
               u32 max_num_sg, struct ib_udata *udata)
{
        struct irdma_device *iwdev = to_iwdev(pd->device);
        struct irdma_pble_alloc *palloc;
        struct irdma_pbl *iwpbl;
        struct irdma_mr *iwmr;
        int status;
        u32 stag;
        int err_code = -ENOMEM;

        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
        if (!iwmr)
                return ERR_PTR(-ENOMEM);

        stag = irdma_create_stag(iwdev);
        if (!stag) {
                err_code = -ENOMEM;
                goto err;
        }

        iwmr->stag = stag;
        iwmr->ibmr.rkey = stag;
        iwmr->ibmr.lkey = stag;
        iwmr->ibmr.pd = pd;
        iwmr->ibmr.device = pd->device;
        iwpbl = &iwmr->iwpbl;
        iwpbl->iwmr = iwmr;
        iwmr->type = IRDMA_MEMREG_TYPE_MEM;
        palloc = &iwpbl->pble_alloc;
        iwmr->page_cnt = max_num_sg;
        /* Assume system PAGE_SIZE as the sg page sizes are unknown. */
        iwmr->len = max_num_sg * PAGE_SIZE;
        status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
                                false);
        if (status)
                goto err_get_pble;

        err_code = irdma_hw_alloc_stag(iwdev, iwmr);
        if (err_code)
                goto err_alloc_stag;

        iwpbl->pbl_allocated = true;

        return &iwmr->ibmr;
err_alloc_stag:
        irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
err_get_pble:
        irdma_free_stag(iwdev, stag);
err:
        kfree(iwmr);

        return ERR_PTR(err_code);
}
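
/*
 * Illustrative sketch (not compiled into the driver): how a kernel ULP
 * typically reaches this verb through the Linux-style ib_core
 * fast-registration API.  The function and variable names below are
 * placeholders, not part of irdma.
 */
#if 0
static int example_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
                            struct scatterlist *sgl, int sg_nents)
{
        struct ib_send_wr *bad_wr;
        struct ib_reg_wr reg_wr = {};
        struct ib_mr *mr;
        int n;

        /* Dispatches to irdma_alloc_mr() on an irdma device. */
        mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        /* Load the MR's page list from the scatterlist. */
        n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
        if (n != sg_nents) {
                ib_dereg_mr(mr);
                return n < 0 ? n : -EINVAL;
        }

        /* Post the registration work request on the send queue. */
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.mr = mr;
        reg_wr.key = mr->rkey;
        reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

        return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}
#endif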

#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
/**
 * irdma_alloc_ucontext - Allocate the user context data structure
 * @uctx: context
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
int
irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        struct irdma_device *iwdev = to_iwdev(ibdev);
        struct irdma_alloc_ucontext_req req = {0};
        struct irdma_alloc_ucontext_resp uresp = {0};
        struct irdma_ucontext *ucontext = to_ucontext(uctx);
        struct irdma_uk_attrs *uk_attrs;

        if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
            udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
                return -EINVAL;

        if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
                return -EINVAL;

        if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
                goto ver_error;

        ucontext->iwdev = iwdev;
        ucontext->abi_ver = req.userspace_ver;

        uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
        /* GEN_1 support for libi40iw */
        if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
                if (uk_attrs->hw_rev != IRDMA_GEN_1)
                        return -EOPNOTSUPP;

                ucontext->legacy_mode = true;
                uresp.max_qps = iwdev->rf->max_qp;
                uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
                uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
                uresp.kernel_ver = req.userspace_ver;
                if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
                        return -EFAULT;
        } else {
                u64 bar_off;

                uresp.kernel_ver = IRDMA_ABI_VER;
                uresp.feature_flags = uk_attrs->feature_flags;
                uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
                uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
                uresp.max_hw_inline = uk_attrs->max_hw_inline;
                uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
                uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
                uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
                uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
                uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
                uresp.hw_rev = uk_attrs->hw_rev;

                bar_off =
                    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
                ucontext->db_mmap_entry =
                    irdma_user_mmap_entry_insert(ucontext, bar_off,
                                                 IRDMA_MMAP_IO_NC,
                                                 &uresp.db_mmap_key);
                if (!ucontext->db_mmap_entry) {
                        return -ENOMEM;
                }

                if (ib_copy_to_udata(udata, &uresp,
                                     min(sizeof(uresp), udata->outlen))) {
                        rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
                        return -EFAULT;
                }
        }

        INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
        spin_lock_init(&ucontext->cq_reg_mem_list_lock);
        INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
        spin_lock_init(&ucontext->qp_reg_mem_list_lock);
        INIT_LIST_HEAD(&ucontext->vma_list);
        mutex_init(&ucontext->vma_list_mutex);

        return 0;

ver_error:
        irdma_dev_err(&iwdev->rf->sc_dev,
                      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
                      req.userspace_ver, IRDMA_ABI_VER);
        return -EINVAL;
}
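
/*
 * ABI negotiation at a glance (explanatory note, not driver code): a
 * libirdma built against ABI version N sends userspace_ver = N and sizes
 * its output buffer for the full irdma_alloc_ucontext_resp, while a
 * legacy libi40iw consumer only provides the minimal response length,
 * which is what selects the GEN_1 legacy path above.
 */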

/**
 * irdma_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
void
irdma_dealloc_ucontext(struct ib_ucontext *context)
{
        struct irdma_ucontext *ucontext = to_ucontext(context);

        rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);

        return;
}

#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
/**
 * irdma_alloc_pd - allocate protection domain
 * @pd: protection domain
 * @udata: user data
 */
int
irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        struct irdma_pd *iwpd = to_iwpd(pd);
        struct irdma_device *iwdev = to_iwdev(pd->device);
        struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
        struct irdma_pci_f *rf = iwdev->rf;
        struct irdma_alloc_pd_resp uresp = {0};
        struct irdma_sc_pd *sc_pd;
        u32 pd_id = 0;
        int err;

        if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
                return -EINVAL;

        err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
                               &rf->next_pd);
        if (err)
                return err;

        sc_pd = &iwpd->sc_pd;
        if (udata) {
                struct irdma_ucontext *ucontext =
                rdma_udata_to_drv_context(udata, struct irdma_ucontext,
                                          ibucontext);

                irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
                uresp.pd_id = pd_id;
                if (ib_copy_to_udata(udata, &uresp,
                                     min(sizeof(uresp), udata->outlen))) {
                        err = -EFAULT;
                        goto error;
                }
        } else {
                irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
        }

        return 0;

error:

        irdma_free_rsrc(rf, rf->allocated_pds, pd_id);

        return err;
}

void
irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct irdma_pd *iwpd = to_iwpd(ibpd);
        struct irdma_device *iwdev = to_iwdev(ibpd->device);

        irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
}

static void
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
                   const struct ib_gid_attr *sgid_attr,
                   struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
                   u8 *dmac, u8 net_type)
{
        if (net_type == RDMA_NETWORK_IPV4) {
                ah_info->ipv4_valid = true;
                ah_info->dest_ip_addr[0] =
                    ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
                ah_info->src_ip_addr[0] =
                    ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
#ifdef VIMAGE
                CURVNET_SET_QUIET(vnet);
                ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
                                                     ah_info->dest_ip_addr[0]);
                CURVNET_RESTORE();
#endif
                if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
                        irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
                }
        } else {
                irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
                                    ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
                irdma_copy_ip_ntohl(ah_info->src_ip_addr,
                                    ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
                ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
                                                     ah_info->dest_ip_addr);
                if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
                        irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
                }
        }
}

static int
irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
                         struct irdma_ah_info *ah_info,
                         const struct ib_gid_attr *sgid_attr,
                         u8 *dmac)
{
        if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
                ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
        else
                ah_info->vlan_tag = VLAN_N_VID;

        ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);

        if (ah_info->dst_arpindex == -1)
                return -EINVAL;

        if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
                ah_info->vlan_tag = 0;

        if (ah_info->vlan_tag < VLAN_N_VID) {
                ah_info->insert_vlan_tag = true;
                ah_info->vlan_tag |=
                    (u16)rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
        }
        if (iwdev->roce_dcqcn_en) {
                ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
                ah_info->tc_tos |= ECN_CODE_PT_VAL;
        }

        return 0;
}

static int
irdma_create_ah_wait(struct irdma_pci_f *rf,
                     struct irdma_sc_ah *sc_ah, bool sleep)
{
        if (!sleep) {
                int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
                CQP_TIMEOUT_THRESHOLD;

                do {
                        irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
                        mdelay(1);
                } while (!sc_ah->ah_info.ah_valid && --cnt);

                if (!cnt)
                        return -ETIMEDOUT;
        }
        return 0;
}
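
/*
 * Timing note (illustrative): with mdelay(1) per iteration, the
 * non-sleeping path above busy-polls the CCQ for roughly
 * max_cqp_compl_wait_time_ms * CQP_TIMEOUT_THRESHOLD milliseconds before
 * giving up with -ETIMEDOUT; sleepable callers skip the loop and wait
 * for the CQP completion inside irdma_ah_cqp_op() instead.
 */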

#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)

/**
 * irdma_create_ah - create address handle
 * @ib_ah: ptr to AH
 * @attr: address handle attributes
 * @flags: AH flags to wait
 * @udata: user data
 *
 * returns 0 on success, error otherwise
 */
int
irdma_create_ah(struct ib_ah *ib_ah,
                struct ib_ah_attr *attr, u32 flags,
                struct ib_udata *udata)
{
        struct irdma_pd *pd = to_iwpd(ib_ah->pd);
        struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
        struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
        struct irdma_pci_f *rf = iwdev->rf;
        struct irdma_sc_ah *sc_ah;
        u32 ah_id = 0;
        struct irdma_ah_info *ah_info;
        struct irdma_create_ah_resp uresp;
        union {
                struct sockaddr saddr;
                struct sockaddr_in saddr_in;
                struct sockaddr_in6 saddr_in6;
        } sgid_addr, dgid_addr;
        int err;
        u8 dmac[ETH_ALEN];
        bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;

        if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
                return -EINVAL;

        err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
                               rf->max_ah, &ah_id, &rf->next_ah);

        if (err)
                return err;

        ah->pd = pd;
        sc_ah = &ah->sc_ah;
        sc_ah->ah_info.ah_idx = ah_id;
        sc_ah->ah_info.vsi = &iwdev->vsi;
        irdma_sc_init_ah(&rf->sc_dev, sc_ah);
        ah->sgid_index = attr->grh.sgid_index;
        memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
        rcu_read_lock();
        err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
                                attr->grh.sgid_index, &sgid, &sgid_attr);
        rcu_read_unlock();
        if (err) {
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "GID lookup at idx=%d with port=%d failed\n",
                            attr->grh.sgid_index, attr->port_num);
                err = -EINVAL;
                goto err_gid_l2;
        }
        rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
        rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
        ah->av.attrs = *attr;
        ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
                                                        sgid_attr.gid_type,
                                                        &sgid);

        if (sgid_attr.ndev)
                dev_put(sgid_attr.ndev);

        ah->av.sgid_addr.saddr = sgid_addr.saddr;
        ah->av.dgid_addr.saddr = dgid_addr.saddr;
        ah_info = &sc_ah->ah_info;
        ah_info->ah_idx = ah_id;
        ah_info->pd_idx = pd->sc_pd.pd_id;
        ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));

        if (attr->ah_flags & IB_AH_GRH) {
                ah_info->flow_label = attr->grh.flow_label;
                ah_info->hop_ttl = attr->grh.hop_limit;
                ah_info->tc_tos = attr->grh.traffic_class;
        }

        ether_addr_copy(dmac, attr->dmac);

        irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
                           dmac, ah->av.net_type);

        err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
        if (err)
                goto err_gid_l2;

        err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
                              sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
        if (err) {
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "CQP-OP Create AH fail");
                goto err_gid_l2;
        }

        err = irdma_create_ah_wait(rf, sc_ah, sleep);
        if (err) {
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "CQP create AH timed out");
                goto err_gid_l2;
        }

        if (udata) {
                uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
                err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (err) {
                        irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
                                        IRDMA_OP_AH_DESTROY, false, NULL, ah);
                        goto err_gid_l2;
                }
        }

        return 0;
err_gid_l2:
        irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);

        return err;
}

void
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
{
        ether_addr_copy(dmac, attr->dmac);
}

int
irdma_create_ah_stub(struct ib_ah *ib_ah,
                     struct ib_ah_attr *attr, u32 flags,
                     struct ib_udata *udata)
{
        return -ENOSYS;
}

void
irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
{
        return;
}

/**
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
 */
void
irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
        struct irdma_device *iwdev = iwqp->iwdev;
        struct irdma_pci_f *rf = iwdev->rf;
        u32 qp_num = iwqp->ibqp.qp_num;

        irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
        irdma_dealloc_push_page(rf, &iwqp->sc_qp);
        if (iwqp->sc_qp.vsi) {
                irdma_qp_rem_qos(&iwqp->sc_qp);
                iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
                                           iwqp->sc_qp.user_pri);
        }

        if (qp_num > 2)
                irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
        irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
        irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
        kfree(iwqp->kqp.sig_trk_mem);
        iwqp->kqp.sig_trk_mem = NULL;
        kfree(iwqp->kqp.sq_wrid_mem);
        kfree(iwqp->kqp.rq_wrid_mem);
        kfree(iwqp->sg_list);
        kfree(iwqp);
}

/**
 * irdma_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
struct ib_qp *
irdma_create_qp(struct ib_pd *ibpd,
                struct ib_qp_init_attr *init_attr,
                struct ib_udata *udata)
{
#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
        struct irdma_pd *iwpd = to_iwpd(ibpd);
        struct irdma_device *iwdev = to_iwdev(ibpd->device);
        struct irdma_pci_f *rf = iwdev->rf;
        struct irdma_qp *iwqp;
        struct irdma_create_qp_resp uresp = {0};
        u32 qp_num = 0;
        int ret;
        int err_code;
        struct irdma_sc_qp *qp;
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
        struct irdma_qp_init_info init_info = {{0}};
        struct irdma_qp_host_ctx_info *ctx_info;

        err_code = irdma_validate_qp_attrs(init_attr, iwdev);
        if (err_code)
                return ERR_PTR(err_code);

        if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
                      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
                return ERR_PTR(-EINVAL);

        init_info.vsi = &iwdev->vsi;
        init_info.qp_uk_init_info.uk_attrs = uk_attrs;
        init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
        init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
        init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
        init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
        init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

        iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
        if (!iwqp)
                return ERR_PTR(-ENOMEM);

        iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
                                GFP_KERNEL);
        if (!iwqp->sg_list) {
                kfree(iwqp);
                return ERR_PTR(-ENOMEM);
        }

        qp = &iwqp->sc_qp;
        qp->qp_uk.back_qp = iwqp;
        qp->qp_uk.lock = &iwqp->lock;
        qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;

        iwqp->iwdev = iwdev;
        iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
        iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
                                                     iwqp->q2_ctx_mem.size,
                                                     256);
        if (!iwqp->q2_ctx_mem.va) {
                kfree(iwqp->sg_list);
                kfree(iwqp);
                return ERR_PTR(-ENOMEM);
        }

        init_info.q2 = iwqp->q2_ctx_mem.va;
        init_info.q2_pa = iwqp->q2_ctx_mem.pa;
        init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
        init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;

        if (init_attr->qp_type == IB_QPT_GSI)
                qp_num = 1;
        else
                err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
                                            &qp_num, &rf->next_qp);
        if (err_code)
                goto error;

        iwqp->iwpd = iwpd;
        iwqp->ibqp.qp_num = qp_num;
        qp = &iwqp->sc_qp;
        iwqp->iwscq = to_iwcq(init_attr->send_cq);
        iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
        iwqp->host_ctx.va = init_info.host_ctx;
        iwqp->host_ctx.pa = init_info.host_ctx_pa;
        iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;

        init_info.pd = &iwpd->sc_pd;
        init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
        if (!rdma_protocol_roce(&iwdev->ibdev, 1))
                init_info.qp_uk_init_info.first_sq_wq = 1;
        iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
        init_waitqueue_head(&iwqp->waitq);
        init_waitqueue_head(&iwqp->mod_qp_waitq);

        if (udata) {
                init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
                err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
        } else {
                INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
                init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
                err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
        }

        if (err_code) {
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "setup qp failed\n");
                goto error;
        }

        if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
                if (init_attr->qp_type == IB_QPT_RC) {
                        init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
                        init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
                            IRDMA_WRITE_WITH_IMM |
                            IRDMA_ROCE;
                } else {
                        init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
                        init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
                            IRDMA_ROCE;
                }
        } else {
                init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
                init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
        }

        ret = irdma_sc_qp_init(qp, &init_info);
        if (ret) {
                err_code = -EPROTO;
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "qp_init fail\n");
                goto error;
        }

        ctx_info = &iwqp->ctx_info;
        ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
        ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

        if (rdma_protocol_roce(&iwdev->ibdev, 1))
                irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
        else
                irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);

        err_code = irdma_cqp_create_qp_cmd(iwqp);
        if (err_code)
                goto error;

        atomic_set(&iwqp->refcnt, 1);
        spin_lock_init(&iwqp->lock);
        spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
        iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
        rf->qp_table[qp_num] = iwqp;

        if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
                if (dev->ws_add(&iwdev->vsi, 0)) {
                        irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
                        err_code = -EINVAL;
                        goto error;
                }

                irdma_qp_add_qos(&iwqp->sc_qp);
        }

        if (udata) {
                /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
                if (udata->outlen < sizeof(uresp)) {
                        uresp.lsmm = 1;
                        uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
                } else {
                        if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
                                uresp.lsmm = 1;
                }
                uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
                uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
                uresp.qp_id = qp_num;
                uresp.qp_caps = qp->qp_uk.qp_caps;

                err_code = ib_copy_to_udata(udata, &uresp,
                                            min(sizeof(uresp), udata->outlen));
                if (err_code) {
                        irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                                    "copy_to_udata failed\n");
                        kc_irdma_destroy_qp(&iwqp->ibqp, udata);
                        return ERR_PTR(err_code);
                }
        }

        init_completion(&iwqp->free_qp);
        return &iwqp->ibqp;

error:
        irdma_free_qp_rsrc(iwqp);

        return ERR_PTR(err_code);
}

/**
 * irdma_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 * @udata: user data
 */
int
irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct irdma_qp *iwqp = to_iwqp(ibqp);
        struct irdma_device *iwdev = iwqp->iwdev;

        if (iwqp->sc_qp.qp_uk.destroy_pending)
                goto free_rsrc;
        iwqp->sc_qp.qp_uk.destroy_pending = true;
        if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
                irdma_modify_qp_to_err(&iwqp->sc_qp);

        irdma_qp_rem_ref(&iwqp->ibqp);
        wait_for_completion(&iwqp->free_qp);
        irdma_free_lsmm_rsrc(iwqp);
        if (!iwdev->rf->reset &&
            irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
                return -ENOTRECOVERABLE;
free_rsrc:
        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
                        irdma_clean_cqes(iwqp, iwqp->iwscq);
                        if (iwqp->iwrcq != iwqp->iwscq)
                                irdma_clean_cqes(iwqp, iwqp->iwrcq);
                }
        }
        irdma_remove_push_mmap_entries(iwqp);
        irdma_free_qp_rsrc(iwqp);

        return 0;
}

/**
 * irdma_create_cq - create cq
 * @ibcq: CQ allocated
 * @attr: attributes for cq
 * @udata: user data
 */
int
irdma_create_cq(struct ib_cq *ibcq,
                const struct ib_cq_init_attr *attr,
                struct ib_udata *udata)
{
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
        struct ib_device *ibdev = ibcq->device;
        struct irdma_device *iwdev = to_iwdev(ibdev);
        struct irdma_pci_f *rf = iwdev->rf;
        struct irdma_cq *iwcq = to_iwcq(ibcq);
        u32 cq_num = 0;
        struct irdma_sc_cq *cq;
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_cq_init_info info = {0};
        int status;
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;
        struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
        unsigned long flags;
        int err_code;
        int entries = attr->cqe;
        bool cqe_64byte_ena;

        err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
        if (err_code)
                return err_code;

        if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
                      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
                return -EINVAL;
        err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
                                    &rf->next_cq);
        if (err_code)
                return err_code;
        cq = &iwcq->sc_cq;
        cq->back_cq = iwcq;
        atomic_set(&iwcq->refcnt, 1);
        spin_lock_init(&iwcq->lock);
        INIT_LIST_HEAD(&iwcq->resize_list);
        INIT_LIST_HEAD(&iwcq->cmpl_generated);
        info.dev = dev;
        ukinfo->cq_size = max(entries, 4);
        ukinfo->cq_id = cq_num;
        cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
        ukinfo->avoid_mem_cflct = cqe_64byte_ena;
        iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
        if (attr->comp_vector < rf->ceqs_count)
                info.ceq_id = attr->comp_vector;
        info.ceq_id_valid = true;
        info.ceqe_mask = 1;
        info.type = IRDMA_CQ_TYPE_IWARP;
        info.vsi = &iwdev->vsi;

        if (udata) {
                struct irdma_ucontext *ucontext;
                struct irdma_create_cq_req req = {0};
                struct irdma_cq_mr *cqmr;
                struct irdma_pbl *iwpbl;
                struct irdma_pbl *iwpbl_shadow;
                struct irdma_cq_mr *cqmr_shadow;

                iwcq->user_mode = true;
                ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);

                if (ib_copy_from_udata(&req, udata,
                                       min(sizeof(req), udata->inlen))) {
                        err_code = -EFAULT;
                        goto cq_free_rsrc;
                }

                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
                iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
                                      &ucontext->cq_reg_mem_list);
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                if (!iwpbl) {
                        err_code = -EPROTO;
                        goto cq_free_rsrc;
                }
                iwcq->iwpbl = iwpbl;
                iwcq->cq_mem_size = 0;
                cqmr = &iwpbl->cq_mr;

                if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
                    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
                        spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
                        iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
                                                     &ucontext->cq_reg_mem_list);
                        spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);

                        if (!iwpbl_shadow) {
                                err_code = -EPROTO;
                                goto cq_free_rsrc;
                        }
                        iwcq->iwpbl_shadow = iwpbl_shadow;
                        cqmr_shadow = &iwpbl_shadow->cq_mr;
                        info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
                        cqmr->split = true;
                } else {
                        info.shadow_area_pa = cqmr->shadow;
                }
                if (iwpbl->pbl_allocated) {
                        info.virtual_map = true;
                        info.pbl_chunk_size = 1;
                        info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
                } else {
                        info.cq_base_pa = cqmr->cq_pbl.addr;
                }
        } else {
                /* Kmode allocations */
                int rsize;

                if (entries < 1 || entries > rf->max_cqe) {
                        err_code = -EINVAL;
                        goto cq_free_rsrc;
                }

                entries++;
                if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                        entries *= 2;
                ukinfo->cq_size = entries;

                if (cqe_64byte_ena)
                        rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
                else
                        rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
                iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
                iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
                                                       iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
                if (!iwcq->kmem.va) {
                        err_code = -ENOMEM;
                        goto cq_free_rsrc;
                }

                iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
                iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
                                                              &iwcq->kmem_shadow,
                                                              iwcq->kmem_shadow.size,
                                                              64);

                if (!iwcq->kmem_shadow.va) {
                        err_code = -ENOMEM;
                        goto cq_free_rsrc;
                }
                info.shadow_area_pa = iwcq->kmem_shadow.pa;
                ukinfo->shadow_area = iwcq->kmem_shadow.va;
                ukinfo->cq_base = iwcq->kmem.va;
                info.cq_base_pa = iwcq->kmem.pa;
        }

        if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
                                                 (u32)IRDMA_MAX_CQ_READ_THRESH);
        if (irdma_sc_cq_init(cq, &info)) {
                irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                            "init cq fail\n");
                err_code = -EPROTO;
                goto cq_free_rsrc;
        }

        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
        if (!cqp_request) {
                err_code = -ENOMEM;
                goto cq_free_rsrc;
        }
        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.cq_create.cq = cq;
        cqp_info->in.u.cq_create.check_overflow = true;
        cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
        status = irdma_handle_cqp_op(rf, cqp_request);
        irdma_put_cqp_request(&rf->cqp, cqp_request);
        if (status) {
                err_code = -ENOMEM;
                goto cq_free_rsrc;
        }

        if (udata) {
                struct irdma_create_cq_resp resp = {0};

                resp.cq_id = info.cq_uk_init_info.cq_id;
                resp.cq_size = info.cq_uk_init_info.cq_size;
                if (ib_copy_to_udata(udata, &resp,
                                     min(sizeof(resp), udata->outlen))) {
                        irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
                                    "copy to user data\n");
                        err_code = -EPROTO;
                        goto cq_destroy;
                }
        }

        rf->cq_table[cq_num] = iwcq;
        init_completion(&iwcq->free_cq);

        return 0;
cq_destroy:
        irdma_cq_wq_destroy(rf, cq);
cq_free_rsrc:
        irdma_cq_free_rsrc(rf, iwcq);
        return err_code;
}
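
/*
 * Worked sizing example for the kernel-mode path (illustrative): on a
 * GEN_2 device without 64-byte CQEs, a request for attr->cqe = 128 is
 * sized as
 *
 *   entries = (128 + 1) * 2 = 258 CQEs
 *   rsize   = 258 * sizeof(struct irdma_cqe)
 *
 * and the allocation is rounded up to a whole IRDMA_HW_PAGE_SIZE.
 */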

/**
 * irdma_copy_user_pgaddrs - copy user page addresses into the pble list
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */

void
irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
                        enum irdma_pble_level level)
{
        struct ib_umem *region = iwmr->region;
        struct irdma_pbl *iwpbl = &iwmr->iwpbl;
        int chunk_pages, entry, i;
        struct scatterlist *sg;
        u64 pg_addr = 0;
        struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct irdma_pble_info *pinfo;
        u32 idx = 0;
        u32 pbl_cnt = 0;

        pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
        for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
                chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
                if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
                        iwpbl->qp_mr.sq_page = sg_page(sg);
                for (i = 0; i < chunk_pages; i++) {
                        pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
                        if ((entry + i) == 0)
                                *pbl = pg_addr & iwmr->page_msk;
                        else if (!(pg_addr & ~iwmr->page_msk))
                                *pbl = pg_addr;
                        else
                                continue;
                        if (++pbl_cnt == palloc->total_cnt)
                                break;
                        pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
                }
        }
}
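
/*
 * Worked example (illustrative): registering a region backed by 2 MB
 * huge pages gives iwmr->page_size = 2 MB and iwmr->page_msk =
 * ~(2 MB - 1).  The first address is force-aligned by
 * "pg_addr & iwmr->page_msk"; any later address that is not already
 * 2 MB-aligned fails the "!(pg_addr & ~iwmr->page_msk)" test and is
 * skipped, so exactly one PBL entry is written per huge page.
 */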

/**
 * irdma_destroy_ah - Destroy address handle
 * @ibah: pointer to address handle
 * @ah_flags: destroy flags
 */

void
irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
{
        struct irdma_device *iwdev = to_iwdev(ibah->device);
        struct irdma_ah *ah = to_iwah(ibah);

        irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
                        false, NULL, ah);

        irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
                        ah->sc_ah.ah_info.ah_idx);
}

int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
        struct irdma_mr *iwmr = to_iwmr(ib_mr);
        struct irdma_device *iwdev = to_iwdev(ib_mr->device);
        struct irdma_pbl *iwpbl = &iwmr->iwpbl;
        int ret;

        if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
                if (iwmr->region) {
                        struct irdma_ucontext *ucontext;

                        ucontext = rdma_udata_to_drv_context(udata,
                                                             struct irdma_ucontext,
                                                             ibucontext);
                        irdma_del_memlist(iwmr, ucontext);
                }
                goto done;
        }

        ret = irdma_hwdereg_mr(ib_mr);
        if (ret)
                return ret;

        irdma_free_stag(iwdev, iwmr->stag);
done:
        if (iwpbl->pbl_allocated)
                irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);

        if (iwmr->region)
                ib_umem_release(iwmr->region);

        kfree(iwmr);

        return 0;
}

/**
 * irdma_rereg_user_mr - Re-Register a user memory region
 * @ib_mr: ib mem to access iwarp mr pointer
 * @flags: bit mask to indicate which of the attr's of MR modified
 * @start: virtual start address
 * @len: length of mr
 * @virt: virtual address
 * @new_access: bit mask of access flags
 * @new_pd: ptr of pd
 * @udata: user data
 */
int
irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
                    u64 virt, int new_access, struct ib_pd *new_pd,
                    struct ib_udata *udata)
{
        struct irdma_device *iwdev = to_iwdev(ib_mr->device);
        struct irdma_mr *iwmr = to_iwmr(ib_mr);
        struct irdma_pbl *iwpbl = &iwmr->iwpbl;
        int ret;

        if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
                return -EINVAL;

        if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
                return -EOPNOTSUPP;

        ret = irdma_hwdereg_mr(ib_mr);
        if (ret)
                return ret;

        if (flags & IB_MR_REREG_ACCESS)
                iwmr->access = new_access;

        if (flags & IB_MR_REREG_PD) {
                iwmr->ibmr.pd = new_pd;
                iwmr->ibmr.device = new_pd->device;
        }

        if (flags & IB_MR_REREG_TRANS) {
                if (iwpbl->pbl_allocated) {
                        irdma_free_pble(iwdev->rf->pble_rsrc,
                                        &iwpbl->pble_alloc);
                        iwpbl->pbl_allocated = false;
                }
                if (iwmr->region) {
                        ib_umem_release(iwmr->region);
                        iwmr->region = NULL;
                }

                ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
                if (IS_ERR(ib_mr))
                        return PTR_ERR(ib_mr);

        } else {
                ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
                if (ret)
                        return ret;
        }

        return 0;
}
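
/*
 * Illustrative userspace counterpart (not part of the driver): this verb
 * is typically reached through libibverbs' ibv_rereg_mr(), e.g.
 *
 *   ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION, NULL,
 *                new_buf, new_len, 0);
 *
 * which maps to IB_MR_REREG_TRANS here and takes the translation path
 * above (PBLE and umem released, then re-registered via
 * irdma_rereg_mr_trans()).
 */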

int
kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
                          u16 *vlan_id)
{
        int ret;
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
        struct irdma_av *av = &iwqp->roce_ah.av;

        ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
                                attr->ah_attr.grh.sgid_index, &sgid,
                                &sgid_attr);
        if (ret)
                return ret;

        if (sgid_attr.ndev) {
                *vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
                ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
        }

        rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
        dev_put(sgid_attr.ndev);
        iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;

        return 0;
}

/**
 * irdma_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 * @udata: user data
 */
void
irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
        struct irdma_device *iwdev = to_iwdev(ib_cq->device);
        struct irdma_cq *iwcq = to_iwcq(ib_cq);
        struct irdma_sc_cq *cq = &iwcq->sc_cq;
        struct irdma_sc_dev *dev = cq->dev;
        struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
        struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
        unsigned long flags;

        spin_lock_irqsave(&iwcq->lock, flags);
        if (!list_empty(&iwcq->cmpl_generated))
                irdma_remove_cmpls_list(iwcq);
        if (!list_empty(&iwcq->resize_list))
                irdma_process_resize_list(iwcq, iwdev, NULL);
        spin_unlock_irqrestore(&iwcq->lock, flags);

        irdma_cq_rem_ref(ib_cq);
        wait_for_completion(&iwcq->free_cq);

        irdma_cq_wq_destroy(iwdev->rf, cq);

        spin_lock_irqsave(&iwceq->ce_lock, flags);
        irdma_sc_cleanup_ceqes(cq, ceq);
        spin_unlock_irqrestore(&iwceq->ce_lock, flags);
        irdma_cq_free_rsrc(iwdev->rf, iwcq);
}

/**
 * irdma_alloc_mw - Allocate memory window
 * @pd: Protection domain
 * @type: Window type
 * @udata: user data pointer
 */
struct ib_mw *
irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
               struct ib_udata *udata)
{
        struct irdma_device *iwdev = to_iwdev(pd->device);
        struct irdma_mr *iwmr;
        int err_code;
        u32 stag;

        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
        if (!iwmr)
                return ERR_PTR(-ENOMEM);

        stag = irdma_create_stag(iwdev);
        if (!stag) {
                kfree(iwmr);
                return ERR_PTR(-ENOMEM);
        }

        iwmr->stag = stag;
        iwmr->ibmw.rkey = stag;
        iwmr->ibmw.pd = pd;
        iwmr->ibmw.type = type;
        iwmr->ibmw.device = pd->device;

        err_code = irdma_hw_alloc_mw(iwdev, iwmr);
        if (err_code) {
                irdma_free_stag(iwdev, stag);
                kfree(iwmr);
                return ERR_PTR(err_code);
        }

        return &iwmr->ibmw;
}

/**
 * kc_set_loc_seq_num_mss - Set local seq number and mss
 * @cm_node: cm node info
 */
void
kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
{
        struct timespec ts;

        getnanotime(&ts);
        cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
        if (cm_node->iwdev->vsi.mtu > 1500 &&
            2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
                cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
                    (1500 - IRDMA_MTU_TO_MSS_IPV4) :
                    (1500 - IRDMA_MTU_TO_MSS_IPV6);
        else
                cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
                    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
                    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
}
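
/*
 * Worked example (illustrative): for an IPv4 connection with a 9000-byte
 * VSI MTU and a receive window under 18000 bytes, the clamp above sizes
 * the MSS as if the MTU were 1500, i.e. 1500 - IRDMA_MTU_TO_MSS_IPV4;
 * otherwise the MSS is simply the VSI MTU minus the protocol overhead
 * (IRDMA_MTU_TO_MSS_IPV4 or IRDMA_MTU_TO_MSS_IPV6), presumably so that
 * at least two full segments fit in the advertised window.
 */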
 1318 
 1319 /**
 1320  * irdma_disassociate_ucontext - Disassociate user context
 1321  * @context: ib user context
 1322  */
 1323 void
 1324 irdma_disassociate_ucontext(struct ib_ucontext *context)
 1325 {
 1326 }
 1327 
 1328 struct ib_device *
 1329 ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
 1330 {
 1331         struct irdma_device *iwdev;
 1332         struct irdma_handler *hdl;
 1333         unsigned long flags;
 1334 
 1335         spin_lock_irqsave(&irdma_handler_lock, flags);
 1336         list_for_each_entry(hdl, &irdma_handlers, list) {
 1337                 iwdev = hdl->iwdev;
 1338                 if (netdev == iwdev->netdev) {
 1339                         spin_unlock_irqrestore(&irdma_handler_lock,
 1340                                                flags);
 1341                         return &iwdev->ibdev;
 1342                 }
 1343         }
 1344         spin_unlock_irqrestore(&irdma_handler_lock, flags);
 1345 
 1346         return NULL;
 1347 }
 1348 
 1349 void
 1350 ib_unregister_device_put(struct ib_device *device)
 1351 {
 1352         ib_unregister_device(device);
 1353 }
 1354 
/**
 * irdma_query_gid_roce - Query port GID for RoCE
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
int
irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
                     union ib_gid *gid)
{
        int ret;

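        /*
         * Treat -EAGAIN from the GID cache as an unpopulated entry and
         * report the all-zero GID rather than an error.
         */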
        ret = rdma_query_gid(ibdev, port, index, gid);
        if (ret == -EAGAIN) {
                memcpy(gid, &zgid, sizeof(*gid));
                return 0;
        }

        return ret;
}

/**
 * irdma_modify_port - modify port attributes
 * @ibdev: device pointer from stack
 * @port: port number
 * @mask: Property mask
 * @props: returning device attributes
 */
int
irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                  struct ib_port_modify *props)
{
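        /* Single-port device with no modifiable port attributes. */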
        if (port > 1)
                return -EINVAL;

        return 0;
}

/**
 * irdma_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
int
irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                 u16 *pkey)
{
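        /* Every valid table index reports the single default pkey. */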
        if (index >= IRDMA_PKEY_TBL_SZ)
                return -EINVAL;

        *pkey = IRDMA_DEFAULT_PKEY;
        return 0;
}

int
irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
                          struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

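        /* Advertise RoCEv2 (UDP-encapsulated RoCE) port capabilities. */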
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        return 0;
}

int
irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
                        struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

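        /* iWARP ports expose a single MAC-derived GID and no pkey table. */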
        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;
        immutable->gid_tbl_len = 1;

        return 0;
}

/**
 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
 * @link_speed: netdev phy link speed
 * @active_speed: IB port speed
 * @active_width: IB port width
 */
void
irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
                              u8 *active_width)
{
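        /*
         * IB verbs has no native encodings for Ethernet rates, so pick
         * the (width, speed) pair whose product best approximates the
         * link speed, e.g. 4X * DDR(5G) ~ 20G and 1X * EDR(25G) ~ 25G.
         */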
        if (link_speed <= SPEED_1000) {
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
        } else if (link_speed <= SPEED_10000) {
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_FDR10;
        } else if (link_speed <= SPEED_20000) {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_DDR;
        } else if (link_speed <= SPEED_25000) {
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
        } else if (link_speed <= SPEED_40000) {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR10;
        } else {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
        }
}

/**
 * irdma_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
int
irdma_query_port(struct ib_device *ibdev, u8 port,
                 struct ib_port_attr *props)
{
        struct irdma_device *iwdev = to_iwdev(ibdev);
        struct ifnet *netdev = iwdev->netdev;

        /* no need to zero out props here, the caller does it */

        props->max_mtu = IB_MTU_4096;
        props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
        props->lid = 1;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        if ((netdev->if_link_state == LINK_STATE_UP) &&
            (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                props->state = IB_PORT_DOWN;
                props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
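        /* Speed/width attributes are derived from a fixed 100G rate. */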
        irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
                                      &props->active_width);

        if (rdma_protocol_roce(ibdev, 1)) {
                props->gid_tbl_len = 32;
                kc_set_props_ip_gid_caps(props);
                props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
        } else {
                props->gid_tbl_len = 1;
        }
        props->qkey_viol_cntr = 0;
        props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
        props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;

        return 0;
}

static const char *const irdma_hw_stat_names[] = {
        /* gen1 - 32-bit */
        [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
        [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
        [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
        [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
        [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
        [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
        [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
        /* gen1 - 64-bit */
        [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
        [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
        [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
        [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
        [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
        [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
        [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
        [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
        [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
        [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
        [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
        [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
        [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
        [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
        [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
        [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
        [IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
        [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
        [IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
        [IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
        [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
        [IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
        [IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
        [IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",

        /* gen2 - 32-bit */
        [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
        [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
        [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
        /* gen2 - 64-bit */
        [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
        [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
        [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
        [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
        [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
        [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
        [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
        [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
        [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
        [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
        [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
        [IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
};

/**
 * irdma_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
struct rdma_hw_stats *
irdma_alloc_hw_stats(struct ib_device *ibdev,
                     u8 port_num)
{
        struct irdma_device *iwdev = to_iwdev(ibdev);
        struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
        int num_counters = dev->hw_attrs.max_stat_idx;
        unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

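        /* num_counters is expected to match the name table above. */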
        return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
                                          lifespan);
}

/**
 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
int
irdma_get_hw_stats(struct ib_device *ibdev,
                   struct rdma_hw_stats *stats, u8 port_num,
                   int index)
{
        struct irdma_device *iwdev = to_iwdev(ibdev);
        struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;

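        /*
         * Gen2 hardware requires a control QP command to refresh the
         * counters before they are copied out.
         */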
        if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
                irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);

        memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);

        return stats->num_counters;
}

/**
 * irdma_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
int
irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
                union ib_gid *gid)
{
        struct irdma_device *iwdev = to_iwdev(ibdev);

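        /* The GID is derived from the netdev's MAC address. */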
        memset(gid->raw, 0, sizeof(gid->raw));
        ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));

        return 0;
}

enum rdma_link_layer
irdma_get_link_layer(struct ib_device *ibdev,
                     u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

inline enum ib_mtu
ib_mtu_int_to_enum(int mtu)
{
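        /* Map the byte MTU to the largest IB MTU enum that fits. */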
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}

inline void
kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
{
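        /* RoCE additionally supports address handles and multicast. */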
        iwdev->ibdev.uverbs_cmd_mask |=
            BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
            BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
            BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
            BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
}

inline void
kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
{
        iwdev->ibdev.uverbs_cmd_mask =
            BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
            BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
            BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
            BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
            BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
            BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
            BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
            BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
            BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
            BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
            BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
            BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
            BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
            BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
            BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
            BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
            BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
            BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
            BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
            BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
            BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
        iwdev->ibdev.uverbs_ex_cmd_mask =
            BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
            BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

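        /* The extended create-CQ verb is only available on Gen2+. */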
        if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
                iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
}
