FreeBSD/Linux Kernel Cross Reference
sys/dev/irdma/irdma_utils.c


    1 /*-
    2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
    3  *
    4  * Copyright (c) 2015 - 2022 Intel Corporation
    5  *
    6  * This software is available to you under a choice of one of two
    7  * licenses.  You may choose to be licensed under the terms of the GNU
    8  * General Public License (GPL) Version 2, available from the file
    9  * COPYING in the main directory of this source tree, or the
   10  * OpenFabrics.org BSD license below:
   11  *
   12  *   Redistribution and use in source and binary forms, with or
   13  *   without modification, are permitted provided that the following
   14  *   conditions are met:
   15  *
   16  *    - Redistributions of source code must retain the above
   17  *      copyright notice, this list of conditions and the following
   18  *      disclaimer.
   19  *
   20  *    - Redistributions in binary form must reproduce the above
   21  *      copyright notice, this list of conditions and the following
   22  *      disclaimer in the documentation and/or other materials
   23  *      provided with the distribution.
   24  *
   25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   32  * SOFTWARE.
   33  */
   34 /*$FreeBSD$*/
   35 
   36 #include "irdma_main.h"
   37 
   38 LIST_HEAD(irdma_handlers);
   39 DEFINE_SPINLOCK(irdma_handler_lock);
   40 
   41 /**
    42  * irdma_arp_table - manage arp table
   43  * @rf: RDMA PCI function
   44  * @ip_addr: ip address for device
   45  * @mac_addr: mac address ptr
   46  * @action: modify, delete or add
   47  */
   48 int
   49 irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
   50                 u32 action)
   51 {
   52         unsigned long flags;
   53         int arp_index;
   54         u32 ip[4] = {};
   55 
   56         memcpy(ip, ip_addr, sizeof(ip));
   57 
   58         spin_lock_irqsave(&rf->arp_lock, flags);
   59         for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
   60                 if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
   61                         break;
   62         }
   63 
   64         switch (action) {
   65         case IRDMA_ARP_ADD:
   66                 if (arp_index != rf->arp_table_size) {
   67                         arp_index = -1;
   68                         break;
   69                 }
   70 
   71                 arp_index = 0;
   72                 if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
   73                                      (u32 *)&arp_index, &rf->next_arp_index)) {
   74                         arp_index = -1;
   75                         break;
   76                 }
   77 
   78                 memcpy(rf->arp_table[arp_index].ip_addr, ip,
   79                        sizeof(rf->arp_table[arp_index].ip_addr));
   80                 ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
   81                 break;
   82         case IRDMA_ARP_RESOLVE:
   83                 if (arp_index == rf->arp_table_size)
   84                         arp_index = -1;
   85                 break;
   86         case IRDMA_ARP_DELETE:
   87                 if (arp_index == rf->arp_table_size) {
   88                         arp_index = -1;
   89                         break;
   90                 }
   91 
   92                 memset(rf->arp_table[arp_index].ip_addr, 0,
   93                        sizeof(rf->arp_table[arp_index].ip_addr));
   94                 eth_zero_addr(rf->arp_table[arp_index].mac_addr);
   95                 irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
   96                 break;
   97         default:
   98                 arp_index = -1;
   99                 break;
  100         }
  101 
  102         spin_unlock_irqrestore(&rf->arp_lock, flags);
  103         return arp_index;
  104 }
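
/*
 * Return convention: irdma_arp_table() hands back the ARP table index that
 * was matched or allocated, and -1 on failure (entry already present on
 * IRDMA_ARP_ADD, entry missing on IRDMA_ARP_RESOLVE/IRDMA_ARP_DELETE, or an
 * unknown action).  Callers such as irdma_add_arp() below only test for a
 * non-negative index.
 */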
  105 
  106 /**
  107  * irdma_add_arp - add a new arp entry if needed
  108  * @rf: RDMA function
  109  * @ip: IP address
  110  * @mac: MAC address
  111  */
  112 int
  113 irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
  114 {
  115         int arpidx;
  116 
  117         arpidx = irdma_arp_table(rf, &ip[0], NULL, IRDMA_ARP_RESOLVE);
  118         if (arpidx >= 0) {
  119                 if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
  120                         return arpidx;
  121 
  122                 irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
  123                                        IRDMA_ARP_DELETE);
  124         }
  125 
  126         irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD);
  127 
  128         return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
  129 }
  130 
  131 /**
  132  * irdma_netdevice_event - system notifier for netdev events
   133  * @notifier: notifier block embedded in the irdma_device
  134  * @event: event for notifier
  135  * @ptr: netdev
  136  */
  137 int
  138 irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
  139                       void *ptr)
  140 {
  141         struct irdma_device *iwdev;
  142         struct ifnet *netdev = netdev_notifier_info_to_ifp(ptr);
  143 
  144         iwdev = container_of(notifier, struct irdma_device, nb_netdevice_event);
  145         if (iwdev->netdev != netdev)
  146                 return NOTIFY_DONE;
  147 
  148         iwdev->iw_status = 1;
  149         switch (event) {
  150         case NETDEV_DOWN:
  151                 iwdev->iw_status = 0;
  152                 /* fallthrough */
  153         case NETDEV_UP:
  154                 irdma_port_ibevent(iwdev);
  155                 break;
  156         default:
  157                 break;
  158         }
  159 
  160         return NOTIFY_DONE;
  161 }
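
/*
 * NETDEV_DOWN clears iw_status and then deliberately falls through to the
 * NETDEV_UP case so that irdma_port_ibevent() reports the port state change
 * in both directions; all other netdev events are ignored.
 */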
  162 
  163 void
  164 irdma_unregister_notifiers(struct irdma_device *iwdev)
  165 {
  166         unregister_netdevice_notifier(&iwdev->nb_netdevice_event);
  167 }
  168 
  169 int
  170 irdma_register_notifiers(struct irdma_device *iwdev)
  171 {
  172         int ret;
  173 
  174         iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event;
  175         ret = register_netdevice_notifier(&iwdev->nb_netdevice_event);
  176         if (ret) {
  177                 ibdev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
  178                 return ret;
  179         }
  180         return ret;
  181 }
  182 /**
  183  * irdma_alloc_and_get_cqp_request - get cqp struct
  184  * @cqp: device cqp ptr
  185  * @wait: cqp to be used in wait mode
  186  */
  187 struct irdma_cqp_request *
  188 irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
  189                                 bool wait)
  190 {
  191         struct irdma_cqp_request *cqp_request = NULL;
  192         unsigned long flags;
  193 
  194         spin_lock_irqsave(&cqp->req_lock, flags);
  195         if (!list_empty(&cqp->cqp_avail_reqs)) {
  196                 cqp_request = list_entry(cqp->cqp_avail_reqs.next,
  197                                          struct irdma_cqp_request, list);
  198                 list_del_init(&cqp_request->list);
  199         }
  200         spin_unlock_irqrestore(&cqp->req_lock, flags);
  201         if (!cqp_request) {
  202                 cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
  203                 if (cqp_request) {
  204                         cqp_request->dynamic = true;
  205                         if (wait)
  206                                 init_waitqueue_head(&cqp_request->waitq);
  207                 }
  208         }
  209         if (!cqp_request) {
  210                 irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR,
  211                             "CQP Request Fail: No Memory");
  212                 return NULL;
  213         }
  214 
  215         cqp_request->waiting = wait;
  216         atomic_set(&cqp_request->refcnt, 1);
  217         memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
  218 
  219         return cqp_request;
  220 }
  221 
  222 /**
  223  * irdma_get_cqp_request - increase refcount for cqp_request
  224  * @cqp_request: pointer to cqp_request instance
  225  */
  226 static inline void
  227 irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
  228 {
  229         atomic_inc(&cqp_request->refcnt);
  230 }
  231 
  232 /**
  233  * irdma_free_cqp_request - free cqp request
  234  * @cqp: cqp ptr
  235  * @cqp_request: to be put back in cqp list
  236  */
  237 void
  238 irdma_free_cqp_request(struct irdma_cqp *cqp,
  239                        struct irdma_cqp_request *cqp_request)
  240 {
  241         unsigned long flags;
  242 
  243         if (cqp_request->dynamic) {
  244                 kfree(cqp_request);
  245         } else {
  246                 cqp_request->request_done = false;
  247                 cqp_request->callback_fcn = NULL;
  248                 cqp_request->waiting = false;
  249 
  250                 spin_lock_irqsave(&cqp->req_lock, flags);
  251                 list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
  252                 spin_unlock_irqrestore(&cqp->req_lock, flags);
  253         }
  254         wake_up(&cqp->remove_wq);
  255 }
  256 
  257 /**
  258  * irdma_put_cqp_request - dec ref count and free if 0
  259  * @cqp: cqp ptr
  260  * @cqp_request: to be put back in cqp list
  261  */
  262 void
  263 irdma_put_cqp_request(struct irdma_cqp *cqp,
  264                       struct irdma_cqp_request *cqp_request)
  265 {
  266         if (atomic_dec_and_test(&cqp_request->refcnt))
  267                 irdma_free_cqp_request(cqp, cqp_request);
  268 }
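
/*
 * CQP request lifecycle as used throughout this file: a caller obtains a
 * request with irdma_alloc_and_get_cqp_request() (refcnt starts at 1),
 * fills in cqp_request->info, submits it through irdma_handle_cqp_op() and
 * finally drops its reference with irdma_put_cqp_request().  The request is
 * only kfree'd (dynamic) or returned to cqp_avail_reqs (pre-allocated) once
 * the last reference is gone.
 */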
  269 
  270 /**
   271  * irdma_free_pending_cqp_request - free pending cqp request objs
  272  * @cqp: cqp ptr
  273  * @cqp_request: to be put back in cqp list
  274  */
  275 static void
  276 irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
  277                                struct irdma_cqp_request *cqp_request)
  278 {
  279         if (cqp_request->waiting) {
  280                 cqp_request->compl_info.error = true;
  281                 cqp_request->request_done = true;
  282                 wake_up(&cqp_request->waitq);
  283         }
  284         wait_event_timeout(cqp->remove_wq,
  285                            atomic_read(&cqp_request->refcnt) == 1, 1000);
  286         irdma_put_cqp_request(cqp, cqp_request);
  287 }
  288 
  289 /**
  290  * irdma_cleanup_pending_cqp_op - clean-up cqp with no
  291  * completions
  292  * @rf: RDMA PCI function
  293  */
  294 void
  295 irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
  296 {
  297         struct irdma_sc_dev *dev = &rf->sc_dev;
  298         struct irdma_cqp *cqp = &rf->cqp;
  299         struct irdma_cqp_request *cqp_request = NULL;
  300         struct cqp_cmds_info *pcmdinfo = NULL;
  301         u32 i, pending_work, wqe_idx;
  302 
  303         pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
  304         wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
  305         for (i = 0; i < pending_work; i++) {
  306                 cqp_request = (struct irdma_cqp_request *)(uintptr_t)
  307                     cqp->scratch_array[wqe_idx];
  308                 if (cqp_request)
  309                         irdma_free_pending_cqp_request(cqp, cqp_request);
  310                 wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
  311         }
  312 
  313         while (!list_empty(&dev->cqp_cmd_head)) {
  314                 pcmdinfo = irdma_remove_cqp_head(dev);
  315                 cqp_request =
  316                     container_of(pcmdinfo, struct irdma_cqp_request, info);
  317                 if (cqp_request)
  318                         irdma_free_pending_cqp_request(cqp, cqp_request);
  319         }
  320 }
  321 
  322 /**
  323  * irdma_wait_event - wait for completion
  324  * @rf: RDMA PCI function
  325  * @cqp_request: cqp request to wait
  326  */
  327 static int
  328 irdma_wait_event(struct irdma_pci_f *rf,
  329                  struct irdma_cqp_request *cqp_request)
  330 {
  331         struct irdma_cqp_timeout cqp_timeout = {0};
  332         int timeout_threshold = CQP_TIMEOUT_THRESHOLD;
  333         bool cqp_error = false;
  334         int err_code = 0;
  335 
  336         cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
  337         do {
  338                 int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms;
  339 
  340                 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
  341                 if (wait_event_timeout(cqp_request->waitq,
  342                                        cqp_request->request_done,
  343                                        msecs_to_jiffies(wait_time_ms)))
  344                         break;
  345 
  346                 irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
  347 
  348                 if (cqp_timeout.count < timeout_threshold)
  349                         continue;
  350 
  351                 if (!rf->reset) {
  352                         rf->reset = true;
  353                         rf->gen_ops.request_reset(rf);
  354                 }
  355                 return -ETIMEDOUT;
  356         } while (1);
  357 
  358         cqp_error = cqp_request->compl_info.error;
  359         if (cqp_error) {
  360                 err_code = -EIO;
  361                 if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
  362                         if (cqp_request->compl_info.min_err_code == 0x8002) {
  363                                 err_code = -EBUSY;
  364                         } else if (cqp_request->compl_info.min_err_code == 0x8029) {
  365                                 if (!rf->reset) {
  366                                         rf->reset = true;
  367                                         rf->gen_ops.request_reset(rf);
  368                                 }
  369                         }
  370                 }
  371         }
  372 
  373         return err_code;
  374 }
  375 
  376 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
  377         [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
  378         [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
  379         [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
  380         [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
  381         [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
   382         [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
  383         [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
  384         [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
  385         [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
  386         [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
  387         [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
  388         [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
  389         [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
  390         [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
  391         [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
  392         [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
  393         [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
  394         [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
  395         [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
  396         [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
  397         [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
  398         [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
  399         [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
  400         [IRDMA_OP_RESUME] = "Resume QP Cmd",
  401         [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
  402         [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
  403         [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
  404         [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
  405         [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
  406         [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
  407         [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
  408         [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
  409         [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
  410         [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
  411         [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
  412         [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
  413         [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
  414         [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
  415         [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
  416         [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
  417         [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
  418         [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
  419         [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
  420         [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
  421         [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
  422         [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
  423 };
  424 
  425 static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
  426         {0xffff, 0x8002, "Invalid State"},
  427         {0xffff, 0x8006, "Flush No Wqe Pending"},
  428         {0xffff, 0x8007, "Modify QP Bad Close"},
  429         {0xffff, 0x8009, "LLP Closed"},
  430         {0xffff, 0x800a, "Reset Not Sent"},
  431         {0xffff, 0x200, "Failover Pending"}
  432 };
  433 
  434 /**
  435  * irdma_cqp_crit_err - check if CQP error is critical
  436  * @dev: pointer to dev structure
  437  * @cqp_cmd: code for last CQP operation
  438  * @maj_err_code: major error code
   439  * @min_err_code: minor error code
  440  */
  441 bool
  442 irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
  443                    u16 maj_err_code, u16 min_err_code)
  444 {
  445         int i;
  446 
  447         for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
  448                 if (maj_err_code == irdma_noncrit_err_list[i].maj &&
  449                     min_err_code == irdma_noncrit_err_list[i].min) {
  450                         irdma_debug(dev, IRDMA_DEBUG_CQP,
  451                                     "[%s Error][%s] maj=0x%x min=0x%x\n",
  452                                     irdma_noncrit_err_list[i].desc,
  453                                     irdma_cqp_cmd_names[cqp_cmd],
  454                                     maj_err_code,
  455                                     min_err_code);
  456                         return false;
  457                 }
  458         }
  459         return true;
  460 }
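
/*
 * A CQP completion error is considered critical unless its (maj, min) pair
 * is listed in irdma_noncrit_err_list above; non-critical errors are only
 * logged at IRDMA_DEBUG_CQP level and do not trigger the error print in
 * irdma_handle_cqp_op().
 */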
  461 
  462 /**
  463  * irdma_handle_cqp_op - process cqp command
  464  * @rf: RDMA PCI function
  465  * @cqp_request: cqp request to process
  466  */
  467 int
  468 irdma_handle_cqp_op(struct irdma_pci_f *rf,
  469                     struct irdma_cqp_request *cqp_request)
  470 {
  471         struct irdma_sc_dev *dev = &rf->sc_dev;
  472         struct cqp_cmds_info *info = &cqp_request->info;
  473         int status;
  474         bool put_cqp_request = true;
  475 
  476         if (rf->reset)
  477                 return -EBUSY;
  478 
  479         irdma_get_cqp_request(cqp_request);
  480         status = irdma_process_cqp_cmd(dev, info);
  481         if (status)
  482                 goto err;
  483 
  484         if (cqp_request->waiting) {
  485                 put_cqp_request = false;
  486                 status = irdma_wait_event(rf, cqp_request);
  487                 if (status)
  488                         goto err;
  489         }
  490 
  491         return 0;
  492 
  493 err:
  494         if (irdma_cqp_crit_err(dev, info->cqp_cmd,
  495                                cqp_request->compl_info.maj_err_code,
  496                                cqp_request->compl_info.min_err_code))
  497                 irdma_dev_err(dev,
  498                               "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
  499                               irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
  500                               cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
  501                               cqp_request->compl_info.min_err_code);
  502 
  503         if (put_cqp_request)
  504                 irdma_put_cqp_request(&rf->cqp, cqp_request);
  505 
  506         return status;
  507 }
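
/*
 * Typical submission sequence (illustrative sketch only; the real users are
 * the *_cmd helpers below, e.g. irdma_cqp_sds_cmd()):
 *
 *      cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
 *      if (!cqp_request)
 *              return -ENOMEM;
 *      cqp_info = &cqp_request->info;
 *      cqp_info->cqp_cmd = IRDMA_OP_...;
 *      cqp_info->post_sq = 1;
 *      cqp_info->in.u.<op>.scratch = (uintptr_t)cqp_request;
 *      status = irdma_handle_cqp_op(rf, cqp_request);
 *      irdma_put_cqp_request(&rf->cqp, cqp_request);
 */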
  508 
  509 void
  510 irdma_qp_add_ref(struct ib_qp *ibqp)
  511 {
  512         struct irdma_qp *iwqp = to_iwqp(ibqp);
  513 
  514         atomic_inc(&iwqp->refcnt);
  515 }
  516 
  517 void
  518 irdma_qp_rem_ref(struct ib_qp *ibqp)
  519 {
  520         struct irdma_qp *iwqp = to_iwqp(ibqp);
  521         struct irdma_device *iwdev = iwqp->iwdev;
  522         unsigned long flags;
  523 
  524         spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
  525         if (!atomic_dec_and_test(&iwqp->refcnt)) {
  526                 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
  527                 return;
  528         }
  529 
  530         iwdev->rf->qp_table[iwqp->ibqp.qp_num] = NULL;
  531         spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
  532         complete(&iwqp->free_qp);
  533 }
  534 
  535 void
  536 irdma_cq_add_ref(struct ib_cq *ibcq)
  537 {
  538         struct irdma_cq *iwcq = to_iwcq(ibcq);
  539 
  540         atomic_inc(&iwcq->refcnt);
  541 }
  542 
  543 void
  544 irdma_cq_rem_ref(struct ib_cq *ibcq)
  545 {
  546         struct irdma_cq *iwcq = to_iwcq(ibcq);
  547         struct irdma_pci_f *rf = container_of(iwcq->sc_cq.dev, struct irdma_pci_f, sc_dev);
  548         unsigned long flags;
  549 
  550         spin_lock_irqsave(&rf->cqtable_lock, flags);
  551         if (!atomic_dec_and_test(&iwcq->refcnt)) {
  552                 spin_unlock_irqrestore(&rf->cqtable_lock, flags);
  553                 return;
  554         }
  555 
  556         rf->cq_table[iwcq->cq_num] = NULL;
  557         spin_unlock_irqrestore(&rf->cqtable_lock, flags);
  558         complete(&iwcq->free_cq);
  559 }
  560 
  561 struct ib_device *
  562 irdma_get_ibdev(struct irdma_sc_dev *dev)
  563 {
  564         return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
  565 }
  566 
  567 /**
  568  * irdma_get_qp - get qp address
  569  * @device: iwarp device
  570  * @qpn: qp number
  571  */
  572 struct ib_qp *
  573 irdma_get_qp(struct ib_device *device, int qpn)
  574 {
  575         struct irdma_device *iwdev = to_iwdev(device);
  576 
  577         if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
  578                 return NULL;
  579 
  580         return &iwdev->rf->qp_table[qpn]->ibqp;
  581 }
  582 
  583 /**
  584  * irdma_remove_cqp_head - return head entry and remove
  585  * @dev: device
  586  */
  587 void *
  588 irdma_remove_cqp_head(struct irdma_sc_dev *dev)
  589 {
  590         struct list_head *entry;
  591         struct list_head *list = &dev->cqp_cmd_head;
  592 
  593         if (list_empty(list))
  594                 return NULL;
  595 
  596         entry = list->next;
  597         list_del(entry);
  598 
  599         return entry;
  600 }
  601 
  602 /**
  603  * irdma_cqp_sds_cmd - create cqp command for sd
  604  * @dev: hardware control device structure
  605  * @sdinfo: information for sd cqp
  606  *
  607  */
  608 int
  609 irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
  610                   struct irdma_update_sds_info *sdinfo)
  611 {
  612         struct irdma_cqp_request *cqp_request;
  613         struct cqp_cmds_info *cqp_info;
  614         struct irdma_pci_f *rf = dev_to_rf(dev);
  615         int status;
  616 
  617         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
  618         if (!cqp_request)
  619                 return -ENOMEM;
  620 
  621         cqp_info = &cqp_request->info;
  622         memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
  623                sizeof(cqp_info->in.u.update_pe_sds.info));
  624         cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
  625         cqp_info->post_sq = 1;
  626         cqp_info->in.u.update_pe_sds.dev = dev;
  627         cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
  628 
  629         status = irdma_handle_cqp_op(rf, cqp_request);
  630         irdma_put_cqp_request(&rf->cqp, cqp_request);
  631 
  632         return status;
  633 }
  634 
  635 /**
  636  * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
  637  * @qp: hardware control qp
  638  * @op: suspend or resume
  639  */
  640 int
  641 irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
  642 {
  643         struct irdma_sc_dev *dev = qp->dev;
  644         struct irdma_cqp_request *cqp_request;
  645         struct irdma_sc_cqp *cqp = dev->cqp;
  646         struct cqp_cmds_info *cqp_info;
  647         struct irdma_pci_f *rf = dev_to_rf(dev);
  648         int status;
  649 
  650         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
  651         if (!cqp_request)
  652                 return -ENOMEM;
  653 
  654         cqp_info = &cqp_request->info;
  655         cqp_info->cqp_cmd = op;
  656         cqp_info->in.u.suspend_resume.cqp = cqp;
  657         cqp_info->in.u.suspend_resume.qp = qp;
  658         cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
  659 
  660         status = irdma_handle_cqp_op(rf, cqp_request);
  661         irdma_put_cqp_request(&rf->cqp, cqp_request);
  662 
  663         return status;
  664 }
  665 
  666 /**
  667  * irdma_term_modify_qp - modify qp for term message
  668  * @qp: hardware control qp
  669  * @next_state: qp's next state
  670  * @term: terminate code
  671  * @term_len: length
  672  */
  673 void
  674 irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
  675                      u8 term_len)
  676 {
  677         struct irdma_qp *iwqp;
  678 
  679         iwqp = qp->qp_uk.back_qp;
  680         irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
   681 }
  682 
  683 /**
  684  * irdma_terminate_done - after terminate is completed
  685  * @qp: hardware control qp
  686  * @timeout_occurred: indicates if terminate timer expired
  687  */
  688 void
  689 irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
  690 {
  691         struct irdma_qp *iwqp;
  692         u8 hte = 0;
  693         bool first_time;
  694         unsigned long flags;
  695 
  696         iwqp = qp->qp_uk.back_qp;
  697         spin_lock_irqsave(&iwqp->lock, flags);
  698         if (iwqp->hte_added) {
  699                 iwqp->hte_added = 0;
  700                 hte = 1;
  701         }
  702         first_time = !(qp->term_flags & IRDMA_TERM_DONE);
  703         qp->term_flags |= IRDMA_TERM_DONE;
  704         spin_unlock_irqrestore(&iwqp->lock, flags);
  705         if (first_time) {
  706                 if (!timeout_occurred)
  707                         irdma_terminate_del_timer(qp);
  708 
  709                 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
  710                 irdma_cm_disconn(iwqp);
  711         }
  712 }
  713 
  714 static void
  715 irdma_terminate_timeout(struct timer_list *t)
  716 {
  717         struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
  718         struct irdma_sc_qp *qp = &iwqp->sc_qp;
  719 
  720         irdma_terminate_done(qp, 1);
  721         irdma_qp_rem_ref(&iwqp->ibqp);
  722 }
  723 
  724 /**
  725  * irdma_terminate_start_timer - start terminate timeout
  726  * @qp: hardware control qp
  727  */
  728 void
  729 irdma_terminate_start_timer(struct irdma_sc_qp *qp)
  730 {
  731         struct irdma_qp *iwqp;
  732 
  733         iwqp = qp->qp_uk.back_qp;
  734         irdma_qp_add_ref(&iwqp->ibqp);
  735         timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
  736         iwqp->terminate_timer.expires = jiffies + HZ;
  737 
  738         add_timer(&iwqp->terminate_timer);
  739 }
  740 
  741 /**
  742  * irdma_terminate_del_timer - delete terminate timeout
  743  * @qp: hardware control qp
  744  */
  745 void
  746 irdma_terminate_del_timer(struct irdma_sc_qp *qp)
  747 {
  748         struct irdma_qp *iwqp;
  749         int ret;
  750 
  751         iwqp = qp->qp_uk.back_qp;
  752         ret = irdma_del_timer_compat(&iwqp->terminate_timer);
  753         if (ret)
  754                 irdma_qp_rem_ref(&iwqp->ibqp);
  755 }
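
/*
 * Timer reference handling: irdma_terminate_start_timer() takes a QP
 * reference before arming the one second timer.  That reference is released
 * either in irdma_terminate_timeout() when the timer fires, or in
 * irdma_terminate_del_timer() when the pending timer is cancelled
 * (irdma_del_timer_compat() returning nonzero).
 */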
  756 
  757 /**
  758  * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
  759  * @dev: function device struct
  760  * @val_mem: buffer for fpm
  761  * @hmc_fn_id: function id for fpm
  762  */
  763 int
  764 irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
  765                             struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
  766 {
  767         struct irdma_cqp_request *cqp_request;
  768         struct cqp_cmds_info *cqp_info;
  769         struct irdma_pci_f *rf = dev_to_rf(dev);
  770         int status;
  771 
  772         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
  773         if (!cqp_request)
  774                 return -ENOMEM;
  775 
  776         cqp_info = &cqp_request->info;
  777         cqp_request->param = NULL;
  778         cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
  779         cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
  780         cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
  781         cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
  782         cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
  783         cqp_info->post_sq = 1;
  784         cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
  785 
  786         status = irdma_handle_cqp_op(rf, cqp_request);
  787         irdma_put_cqp_request(&rf->cqp, cqp_request);
  788 
  789         return status;
  790 }
  791 
  792 /**
  793  * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
  794  * @dev: hardware control device structure
  795  * @val_mem: buffer with fpm values
  796  * @hmc_fn_id: function id for fpm
  797  */
  798 int
  799 irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
  800                              struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
  801 {
  802         struct irdma_cqp_request *cqp_request;
  803         struct cqp_cmds_info *cqp_info;
  804         struct irdma_pci_f *rf = dev_to_rf(dev);
  805         int status;
  806 
  807         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
  808         if (!cqp_request)
  809                 return -ENOMEM;
  810 
  811         cqp_info = &cqp_request->info;
  812         cqp_request->param = NULL;
  813         cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
  814         cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
  815         cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
  816         cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
  817         cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
  818         cqp_info->post_sq = 1;
  819         cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
  820 
  821         status = irdma_handle_cqp_op(rf, cqp_request);
  822         irdma_put_cqp_request(&rf->cqp, cqp_request);
  823 
  824         return status;
  825 }
  826 
  827 /**
  828  * irdma_cqp_cq_create_cmd - create a cq for the cqp
  829  * @dev: device pointer
  830  * @cq: pointer to created cq
  831  */
  832 int
  833 irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
  834 {
  835         struct irdma_pci_f *rf = dev_to_rf(dev);
  836         struct irdma_cqp *iwcqp = &rf->cqp;
  837         struct irdma_cqp_request *cqp_request;
  838         struct cqp_cmds_info *cqp_info;
  839         int status;
  840 
  841         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
  842         if (!cqp_request)
  843                 return -ENOMEM;
  844 
  845         cqp_info = &cqp_request->info;
  846         cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
  847         cqp_info->post_sq = 1;
  848         cqp_info->in.u.cq_create.cq = cq;
  849         cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
  850 
  851         status = irdma_handle_cqp_op(rf, cqp_request);
  852         irdma_put_cqp_request(iwcqp, cqp_request);
  853 
  854         return status;
  855 }
  856 
  857 /**
  858  * irdma_cqp_qp_create_cmd - create a qp for the cqp
  859  * @dev: device pointer
  860  * @qp: pointer to created qp
  861  */
  862 int
  863 irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
  864 {
  865         struct irdma_pci_f *rf = dev_to_rf(dev);
  866         struct irdma_cqp *iwcqp = &rf->cqp;
  867         struct irdma_cqp_request *cqp_request;
  868         struct cqp_cmds_info *cqp_info;
  869         struct irdma_create_qp_info *qp_info;
  870         int status;
  871 
  872         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
  873         if (!cqp_request)
  874                 return -ENOMEM;
  875 
  876         cqp_info = &cqp_request->info;
  877         qp_info = &cqp_request->info.in.u.qp_create.info;
  878         memset(qp_info, 0, sizeof(*qp_info));
  879         qp_info->cq_num_valid = true;
  880         qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
  881         cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
  882         cqp_info->post_sq = 1;
  883         cqp_info->in.u.qp_create.qp = qp;
  884         cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
  885 
  886         status = irdma_handle_cqp_op(rf, cqp_request);
  887         irdma_put_cqp_request(iwcqp, cqp_request);
  888 
  889         return status;
  890 }
  891 
  892 /**
  893  * irdma_dealloc_push_page - free a push page for qp
  894  * @rf: RDMA PCI function
  895  * @qp: hardware control qp
  896  */
  897 void
  898 irdma_dealloc_push_page(struct irdma_pci_f *rf,
  899                         struct irdma_sc_qp *qp)
  900 {
  901         struct irdma_cqp_request *cqp_request;
  902         struct cqp_cmds_info *cqp_info;
  903         int status;
  904 
  905         if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
  906                 return;
  907 
  908         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
  909         if (!cqp_request)
  910                 return;
  911 
  912         cqp_info = &cqp_request->info;
  913         cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
  914         cqp_info->post_sq = 1;
  915         cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
  916         cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
  917         cqp_info->in.u.manage_push_page.info.free_page = 1;
  918         cqp_info->in.u.manage_push_page.info.push_page_type = 0;
  919         cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
  920         cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
  921         status = irdma_handle_cqp_op(rf, cqp_request);
  922         if (!status)
  923                 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
  924         irdma_put_cqp_request(&rf->cqp, cqp_request);
  925 }
  926 
  927 /**
  928  * irdma_cq_wq_destroy - send cq destroy cqp
  929  * @rf: RDMA PCI function
  930  * @cq: hardware control cq
  931  */
  932 void
  933 irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
  934 {
  935         struct irdma_cqp_request *cqp_request;
  936         struct cqp_cmds_info *cqp_info;
  937 
  938         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
  939         if (!cqp_request)
  940                 return;
  941 
  942         cqp_info = &cqp_request->info;
  943         cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
  944         cqp_info->post_sq = 1;
  945         cqp_info->in.u.cq_destroy.cq = cq;
  946         cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
  947 
  948         irdma_handle_cqp_op(rf, cqp_request);
  949         irdma_put_cqp_request(&rf->cqp, cqp_request);
  950 }
  951 
  952 /**
   953  * irdma_hw_modify_qp_callback - handle state for modify QPs that don't wait
  954  * @cqp_request: modify QP completion
  955  */
  956 static void
  957 irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
  958 {
  959         struct cqp_cmds_info *cqp_info;
  960         struct irdma_qp *iwqp;
  961 
  962         cqp_info = &cqp_request->info;
  963         iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
  964         atomic_dec(&iwqp->hw_mod_qp_pend);
  965         wake_up(&iwqp->mod_qp_waitq);
  966 }
  967 
  968 /**
  969  * irdma_hw_modify_qp - setup cqp for modify qp
  970  * @iwdev: RDMA device
  971  * @iwqp: qp ptr (user or kernel)
  972  * @info: info for modify qp
  973  * @wait: flag to wait or not for modify qp completion
  974  */
  975 int
  976 irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
  977                    struct irdma_modify_qp_info *info, bool wait)
  978 {
  979         int status;
  980         struct irdma_pci_f *rf = iwdev->rf;
  981         struct irdma_cqp_request *cqp_request;
  982         struct cqp_cmds_info *cqp_info;
  983         struct irdma_modify_qp_info *m_info;
  984 
  985         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
  986         if (!cqp_request)
  987                 return -ENOMEM;
  988 
  989         if (!wait) {
  990                 cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
  991                 atomic_inc(&iwqp->hw_mod_qp_pend);
  992         }
  993         cqp_info = &cqp_request->info;
  994         m_info = &cqp_info->in.u.qp_modify.info;
  995         memcpy(m_info, info, sizeof(*m_info));
  996         cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
  997         cqp_info->post_sq = 1;
  998         cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
  999         cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
 1000         status = irdma_handle_cqp_op(rf, cqp_request);
 1001         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1002         if (status) {
 1003                 if (rdma_protocol_roce(&iwdev->ibdev, 1))
 1004                         return status;
 1005 
 1006                 switch (m_info->next_iwarp_state) {
 1007                         struct irdma_gen_ae_info ae_info;
 1008 
 1009                 case IRDMA_QP_STATE_RTS:
 1010                 case IRDMA_QP_STATE_IDLE:
 1011                 case IRDMA_QP_STATE_TERMINATE:
 1012                 case IRDMA_QP_STATE_CLOSING:
 1013                         if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
 1014                                 irdma_send_reset(iwqp->cm_node);
 1015                         else
 1016                                 iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
 1017                         if (!wait) {
 1018                                 ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
 1019                                 ae_info.ae_src = 0;
 1020                                 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
 1021                         } else {
 1022                                 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
 1023                                                                               wait);
 1024                                 if (!cqp_request)
 1025                                         return -ENOMEM;
 1026 
 1027                                 cqp_info = &cqp_request->info;
 1028                                 m_info = &cqp_info->in.u.qp_modify.info;
 1029                                 memcpy(m_info, info, sizeof(*m_info));
 1030                                 cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
 1031                                 cqp_info->post_sq = 1;
 1032                                 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
 1033                                 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
 1034                                 m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
 1035                                 m_info->reset_tcp_conn = true;
 1036                                 irdma_handle_cqp_op(rf, cqp_request);
 1037                                 irdma_put_cqp_request(&rf->cqp, cqp_request);
 1038                         }
 1039                         break;
 1040                 case IRDMA_QP_STATE_ERROR:
 1041                 default:
 1042                         break;
 1043                 }
 1044         }
 1045 
 1046         return status;
 1047 }
 1048 
 1049 /**
 1050  * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
 1051  * @dev: device pointer
 1052  * @cq: pointer to cq
 1053  */
 1054 void
 1055 irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
 1056 {
 1057         struct irdma_pci_f *rf = dev_to_rf(dev);
 1058 
 1059         irdma_cq_wq_destroy(rf, cq);
 1060 }
 1061 
 1062 /**
  1063  * irdma_cqp_qp_destroy_cmd - destroy a qp via the cqp
 1064  * @dev: device pointer
 1065  * @qp: pointer to qp
 1066  */
 1067 int
 1068 irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 1069 {
 1070         struct irdma_pci_f *rf = dev_to_rf(dev);
 1071         struct irdma_cqp *iwcqp = &rf->cqp;
 1072         struct irdma_cqp_request *cqp_request;
 1073         struct cqp_cmds_info *cqp_info;
 1074         int status;
 1075 
 1076         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
 1077         if (!cqp_request)
 1078                 return -ENOMEM;
 1079 
 1080         cqp_info = &cqp_request->info;
 1081         memset(cqp_info, 0, sizeof(*cqp_info));
 1082         cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
 1083         cqp_info->post_sq = 1;
 1084         cqp_info->in.u.qp_destroy.qp = qp;
 1085         cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
 1086         cqp_info->in.u.qp_destroy.remove_hash_idx = true;
 1087 
 1088         status = irdma_handle_cqp_op(rf, cqp_request);
 1089         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1090 
 1091         return status;
 1092 }
 1093 
 1094 /**
 1095  * irdma_ieq_mpa_crc_ae - generate AE for crc error
 1096  * @dev: hardware control device structure
 1097  * @qp: hardware control qp
 1098  */
 1099 void
 1100 irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 1101 {
 1102         struct irdma_gen_ae_info info = {0};
 1103         struct irdma_pci_f *rf = dev_to_rf(dev);
 1104 
 1105         irdma_debug(dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n");
 1106         info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
 1107         info.ae_src = IRDMA_AE_SOURCE_RQ;
 1108         irdma_gen_ae(rf, qp, &info, false);
 1109 }
 1110 
 1111 /**
 1112  * irdma_ieq_get_qp - get qp based on quad in puda buffer
 1113  * @dev: hardware control device structure
 1114  * @buf: receive puda buffer on exception q
 1115  */
 1116 struct irdma_sc_qp *
 1117 irdma_ieq_get_qp(struct irdma_sc_dev *dev,
 1118                  struct irdma_puda_buf *buf)
 1119 {
 1120         struct irdma_qp *iwqp;
 1121         struct irdma_cm_node *cm_node;
 1122         struct irdma_device *iwdev = buf->vsi->back_vsi;
 1123         u32 loc_addr[4] = {0};
 1124         u32 rem_addr[4] = {0};
 1125         u16 loc_port, rem_port;
 1126         struct ip6_hdr *ip6h;
 1127         struct ip *iph = (struct ip *)buf->iph;
 1128         struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
 1129 
 1130         if (iph->ip_v == 4) {
 1131                 loc_addr[0] = ntohl(iph->ip_dst.s_addr);
 1132                 rem_addr[0] = ntohl(iph->ip_src.s_addr);
 1133         } else {
 1134                 ip6h = (struct ip6_hdr *)buf->iph;
 1135                 irdma_copy_ip_ntohl(loc_addr, ip6h->ip6_dst.__u6_addr.__u6_addr32);
 1136                 irdma_copy_ip_ntohl(rem_addr, ip6h->ip6_src.__u6_addr.__u6_addr32);
 1137         }
 1138         loc_port = ntohs(tcph->th_dport);
 1139         rem_port = ntohs(tcph->th_sport);
 1140         cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
 1141                                   loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
 1142         if (!cm_node)
 1143                 return NULL;
 1144 
 1145         iwqp = cm_node->iwqp;
 1146         irdma_rem_ref_cm_node(cm_node);
 1147 
 1148         return &iwqp->sc_qp;
 1149 }
 1150 
 1151 /**
  1152  * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
 1153  * @qp: qp ptr
 1154  */
 1155 void
 1156 irdma_send_ieq_ack(struct irdma_sc_qp *qp)
 1157 {
 1158         struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
 1159         struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
 1160         struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
 1161 
 1162         cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
 1163         cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
 1164 
 1165         irdma_send_ack(cm_node);
 1166 }
 1167 
 1168 /**
 1169  * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
 1170  * @qp: qp pointer
 1171  * @ah_info: AH info pointer
 1172  */
 1173 void
 1174 irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
 1175                            struct irdma_ah_info *ah_info)
 1176 {
 1177         struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
 1178         struct ip *iph;
 1179         struct ip6_hdr *ip6h;
 1180 
 1181         memset(ah_info, 0, sizeof(*ah_info));
 1182         ah_info->do_lpbk = true;
 1183         ah_info->vlan_tag = buf->vlan_id;
 1184         ah_info->insert_vlan_tag = buf->vlan_valid;
 1185         ah_info->ipv4_valid = buf->ipv4;
 1186         ah_info->vsi = qp->vsi;
 1187 
 1188         if (buf->smac_valid)
 1189                 ether_addr_copy(ah_info->mac_addr, buf->smac);
 1190 
 1191         if (buf->ipv4) {
 1192                 ah_info->ipv4_valid = true;
 1193                 iph = (struct ip *)buf->iph;
 1194                 ah_info->hop_ttl = iph->ip_ttl;
 1195                 ah_info->tc_tos = iph->ip_tos;
 1196                 ah_info->dest_ip_addr[0] = ntohl(iph->ip_dst.s_addr);
 1197                 ah_info->src_ip_addr[0] = ntohl(iph->ip_src.s_addr);
 1198         } else {
 1199                 ip6h = (struct ip6_hdr *)buf->iph;
 1200                 ah_info->hop_ttl = ip6h->ip6_hops;
 1201                 ah_info->tc_tos = ip6h->ip6_vfc;
 1202                 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
 1203                                     ip6h->ip6_dst.__u6_addr.__u6_addr32);
 1204                 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
 1205                                     ip6h->ip6_src.__u6_addr.__u6_addr32);
 1206         }
 1207 
 1208         ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
 1209                                                 ah_info->dest_ip_addr,
 1210                                                 NULL, IRDMA_ARP_RESOLVE);
 1211 }
 1212 
 1213 /**
 1214  * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
 1215  * @buf: puda to update
 1216  * @len: length of buffer
 1217  * @seqnum: seq number for tcp
 1218  */
 1219 static void
 1220 irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
 1221                                  u16 len, u32 seqnum)
 1222 {
 1223         struct tcphdr *tcph;
 1224         struct ip *iph;
 1225         u16 iphlen;
 1226         u16 pktsize;
 1227         u8 *addr = buf->mem.va;
 1228 
 1229         iphlen = (buf->ipv4) ? 20 : 40;
 1230         iph = (struct ip *)(addr + buf->maclen);
 1231         tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
 1232         pktsize = len + buf->tcphlen + iphlen;
 1233         iph->ip_len = htons(pktsize);
 1234         tcph->th_seq = htonl(seqnum);
 1235 }
 1236 
 1237 /**
 1238  * irdma_ieq_update_tcpip_info - update tcpip in the buffer
 1239  * @buf: puda to update
 1240  * @len: length of buffer
 1241  * @seqnum: seq number for tcp
 1242  */
 1243 void
 1244 irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
 1245                             u32 seqnum)
 1246 {
 1247         struct tcphdr *tcph;
 1248         u8 *addr;
 1249 
 1250         if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
 1251                 return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
 1252 
 1253         addr = buf->mem.va;
 1254         tcph = (struct tcphdr *)addr;
 1255         tcph->th_seq = htonl(seqnum);
 1256 }
 1257 
 1258 /**
 1259  * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
 1260  * buffer
 1261  * @info: to get information
 1262  * @buf: puda buffer
 1263  */
 1264 static int
 1265 irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
 1266                                struct irdma_puda_buf *buf)
 1267 {
 1268         struct ip *iph;
 1269         struct ip6_hdr *ip6h;
 1270         struct tcphdr *tcph;
 1271         u16 iphlen;
 1272         u16 pkt_len;
 1273         u8 *mem = buf->mem.va;
 1274         struct ether_header *ethh = buf->mem.va;
 1275 
 1276         if (ethh->ether_type == htons(0x8100)) {
 1277                 info->vlan_valid = true;
 1278                 buf->vlan_id = ntohs(((struct ether_vlan_header *)ethh)->evl_tag) &
 1279                     EVL_VLID_MASK;
 1280         }
 1281 
 1282         buf->maclen = (info->vlan_valid) ? 18 : 14;
 1283         iphlen = (info->l3proto) ? 40 : 20;
 1284         buf->ipv4 = (info->l3proto) ? false : true;
 1285         buf->iph = mem + buf->maclen;
 1286         iph = (struct ip *)buf->iph;
 1287         buf->tcph = buf->iph + iphlen;
 1288         tcph = (struct tcphdr *)buf->tcph;
 1289 
 1290         if (buf->ipv4) {
 1291                 pkt_len = ntohs(iph->ip_len);
 1292         } else {
 1293                 ip6h = (struct ip6_hdr *)buf->iph;
 1294                 pkt_len = ntohs(ip6h->ip6_plen) + iphlen;
 1295         }
 1296 
 1297         buf->totallen = pkt_len + buf->maclen;
 1298 
 1299         if (info->payload_len < buf->totallen) {
 1300                 irdma_debug(buf->vsi->dev, IRDMA_DEBUG_ERR,
  1301                             "payload_len = 0x%x totallen expected 0x%x\n",
 1302                             info->payload_len, buf->totallen);
 1303                 return -EINVAL;
 1304         }
 1305 
 1306         buf->tcphlen = tcph->th_off << 2;
 1307         buf->datalen = pkt_len - iphlen - buf->tcphlen;
 1308         buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
 1309         buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
 1310         buf->seqnum = ntohl(tcph->th_seq);
 1311 
 1312         return 0;
 1313 }
 1314 
 1315 /**
 1316  * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
 1317  * @info: to get information
 1318  * @buf: puda buffer
 1319  */
 1320 int
 1321 irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
 1322                           struct irdma_puda_buf *buf)
 1323 {
 1324         struct tcphdr *tcph;
 1325         u32 pkt_len;
 1326         u8 *mem;
 1327 
 1328         if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
 1329                 return irdma_gen1_puda_get_tcpip_info(info, buf);
 1330 
 1331         mem = buf->mem.va;
 1332         buf->vlan_valid = info->vlan_valid;
 1333         if (info->vlan_valid)
 1334                 buf->vlan_id = info->vlan;
 1335 
 1336         buf->ipv4 = info->ipv4;
 1337         if (buf->ipv4)
 1338                 buf->iph = mem + IRDMA_IPV4_PAD;
 1339         else
 1340                 buf->iph = mem;
 1341 
 1342         buf->tcph = mem + IRDMA_TCP_OFFSET;
 1343         tcph = (struct tcphdr *)buf->tcph;
 1344         pkt_len = info->payload_len;
 1345         buf->totallen = pkt_len;
 1346         buf->tcphlen = tcph->th_off << 2;
 1347         buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
 1348         buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
 1349         buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
 1350         buf->seqnum = ntohl(tcph->th_seq);
 1351 
 1352         if (info->smac_valid) {
 1353                 ether_addr_copy(buf->smac, info->smac);
 1354                 buf->smac_valid = true;
 1355         }
 1356 
 1357         return 0;
 1358 }
 1359 
 1360 /**
 1361  * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
 1362  * @t: timer_list pointer
 1363  */
 1364 static void
 1365 irdma_hw_stats_timeout(struct timer_list *t)
 1366 {
 1367         struct irdma_vsi_pestat *pf_devstat =
 1368         from_timer(pf_devstat, t, stats_timer);
 1369         struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
 1370 
 1371         if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 1372                 irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
 1373 
 1374         mod_timer(&pf_devstat->stats_timer,
 1375                   jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
 1376 }
 1377 
 1378 /**
 1379  * irdma_hw_stats_start_timer - Start periodic stats timer
 1380  * @vsi: vsi structure pointer
 1381  */
 1382 void
 1383 irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
 1384 {
 1385         struct irdma_vsi_pestat *devstat = vsi->pestat;
 1386 
 1387         timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
 1388         mod_timer(&devstat->stats_timer,
 1389                   jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
 1390 }
 1391 
 1392 /**
 1393  * irdma_hw_stats_stop_timer - Delete periodic stats timer
 1394  * @vsi: pointer to vsi structure
 1395  */
 1396 void
 1397 irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
 1398 {
 1399         struct irdma_vsi_pestat *devstat = vsi->pestat;
 1400 
 1401         del_timer_sync(&devstat->stats_timer);
 1402 }
 1403 
 1404 /**
  1405  * irdma_process_stats - check for wrap and update stats
 1406  * @pestat: stats structure pointer
 1407  */
 1408 static inline void
 1409 irdma_process_stats(struct irdma_vsi_pestat *pestat)
 1410 {
 1411         sc_vsi_update_stats(pestat->vsi);
 1412 }
 1413 
 1414 /**
  1415  * irdma_process_cqp_stats - check for wrap and update stats
 1416  * @cqp_request: cqp_request structure pointer
 1417  */
 1418 static void
 1419 irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
 1420 {
 1421         struct irdma_vsi_pestat *pestat = cqp_request->param;
 1422 
 1423         irdma_process_stats(pestat);
 1424 }
 1425 
 1426 /**
 1427  * irdma_cqp_gather_stats_cmd - Gather stats
 1428  * @dev: pointer to device structure
 1429  * @pestat: pointer to stats info
 1430  * @wait: flag to wait or not wait for stats
 1431  */
 1432 int
 1433 irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
 1434                            struct irdma_vsi_pestat *pestat, bool wait)
 1435 {
 1436 
 1437         struct irdma_pci_f *rf = dev_to_rf(dev);
 1438         struct irdma_cqp *iwcqp = &rf->cqp;
 1439         struct irdma_cqp_request *cqp_request;
 1440         struct cqp_cmds_info *cqp_info;
 1441         int status;
 1442 
 1443         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
 1444         if (!cqp_request)
 1445                 return -ENOMEM;
 1446 
 1447         cqp_info = &cqp_request->info;
 1448         memset(cqp_info, 0, sizeof(*cqp_info));
 1449         cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
 1450         cqp_info->post_sq = 1;
 1451         cqp_info->in.u.stats_gather.info = pestat->gather_info;
 1452         cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
 1453         cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
 1454         cqp_request->param = pestat;
 1455         if (!wait)
 1456                 cqp_request->callback_fcn = irdma_process_cqp_stats;
 1457         status = irdma_handle_cqp_op(rf, cqp_request);
 1458         if (wait)
 1459                 irdma_process_stats(pestat);
 1460         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1461 
 1462         return status;
 1463 }
 1464 
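/*
 * Illustrative usage sketch of the two ways irdma_cqp_gather_stats_cmd() is
 * driven: with wait == true the stats are processed before returning, and
 * with wait == false irdma_process_cqp_stats() runs later from the CQP
 * completion path. The example_* helper is hypothetical and not part of the
 * driver; dev and pestat are assumed valid.
 */
static int
example_gather_stats(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat)
{
        int err;

        /* blocking gather, e.g. from a query path running in process context */
        err = irdma_cqp_gather_stats_cmd(dev, pestat, true);
        if (err)
                return err;

        /* non-blocking gather, as issued by irdma_hw_stats_timeout() */
        return irdma_cqp_gather_stats_cmd(dev, pestat, false);
}
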
 1465 /**
 1466  * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
 1467  * @vsi: pointer to vsi structure
 1468  * @cmd: command to allocate or free
 1469  * @stats_info: pointer to allocate stats info
 1470  */
 1471 int
 1472 irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
 1473                          struct irdma_stats_inst_info *stats_info)
 1474 {
 1475         struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
 1476         struct irdma_cqp *iwcqp = &rf->cqp;
 1477         struct irdma_cqp_request *cqp_request;
 1478         struct cqp_cmds_info *cqp_info;
 1479         int status;
 1480         bool wait = false;
 1481 
 1482         if (cmd == IRDMA_OP_STATS_ALLOCATE)
 1483                 wait = true;
 1484         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
 1485         if (!cqp_request)
 1486                 return -ENOMEM;
 1487 
 1488         cqp_info = &cqp_request->info;
 1489         memset(cqp_info, 0, sizeof(*cqp_info));
 1490         cqp_info->cqp_cmd = cmd;
 1491         cqp_info->post_sq = 1;
 1492         cqp_info->in.u.stats_manage.info = *stats_info;
 1493         cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
 1494         cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
 1495         status = irdma_handle_cqp_op(rf, cqp_request);
 1496         if (wait)
 1497                 stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
 1498         irdma_put_cqp_request(iwcqp, cqp_request);
 1499 
 1500         return status;
 1501 }
 1502 
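/*
 * Illustrative usage sketch: allocating a stats instance. The
 * IRDMA_OP_STATS_ALLOCATE path waits for completion, so the firmware-assigned
 * index is available in stats_info->stats_idx on return. The example_* helper
 * is hypothetical and not part of the driver; stats_info is assumed to be
 * prepared by the caller.
 */
static int
example_alloc_stats_instance(struct irdma_sc_vsi *vsi,
                             struct irdma_stats_inst_info *stats_info)
{
        int err;

        err = irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, stats_info);
        if (err)
                return err;

        /* stats_info->stats_idx now holds the allocated statistics index */
        return 0;
}
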
 1503 /**
 1504  * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
 1505  * @dev: pointer to device info
 1506  * @sc_ceq: pointer to ceq structure
 1507  * @op: Create or Destroy
 1508  */
 1509 int
 1510 irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
 1511                   u8 op)
 1512 {
 1513         struct irdma_cqp_request *cqp_request;
 1514         struct cqp_cmds_info *cqp_info;
 1515         struct irdma_pci_f *rf = dev_to_rf(dev);
 1516         int status;
 1517 
 1518         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 1519         if (!cqp_request)
 1520                 return -ENOMEM;
 1521 
 1522         cqp_info = &cqp_request->info;
 1523         cqp_info->post_sq = 1;
 1524         cqp_info->cqp_cmd = op;
 1525         cqp_info->in.u.ceq_create.ceq = sc_ceq;
 1526         cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
 1527 
 1528         status = irdma_handle_cqp_op(rf, cqp_request);
 1529         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1530 
 1531         return status;
 1532 }
 1533 
 1534 /**
 1535  * irdma_cqp_aeq_cmd - Create/Destroy AEQ
 1536  * @dev: pointer to device info
 1537  * @sc_aeq: pointer to aeq structure
 1538  * @op: Create or Destroy
 1539  */
 1540 int
 1541 irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
 1542                   u8 op)
 1543 {
 1544         struct irdma_cqp_request *cqp_request;
 1545         struct cqp_cmds_info *cqp_info;
 1546         struct irdma_pci_f *rf = dev_to_rf(dev);
 1547         int status;
 1548 
 1549         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 1550         if (!cqp_request)
 1551                 return -ENOMEM;
 1552 
 1553         cqp_info = &cqp_request->info;
 1554         cqp_info->post_sq = 1;
 1555         cqp_info->cqp_cmd = op;
 1556         cqp_info->in.u.aeq_create.aeq = sc_aeq;
 1557         cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
 1558 
 1559         status = irdma_handle_cqp_op(rf, cqp_request);
 1560         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1561 
 1562         return status;
 1563 }
 1564 
 1565 /**
 1566  * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
 1567  * @dev: pointer to device structure
 1568  * @cmd: Add, modify or delete
 1569  * @node_info: pointer to ws node info
 1570  */
 1571 int
 1572 irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
 1573                       struct irdma_ws_node_info *node_info)
 1574 {
 1575         struct irdma_pci_f *rf = dev_to_rf(dev);
 1576         struct irdma_cqp *iwcqp = &rf->cqp;
 1577         struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
 1578         struct irdma_cqp_request *cqp_request;
 1579         struct cqp_cmds_info *cqp_info;
 1580         int status;
 1581         bool poll;
 1582 
 1583         if (!rf->sc_dev.ceq_valid)
 1584                 poll = true;
 1585         else
 1586                 poll = false;
 1587 
 1588         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
 1589         if (!cqp_request)
 1590                 return -ENOMEM;
 1591 
 1592         cqp_info = &cqp_request->info;
 1593         memset(cqp_info, 0, sizeof(*cqp_info));
 1594         cqp_info->cqp_cmd = cmd;
 1595         cqp_info->post_sq = 1;
 1596         cqp_info->in.u.ws_node.info = *node_info;
 1597         cqp_info->in.u.ws_node.cqp = cqp;
 1598         cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
 1599         status = irdma_handle_cqp_op(rf, cqp_request);
 1600         if (status)
 1601                 goto exit;
 1602 
 1603         if (poll) {
 1604                 struct irdma_ccq_cqe_info compl_info;
 1605 
 1606                 status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
 1607                                                        &compl_info);
 1608                 node_info->qs_handle = compl_info.op_ret_val;
 1609                 irdma_debug(cqp->dev, IRDMA_DEBUG_DCB,
 1610                             "opcode=%d, compl_info.retval=%d\n",
 1611                             compl_info.op_code, compl_info.op_ret_val);
 1612         } else {
 1613                 node_info->qs_handle = cqp_request->compl_info.op_ret_val;
 1614         }
 1615 
 1616 exit:
 1617         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1618 
 1619         return status;
 1620 }
 1621 
 1622 /**
 1623  * irdma_cqp_up_map_cmd - Set the up-up mapping
 1624  * @dev: pointer to device structure
 1625  * @cmd: map command
 1626  * @map_info: pointer to up map info
 1627  */
 1628 int
 1629 irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
 1630                      struct irdma_up_info *map_info)
 1631 {
 1632         struct irdma_pci_f *rf = dev_to_rf(dev);
 1633         struct irdma_cqp *iwcqp = &rf->cqp;
 1634         struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
 1635         struct irdma_cqp_request *cqp_request;
 1636         struct cqp_cmds_info *cqp_info;
 1637         int status;
 1638 
 1639         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
 1640         if (!cqp_request)
 1641                 return -ENOMEM;
 1642 
 1643         cqp_info = &cqp_request->info;
 1644         memset(cqp_info, 0, sizeof(*cqp_info));
 1645         cqp_info->cqp_cmd = cmd;
 1646         cqp_info->post_sq = 1;
 1647         cqp_info->in.u.up_map.info = *map_info;
 1648         cqp_info->in.u.up_map.cqp = cqp;
 1649         cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
 1650 
 1651         status = irdma_handle_cqp_op(rf, cqp_request);
 1652         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1653 
 1654         return status;
 1655 }
 1656 
 1657 /**
 1658  * irdma_ah_cqp_op - perform an AH cqp operation
 1659  * @rf: RDMA PCI function
 1660  * @sc_ah: address handle
 1661  * @cmd: AH operation
 1662  * @wait: wait if true
 1663  * @callback_fcn: Callback function on CQP op completion
 1664  * @cb_param: parameter for callback function
 1665  *
 1666  * returns errno
 1667  */
 1668 int
 1669 irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
 1670                 bool wait,
 1671                 void (*callback_fcn) (struct irdma_cqp_request *),
 1672                 void *cb_param)
 1673 {
 1674         struct irdma_cqp_request *cqp_request;
 1675         struct cqp_cmds_info *cqp_info;
 1676         int status;
 1677 
 1678         if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
 1679                 return -EINVAL;
 1680 
 1681         cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
 1682         if (!cqp_request)
 1683                 return -ENOMEM;
 1684 
 1685         cqp_info = &cqp_request->info;
 1686         cqp_info->cqp_cmd = cmd;
 1687         cqp_info->post_sq = 1;
 1688         if (cmd == IRDMA_OP_AH_CREATE) {
 1689                 cqp_info->in.u.ah_create.info = sc_ah->ah_info;
 1690                 cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
 1691                 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
 1692         } else if (cmd == IRDMA_OP_AH_DESTROY) {
 1693                 cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
 1694                 cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
 1695                 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
 1696         }
 1697 
 1698         if (!wait) {
 1699                 cqp_request->callback_fcn = callback_fcn;
 1700                 cqp_request->param = cb_param;
 1701         }
 1702         status = irdma_handle_cqp_op(rf, cqp_request);
 1703         irdma_put_cqp_request(&rf->cqp, cqp_request);
 1704 
 1705         if (status)
 1706                 return -ENOMEM;
 1707 
 1708         if (wait)
 1709                 sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
 1710 
 1711         return 0;
 1712 }
 1713 
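/*
 * Illustrative usage sketch: a synchronous AH create through irdma_ah_cqp_op().
 * With wait == true no callback is needed and ah_info.ah_valid is updated by
 * irdma_ah_cqp_op() itself on success. The example_* helper is hypothetical
 * and not part of the driver; rf and sc_ah are assumed fully initialized.
 */
static int
example_create_ah_sync(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah)
{
        return irdma_ah_cqp_op(rf, sc_ah, IRDMA_OP_AH_CREATE, true, NULL, NULL);
}
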
 1714 /**
 1715  * irdma_ieq_ah_cb - callback after creation of AH for IEQ
 1716  * @cqp_request: pointer to cqp_request of create AH
 1717  */
 1718 static void
 1719 irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
 1720 {
 1721         struct irdma_sc_qp *qp = cqp_request->param;
 1722         struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
 1723         unsigned long flags;
 1724 
 1725         spin_lock_irqsave(&qp->pfpdu.lock, flags);
 1726         if (!cqp_request->compl_info.op_ret_val) {
 1727                 sc_ah->ah_info.ah_valid = true;
 1728                 irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
 1729         } else {
 1730                 sc_ah->ah_info.ah_valid = false;
 1731                 irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
 1732         }
 1733         spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
 1734 }
 1735 
 1736 /**
 1737  * irdma_ilq_ah_cb - callback after creation of AH for ILQ
 1738  * @cqp_request: pointer to cqp_request of create AH
 1739  */
 1740 static void
 1741 irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
 1742 {
 1743         struct irdma_cm_node *cm_node = cqp_request->param;
 1744         struct irdma_sc_ah *sc_ah = cm_node->ah;
 1745 
 1746         sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
 1747         irdma_add_conn_est_qh(cm_node);
 1748 }
 1749 
 1750 /**
 1751  * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
 1752  * @dev: device pointer
 1753  * @ah_info: Address handle info
 1754  * @wait: When true will wait for operation to complete
 1755  * @type: ILQ/IEQ
 1756  * @cb_param: Callback param when not waiting
 1757  * @ah_ret: Returned pointer to address handle if created
 1758  *
 1759  */
 1760 int
 1761 irdma_puda_create_ah(struct irdma_sc_dev *dev,
 1762                      struct irdma_ah_info *ah_info, bool wait,
 1763                      enum puda_rsrc_type type, void *cb_param,
 1764                      struct irdma_sc_ah **ah_ret)
 1765 {
 1766         struct irdma_sc_ah *ah;
 1767         struct irdma_pci_f *rf = dev_to_rf(dev);
 1768         int err;
 1769 
 1770         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 1771         *ah_ret = ah;
 1772         if (!ah)
 1773                 return -ENOMEM;
 1774 
 1775         err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
 1776                                &ah_info->ah_idx, &rf->next_ah);
 1777         if (err)
 1778                 goto err_free;
 1779 
 1780         ah->dev = dev;
 1781         ah->ah_info = *ah_info;
 1782 
 1783         if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
 1784                 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
 1785                                       irdma_ilq_ah_cb, cb_param);
 1786         else
 1787                 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
 1788                                       irdma_ieq_ah_cb, cb_param);
 1789 
 1790         if (err)
 1791                 goto error;
 1792         return 0;
 1793 
 1794 error:
 1795         irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
 1796 err_free:
 1797         kfree(ah);
 1798         *ah_ret = NULL;
 1799         return -ENOMEM;
 1800 }
 1801 
 1802 /**
 1803  * irdma_puda_free_ah - free a puda address handle
 1804  * @dev: device pointer
 1805  * @ah: The address handle to free
 1806  */
 1807 void
 1808 irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
 1809 {
 1810         struct irdma_pci_f *rf = dev_to_rf(dev);
 1811 
 1812         if (!ah)
 1813                 return;
 1814 
 1815         if (ah->ah_info.ah_valid) {
 1816                 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
 1817                 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
 1818         }
 1819 
 1820         kfree(ah);
 1821 }
 1822 
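/*
 * Illustrative usage sketch: pairing irdma_puda_create_ah() with
 * irdma_puda_free_ah(). The example_* helper is hypothetical and not part of
 * the driver; ah_info and cb_param are assumed to be prepared by the caller,
 * and the IEQ resource type is used purely as an example.
 */
static int
example_puda_ah_lifetime(struct irdma_sc_dev *dev,
                         struct irdma_ah_info *ah_info, void *cb_param)
{
        struct irdma_sc_ah *ah;
        int err;

        err = irdma_puda_create_ah(dev, ah_info, true,
                                   IRDMA_PUDA_RSRC_TYPE_IEQ, cb_param, &ah);
        if (err)
                return err;

        /* ... AH in use by the IEQ ... */

        irdma_puda_free_ah(dev, ah);
        return 0;
}
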
 1823 /**
 1824  * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
 1825  * @cqp_request: pointer to cqp_request of create AH
 1826  */
 1827 void
 1828 irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
 1829 {
 1830         struct irdma_sc_ah *sc_ah = cqp_request->param;
 1831 
 1832         if (!cqp_request->compl_info.op_ret_val)
 1833                 sc_ah->ah_info.ah_valid = true;
 1834         else
 1835                 sc_ah->ah_info.ah_valid = false;
 1836 }
 1837 
 1838 /**
 1839  * irdma_prm_add_pble_mem - add memory to pble resources
 1840  * @pprm: pble resource manager
 1841  * @pchunk: chunk of memory to add
 1842  */
 1843 int
 1844 irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
 1845                        struct irdma_chunk *pchunk)
 1846 {
 1847         u64 sizeofbitmap;
 1848 
 1849         if (pchunk->size & 0xfff)
 1850                 return -EINVAL;
 1851 
 1852         sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
 1853 
 1854         pchunk->bitmapmem.size = sizeofbitmap >> 3;
 1855         pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
 1856 
 1857         if (!pchunk->bitmapmem.va)
 1858                 return -ENOMEM;
 1859 
 1860         pchunk->bitmapbuf = pchunk->bitmapmem.va;
 1861         bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
 1862 
 1863         pchunk->sizeofbitmap = sizeofbitmap;
 1864         /* each pble is 8 bytes hence shift by 3 */
 1865         pprm->total_pble_alloc += pchunk->size >> 3;
 1866         pprm->free_pble_cnt += pchunk->size >> 3;
 1867 
 1868         return 0;
 1869 }
 1870 
 1871 /**
 1872  * irdma_prm_get_pbles - get pble's from prm
 1873  * @pprm: pble resource manager
 1874  * @chunkinfo: information about chunk where pble's were acquired
 1875  * @mem_size: size of pble memory needed
 1876  * @vaddr: returns virtual address of pble memory
 1877  * @fpm_addr: returns fpm address of pble memory
 1878  */
 1879 int
 1880 irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
 1881                     struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
 1882                     u64 **vaddr, u64 *fpm_addr)
 1883 {
 1884         u64 bits_needed;
 1885         u64 bit_idx = PBLE_INVALID_IDX;
 1886         struct irdma_chunk *pchunk = NULL;
 1887         struct list_head *chunk_entry = (&pprm->clist)->next;
 1888         u32 offset;
 1889         unsigned long flags;
 1890         *vaddr = NULL;
 1891         *fpm_addr = 0;
 1892 
 1893         bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
 1894 
 1895         spin_lock_irqsave(&pprm->prm_lock, flags);
 1896         while (chunk_entry != &pprm->clist) {
 1897                 pchunk = (struct irdma_chunk *)chunk_entry;
 1898                 bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
 1899                                                      pchunk->sizeofbitmap, 0,
 1900                                                      bits_needed, 0);
 1901                 if (bit_idx < pchunk->sizeofbitmap)
 1902                         break;
 1903 
 1904                 /* list.next used macro */
 1905                 chunk_entry = (&pchunk->list)->next;
 1906         }
 1907 
 1908         if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
 1909                 spin_unlock_irqrestore(&pprm->prm_lock, flags);
 1910                 return -ENOMEM;
 1911         }
 1912 
 1913         bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
 1914         offset = bit_idx << pprm->pble_shift;
 1915         *vaddr = (u64 *)((u8 *)pchunk->vaddr + offset);
 1916         *fpm_addr = pchunk->fpm_addr + offset;
 1917 
 1918         chunkinfo->pchunk = pchunk;
 1919         chunkinfo->bit_idx = bit_idx;
 1920         chunkinfo->bits_used = bits_needed;
 1921         /* 3 is sizeof pble divide */
 1922         pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
 1923         spin_unlock_irqrestore(&pprm->prm_lock, flags);
 1924 
 1925         return 0;
 1926 }
 1927 
 1928 /**
 1929  * irdma_prm_return_pbles - return pbles back to prm
 1930  * @pprm: pble resource manager
 1931  * @chunkinfo: chunk where pble's were acquired and to be freed
 1932  */
 1933 void
 1934 irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
 1935                        struct irdma_pble_chunkinfo *chunkinfo)
 1936 {
 1937         unsigned long flags;
 1938 
 1939         spin_lock_irqsave(&pprm->prm_lock, flags);
 1940         pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
 1941         bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
 1942                      chunkinfo->bits_used);
 1943         spin_unlock_irqrestore(&pprm->prm_lock, flags);
 1944 }
 1945 
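/*
 * Illustrative usage sketch: borrowing PBLE space from the resource manager
 * and returning it. The example_* helper is hypothetical and not part of the
 * driver; pprm is assumed to already contain chunks added with
 * irdma_prm_add_pble_mem(), and mem_size is the number of bytes needed.
 */
static int
example_prm_round_trip(struct irdma_pble_prm *pprm, u64 mem_size)
{
        struct irdma_pble_chunkinfo chunkinfo;
        u64 *vaddr;
        u64 fpm_addr;
        int err;

        err = irdma_prm_get_pbles(pprm, &chunkinfo, mem_size, &vaddr, &fpm_addr);
        if (err)
                return err;

        /* ... program vaddr / fpm_addr into the hardware object ... */

        irdma_prm_return_pbles(pprm, &chunkinfo);
        return 0;
}
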
 1946 int
 1947 irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t * pg_dma,
 1948                        u32 pg_cnt)
 1949 {
 1950         struct page *vm_page;
 1951         int i;
 1952         u8 *addr;
 1953 
 1954         addr = (u8 *)(uintptr_t)va;
 1955         for (i = 0; i < pg_cnt; i++) {
 1956                 vm_page = vmalloc_to_page(addr);
 1957                 if (!vm_page)
 1958                         goto err;
 1959 
 1960                 pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 1961                 if (dma_mapping_error(hw_to_dev(hw), pg_dma[i]))
 1962                         goto err;
 1963 
 1964                 addr += PAGE_SIZE;
 1965         }
 1966 
 1967         return 0;
 1968 
 1969 err:
 1970         irdma_unmap_vm_page_list(hw, pg_dma, i);
 1971         return -ENOMEM;
 1972 }
 1973 
 1974 void
 1975 irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t * pg_dma, u32 pg_cnt)
 1976 {
 1977         int i;
 1978 
 1979         for (i = 0; i < pg_cnt; i++)
 1980                 dma_unmap_page(hw_to_dev(hw), pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
 1981 }
 1982 
 1983 /**
 1984  * irdma_pble_free_paged_mem - free virtual paged memory
 1985  * @chunk: chunk to free with paged memory
 1986  */
 1987 void
 1988 irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
 1989 {
 1990         if (!chunk->pg_cnt)
 1991                 goto done;
 1992 
 1993         irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
 1994                                  chunk->pg_cnt);
 1995 
 1996 done:
 1997         kfree(chunk->dmainfo.dmaaddrs);
 1998         chunk->dmainfo.dmaaddrs = NULL;
 1999         vfree(chunk->vaddr);
 2000         chunk->vaddr = NULL;
 2001         chunk->type = 0;
 2002 }
 2003 
 2004 /**
 2005  * irdma_pble_get_paged_mem - allocate paged memory for pbles
 2006  * @chunk: chunk to add for paged memory
 2007  * @pg_cnt: number of pages needed
 2008  */
 2009 int
 2010 irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
 2011 {
 2012         u32 size;
 2013         void *va;
 2014 
 2015         chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
 2016         if (!chunk->dmainfo.dmaaddrs)
 2017                 return -ENOMEM;
 2018 
 2019         size = PAGE_SIZE * pg_cnt;
 2020         va = vmalloc(size);
 2021         if (!va)
 2022                 goto err;
 2023 
 2024         if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
 2025                                    pg_cnt)) {
 2026                 vfree(va);
 2027                 goto err;
 2028         }
 2029         chunk->vaddr = va;
 2030         chunk->size = size;
 2031         chunk->pg_cnt = pg_cnt;
 2032         chunk->type = PBLE_SD_PAGED;
 2033 
 2034         return 0;
 2035 err:
 2036         kfree(chunk->dmainfo.dmaaddrs);
 2037         chunk->dmainfo.dmaaddrs = NULL;
 2038 
 2039         return -ENOMEM;
 2040 }
 2041 
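/*
 * Illustrative usage sketch: allocating and releasing a paged PBLE chunk.
 * The example_* helper is hypothetical and not part of the driver; chunk->dev
 * is assumed to be set so the DMA mappings made inside
 * irdma_pble_get_paged_mem() use a valid hw pointer.
 */
static int
example_paged_chunk_round_trip(struct irdma_chunk *chunk, u32 pg_cnt)
{
        int err;

        err = irdma_pble_get_paged_mem(chunk, pg_cnt);
        if (err)
                return err;

        /* ... chunk->vaddr and chunk->dmainfo.dmaaddrs are now usable ... */

        irdma_pble_free_paged_mem(chunk);
        return 0;
}
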
 2042 /**
 2043  * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
 2044  * @dev: device pointer
 2045  */
 2046 u16
 2047 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
 2048 {
 2049         struct irdma_pci_f *rf = dev_to_rf(dev);
 2050         u32 next = 1;
 2051         u32 node_id;
 2052 
 2053         if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
 2054                              &node_id, &next))
 2055                 return IRDMA_WS_NODE_INVALID;
 2056 
 2057         return (u16)node_id;
 2058 }
 2059 
 2060 /**
 2061  * irdma_free_ws_node_id - Free a tx scheduler node ID
 2062  * @dev: device pointer
 2063  * @node_id: Work scheduler node ID
 2064  */
 2065 void
 2066 irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
 2067 {
 2068         struct irdma_pci_f *rf = dev_to_rf(dev);
 2069 
 2070         irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
 2071 }
 2072 
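/*
 * Illustrative usage sketch: allocating a TX work scheduler node ID and
 * releasing it, checking for the invalid sentinel. The example_* helper is
 * hypothetical and not part of the driver.
 */
static int
example_ws_node_id_round_trip(struct irdma_sc_dev *dev)
{
        u16 node_id;

        node_id = irdma_alloc_ws_node_id(dev);
        if (node_id == IRDMA_WS_NODE_INVALID)
                return -ENOMEM;

        /* ... node_id used to build the scheduler node ... */

        irdma_free_ws_node_id(dev, node_id);
        return 0;
}
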
 2073 /**
 2074  * irdma_modify_qp_to_err - Modify a QP to error
 2075  * @sc_qp: qp structure
 2076  */
 2077 void
 2078 irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
 2079 {
 2080         struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
 2081         struct ib_qp_attr attr;
 2082 
 2083         if (qp->iwdev->rf->reset)
 2084                 return;
 2085         attr.qp_state = IB_QPS_ERR;
 2086 
 2087         if (rdma_protocol_roce(qp->ibqp.device, 1))
 2088                 irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
 2089         else
 2090                 irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
 2091 }
 2092 
 2093 void
 2094 irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 2095 {
 2096         struct ib_event ibevent;
 2097 
 2098         if (!iwqp->ibqp.event_handler)
 2099                 return;
 2100 
 2101         switch (event) {
 2102         case IRDMA_QP_EVENT_CATASTROPHIC:
 2103                 ibevent.event = IB_EVENT_QP_FATAL;
 2104                 break;
 2105         case IRDMA_QP_EVENT_ACCESS_ERR:
 2106                 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
 2107                 break;
 2108         case IRDMA_QP_EVENT_REQ_ERR:
 2109                 ibevent.event = IB_EVENT_QP_REQ_ERR;
 2110                 break;
 2111         }
 2112         ibevent.device = iwqp->ibqp.device;
 2113         ibevent.element.qp = &iwqp->ibqp;
 2114         iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 2115 }
 2116 
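/*
 * Illustrative usage sketch: reporting a catastrophic QP error, which
 * dispatches IB_EVENT_QP_FATAL to the consumer's event handler if one is
 * registered. The example_* helper is hypothetical and not part of the
 * driver; iwqp is assumed valid.
 */
static void
example_report_fatal_qp_error(struct irdma_qp *iwqp)
{
        irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
}
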
 2117 static void
 2118 clear_qp_ctx_addr(__le64 * ctx)
 2119 {
 2120         u64 tmp;
 2121 
 2122         get_64bit_val(ctx, 272, &tmp);
 2123         tmp &= GENMASK_ULL(63, 58);
 2124         set_64bit_val(ctx, 272, tmp);
 2125 
 2126         get_64bit_val(ctx, 296, &tmp);
 2127         tmp &= GENMASK_ULL(7, 0);
 2128         set_64bit_val(ctx, 296, tmp);
 2129 
 2130         get_64bit_val(ctx, 312, &tmp);
 2131         tmp &= GENMASK_ULL(7, 0);
 2132         set_64bit_val(ctx, 312, tmp);
 2133 
 2134         set_64bit_val(ctx, 368, 0);
 2135 }
 2136 
 2137 /**
 2138  * irdma_upload_qp_context - upload raw QP context
 2139  * @iwqp: QP pointer
 2140  * @freeze: freeze QP
 2141  * @raw: raw context flag
 2142  */
 2143 int
 2144 irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
 2145 {
 2146         struct irdma_dma_mem dma_mem;
 2147         struct irdma_sc_dev *dev;
 2148         struct irdma_sc_qp *qp;
 2149         struct irdma_cqp *iwcqp;
 2150         struct irdma_cqp_request *cqp_request;
 2151         struct cqp_cmds_info *cqp_info;
 2152         struct irdma_upload_context_info *info;
 2153         struct irdma_pci_f *rf;
 2154         int ret;
 2155         u32 *ctx;
 2156 
 2157         rf = iwqp->iwdev->rf;
 2158         if (!rf)
 2159                 return -EINVAL;
 2160 
 2161         qp = &iwqp->sc_qp;
 2162         dev = &rf->sc_dev;
 2163         iwcqp = &rf->cqp;
 2164 
 2165         cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
 2166         if (!cqp_request)
 2167                 return -EINVAL;
 2168 
 2169         cqp_info = &cqp_request->info;
 2170         info = &cqp_info->in.u.qp_upload_context.info;
 2171         memset(info, 0, sizeof(struct irdma_upload_context_info));
 2172         cqp_info->cqp_cmd = IRDMA_OP_QP_UPLOAD_CONTEXT;
 2173         cqp_info->post_sq = 1;
 2174         cqp_info->in.u.qp_upload_context.dev = dev;
 2175         cqp_info->in.u.qp_upload_context.scratch = (uintptr_t)cqp_request;
 2176 
 2177         dma_mem.size = PAGE_SIZE;
 2178         dma_mem.va = irdma_allocate_dma_mem(dev->hw, &dma_mem, dma_mem.size, PAGE_SIZE);
 2179         if (!dma_mem.va) {
 2180                 irdma_put_cqp_request(&rf->cqp, cqp_request);
 2181                 return -ENOMEM;
 2182         }
 2183 
 2184         ctx = dma_mem.va;
 2185         info->buf_pa = dma_mem.pa;
 2186         info->raw_format = raw;
 2187         info->freeze_qp = freeze;
 2188         info->qp_type = qp->qp_uk.qp_type;      /* 1 is iWARP and 2 UDA */
 2189         info->qp_id = qp->qp_uk.qp_id;
 2190         ret = irdma_handle_cqp_op(rf, cqp_request);
 2191         if (ret)
 2192                 goto error;
 2193 
 2194         irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTEXT QP [%d]\n", info->qp_id);
 2195         {
 2196                 u32 i, j;
 2197 
 2198                 clear_qp_ctx_addr(dma_mem.va);
 2199                 for (i = 0, j = 0; i < 32; i++, j += 4)
 2200                         irdma_debug(dev, IRDMA_DEBUG_QP,
 2201                                     "%d:\t [%08X %08x %08X %08X]\n",
 2202                                     (j * 4), ctx[j], ctx[j + 1], ctx[j + 2],
 2203                                     ctx[j + 3]);
 2204         }
 2205 error:
 2206         irdma_put_cqp_request(iwcqp, cqp_request);
 2207         irdma_free_dma_mem(dev->hw, &dma_mem);
 2208 
 2209         return ret;
 2210 }
 2211 
 2212 bool
 2213 irdma_cq_empty(struct irdma_cq *iwcq)
 2214 {
 2215         struct irdma_cq_uk *ukcq;
 2216         u64 qword3;
 2217         __le64 *cqe;
 2218         u8 polarity;
 2219 
 2220         ukcq = &iwcq->sc_cq.cq_uk;
 2221         cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
 2222         get_64bit_val(cqe, 24, &qword3);
 2223         polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
 2224 
 2225         return polarity != ukcq->polarity;
 2226 }
 2227 
 2228 void
 2229 irdma_remove_cmpls_list(struct irdma_cq *iwcq)
 2230 {
 2231         struct irdma_cmpl_gen *cmpl_node;
 2232         struct list_head *tmp_node, *list_node;
 2233 
 2234         list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
 2235                 cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
 2236                 list_del(&cmpl_node->list);
 2237                 kfree(cmpl_node);
 2238         }
 2239 }
 2240 
 2241 int
 2242 irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
 2243 {
 2244         struct irdma_cmpl_gen *cmpl;
 2245 
 2246         if (list_empty(&iwcq->cmpl_generated))
 2247                 return -ENOENT;
 2248         cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
 2249         list_del(&cmpl->list);
 2250         memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
 2251         kfree(cmpl);
 2252 
 2253         irdma_debug(iwcq->sc_cq.dev, IRDMA_DEBUG_VERBS,
 2254                     "%s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%lx\n",
 2255                     __func__, cq_poll_info->qp_id, cq_poll_info->op_type, cq_poll_info->wr_id);
 2256 
 2257         return 0;
 2258 }
 2259 
 2260 /**
 2261  * irdma_set_cpi_common_values - fill in values for polling info struct
 2262  * @cpi: resulting structure of cq_poll_info type
 2263  * @qp: QPair
 2264  * @qp_num: id of the QP
 2265  */
 2266 static void
 2267 irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
 2268                             struct irdma_qp_uk *qp, u32 qp_num)
 2269 {
 2270         cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
 2271         cpi->error = 1;
 2272         cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
 2273         cpi->minor_err = FLUSH_GENERAL_ERR;
 2274         cpi->qp_handle = (irdma_qp_handle) (uintptr_t)qp;
 2275         cpi->qp_id = qp_num;
 2276 }
 2277 
 2278 static inline void
 2279 irdma_comp_handler(struct irdma_cq *cq)
 2280 {
 2281         if (!cq->ibcq.comp_handler)
 2282                 return;
 2283 
 2284         if (atomic_cmpxchg(&cq->armed, 1, 0))
 2285                 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 2286 }
 2287 
 2288 /**
 2289  * irdma_generate_flush_completions - generate completion from WRs
 2290  * @iwqp: pointer to QP
 2291  */
 2292 void
 2293 irdma_generate_flush_completions(struct irdma_qp *iwqp)
 2294 {
 2295         struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
 2296         struct irdma_ring *sq_ring = &qp->sq_ring;
 2297         struct irdma_ring *rq_ring = &qp->rq_ring;
 2298         struct irdma_cmpl_gen *cmpl;
 2299         __le64 *sw_wqe;
 2300         u64 wqe_qword;
 2301         u32 wqe_idx;
 2302         u8 compl_generated = 0;
 2303         unsigned long flags;
 2304         bool reschedule = false;
 2305 
 2306 #define SQ_COMPL_GENERATED (0x01)
 2307 #define RQ_COMPL_GENERATED (0x02)
 2308 
 2309         spin_lock_irqsave(&iwqp->iwscq->lock, flags);
 2310         if (irdma_cq_empty(iwqp->iwscq)) {
 2311                 unsigned long flags2;
 2312 
 2313                 spin_lock_irqsave(&iwqp->lock, flags2);
 2314                 while (IRDMA_RING_MORE_WORK(*sq_ring)) {
 2315                         cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 2316                         if (!cmpl) {
 2317                                 spin_unlock_irqrestore(&iwqp->lock, flags2);
 2318                                 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
 2319                                 return;
 2320                         }
 2321 
 2322                         wqe_idx = sq_ring->tail;
 2323                         irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
 2324 
 2325                         cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
 2326                         cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
 2327                         sw_wqe = qp->sq_base[wqe_idx].elem;
 2328                         get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
 2329                         cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
 2330                         cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
 2331                         /* remove the SQ WR by moving SQ tail */
 2332                         IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
 2333 
 2334                         irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
 2335                                     "%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n", __func__, cmpl->cpi.wr_id, qp->qp_id);
 2336                         list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
 2337                         compl_generated |= SQ_COMPL_GENERATED;
 2338                 }
 2339                 spin_unlock_irqrestore(&iwqp->lock, flags2);
 2340                 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
 2341         } else {
 2342                 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
 2343                 reschedule = true;
 2344         }
 2345 
 2346         spin_lock_irqsave(&iwqp->iwrcq->lock, flags);
 2347         if (irdma_cq_empty(iwqp->iwrcq)) {
 2348                 unsigned long flags2;
 2349 
 2350                 spin_lock_irqsave(&iwqp->lock, flags2);
 2351                 while (IRDMA_RING_MORE_WORK(*rq_ring)) {
 2352                         cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 2353                         if (!cmpl) {
 2354                                 spin_unlock_irqrestore(&iwqp->lock, flags2);
 2355                                 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
 2356                                 return;
 2357                         }
 2358 
 2359                         wqe_idx = rq_ring->tail;
 2360                         irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
 2361 
 2362                         cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
 2363                         cmpl->cpi.signaled = 1;
 2364                         cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
 2365                         cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
 2366                         /* remove the RQ WR by moving RQ tail */
 2367                         IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
 2368                         irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
 2369                                     "%s: adding wr_id = 0x%lx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
 2370                                     __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
 2371                         list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
 2372 
 2373                         compl_generated |= RQ_COMPL_GENERATED;
 2374                 }
 2375                 spin_unlock_irqrestore(&iwqp->lock, flags2);
 2376                 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
 2377         } else {
 2378                 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
 2379                 reschedule = true;
 2380         }
 2381 
 2382         if (reschedule)
 2383                 irdma_sched_qp_flush_work(iwqp);
 2384         if (compl_generated) {
 2385                 if (iwqp->iwscq == iwqp->iwrcq) {
 2386                         irdma_comp_handler(iwqp->iwscq);
 2387                 } else {
 2388                         if (compl_generated & SQ_COMPL_GENERATED)
 2389                                 irdma_comp_handler(iwqp->iwscq);
 2390                         if (compl_generated & RQ_COMPL_GENERATED)
 2391                                 irdma_comp_handler(iwqp->iwrcq);
 2392                 }
 2393                 irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_VERBS,
 2394                             "0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n",
 2395                             compl_generated, iwqp->ibqp.qp_num);
 2396         }
 2397 }
