FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_spq.c

/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_spq.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

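/* Chain elements held in reserve for high-priority ramrods; passed to
 * ecore_spq_post_list() as 'keep_reserve' by ecore_spq_pend_post() below.
 */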
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
#define SPQ_BLOCK_SLEEP_MS              (5)
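/* Implied worst-case wait budgets (derived from the constants above): the
 * quick poll busy-waits up to 10 * 10us = 100us, while the sleeping poll
 * waits up to 200 * 5ms = 1s before an MCP drain is attempted.
 */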

#ifndef REMOVE_DBG
/***************************************************************************
 * Debug [iSCSI] tool
 ***************************************************************************/
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
                                struct event_ring_entry *p_eqe)
{
        if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
                DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
                          p_eqe->opcode);
        }

        switch (p_eqe->opcode) {
        case ISCSI_EVENT_TYPE_INIT_FUNC:
        case ISCSI_EVENT_TYPE_DESTROY_FUNC:
                /* NOPE */
                break;
        case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
        case ISCSI_EVENT_TYPE_TERMINATE_CONN:
                DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
                           "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
                           p_hwfn->port_id, p_eqe->opcode,
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,
                           OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.icid),
                           OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
                           p_eqe->data.iscsi_info.error_code);
                break;
        case ISCSI_EVENT_TYPE_UPDATE_CONN:
        case ISCSI_EVENT_TYPE_CLEAR_SQ:
        case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
        case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
        case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
        case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
        case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
        case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
        case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
        case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
        case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
        case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
        case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
        default:
                /* NOPE */
                break;
        }
}
#endif

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
                                  union event_ring_data OSAL_UNUSED *data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

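/* Polling counterpart of ecore_spq_blocking_cb() above: the callback's
 * OSAL_SMP_WMB() publishes 'done' and 'fw_return_code' before the loop
 * below re-reads them under OSAL_SMP_RMB().
 */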
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
                iter_cnt *= 5;
#endif

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter) {
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                } else {
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
                }
        }

        return ECORE_TIMEOUT;
}

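/* Escalation ladder for blocking ramrods: a short busy-poll, then a
 * sleeping poll, then an MCP drain request followed by one last sleeping
 * poll before the ramrod is declared stuck and ECORE_HW_ERR_RAMROD_FAIL is
 * raised.
 */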
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq  *p_spq)
{
        struct e4_core_conn_context *p_cxt;
        struct ecore_cxt_info cxt_info;
        u16 physical_q;
        enum _ecore_status_t rc;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        /* @@@TBD we zero the context until we have ilt_reset implemented. */
        OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        } else { /* E5 */
                ECORE_E5_MISSING_CODE;
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn         *p_hwfn,
                                              struct ecore_spq          *p_spq,
                                              struct ecore_spq_entry    *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;

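        /* 'echo' snapshots the chain producer index; the FW copies it back
         * into the EQ completion, where ecore_spq_completion() uses it to
         * find this entry on the completion_pending list.
         */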
        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem; /* Struct assignment */

        p_db_data->spq_prod =
                OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* Make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

        DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

        /* Make sure doorbell was rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
                   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        ecore_spq_async_comp_cb cb;

        if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) {
                return ECORE_INVAL;
        }

        cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
        if (cb) {
                return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
                          &p_eqe->data, p_eqe->fw_return_code);
        } else {
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
                            enum protocol_type protocol_id,
                            ecore_spq_async_comp_cb cb)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
                return ECORE_INVAL;
        }

        p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
        return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
                              enum protocol_type protocol_id)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
                return;
        }

        p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn     *p_hwfn,
                          u16                   prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn      *p_hwfn,
                                         void                   *cookie)
{
        struct ecore_eq    *p_eq    = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a usable
         * element (to comply with our chain), so that our macros apply
         * correctly.
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }
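        /* In chain modes that reserve trailing next-page slots, an index
         * landing on a reserved slot is bumped past it so it remains
         * comparable with ecore_chain_get_cons_idx(); for modes with no
         * unusable elements the adjustment above is a no-op.
         */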

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,            /* Event Opcode */
                           p_eqe->protocol_id,       /* Event Protocol ID */
                           p_eqe->reserved0,         /* Reserved */
                           OSAL_LE16_TO_CPU(p_eqe->echo),/* Echo value from
                                                        ramrod data on the host
                                                      */
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                        ramrods
                                                      */
                           p_eqe->flags);
#ifndef REMOVE_DBG
                if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
                        ecore_iscsi_eq_dump(p_hwfn, p_eqe);
#endif

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct core_db_data *p_db_data;
        void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
        enum _ecore_status_t rc;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

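        /* Each entry's ring element points at the 'ramrod' member of that
         * same entry within the coherent block allocated in
         * ecore_spq_alloc(); hence the OFFSETOF() below.
         */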
        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);

        /* Initialize the address/data of the SPQ doorbell */
        p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
        p_db_data = &p_spq->db_data;
        OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
        SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* Register the SPQ doorbell with the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
                                   DB_REC_WIDTH_32B, DB_REC_KERNEL);
        if (rc != ECORE_SUCCESS)
                DP_INFO(p_hwfn,
                        "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt) {
                goto spq_allocate_fail;
        }

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
                goto spq_allocate_fail;
#endif

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        void OSAL_IOMEM *db_addr;
        u32 capacity;

        if (!p_spq)
                return;

        /* Delete the SPQ doorbell from the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

        OSAL_FREE(p_hwfn->p_dev, p_spq);
        p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
                                         struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry,
                                              list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                                                struct ecore_spq_entry *p_ent,
                                                enum spq_priority priority)
{
        struct ecore_spq        *p_spq  = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;

                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                      struct ecore_spq_entry,
                                                      list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated p_ent */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq) {
                return 0xffffffff;      /* illegal */
        }
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t       *head,
                                                u32               keep_reserve)
{
        struct ecore_spq        *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t    rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high-priority ramrod (even if one is already
         * pending in the FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry  *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                       &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

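/* Drain unlimited_pending into the pending list as free-pool entries become
 * available, then post whatever fits in the chain while keeping the
 * high-priority reserve.
 */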
enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry,
                                              list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn, &p_spq->pending,
                                   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn           *p_hwfn,
                                    struct ecore_spq_entry      *p_ent,
                                    u8                          *fw_return_code)
{
        enum _ecore_status_t    rc = ECORE_SUCCESS;
        struct ecore_spq        *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool                    b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
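/* Illustrative caller flow for the API above (a sketch only, not lifted
 * from the driver; 'cid', 'cmd' and 'protocol' are hypothetical values, and
 * the production wrappers live in ecore_sp_commands.c):
 *
 *        struct ecore_spq_entry *p_ent = OSAL_NULL;
 *        u8 fw_return_code;
 *        enum _ecore_status_t rc;
 *
 *        rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *        if (rc != ECORE_SUCCESS)
 *                return rc;
 *        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(cid);
 *        p_ent->elem.hdr.cmd_id = cmd;
 *        p_ent->elem.hdr.protocol_id = protocol;
 *        p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
 *        rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
 */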

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq        *p_spq;
        struct ecore_spq_entry  *p_ent = OSAL_NULL;
        struct ecore_spq_entry  *tmp;
        struct ecore_spq_entry  *found = OSAL_NULL;

        if (!p_hwfn) {
                return ECORE_INVAL;
        }

        p_spq = p_hwfn->p_spq;
        if (!p_spq) {
                return ECORE_INVAL;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list,
                                      struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
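                        /* Example: if echoes 5, 3 and 4 complete in that
                         * order while comp_bitmap_idx == 3, the '5'
                         * completion only marks its bit, the '3' completion
                         * advances past 3, and the '4' completion then
                         * sweeps both 4 and the previously-marked 5.
                         */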
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

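        /* Element count (ECORE_CHAIN_PAGE_SIZE / 0x80) and element size
         * (0x80 bytes) below size the ConsQ ring at exactly one chain page.
         */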
        /* Allocate and initialize ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE/0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
        p_hwfn->p_consq = OSAL_NULL;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif
