FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_dev.c


    1 /*
    2  * Copyright (c) 2017-2018 Cavium, Inc. 
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * File : ecore_dev.c
   30  */
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "bcm_osal.h"
   35 #include "reg_addr.h"
   36 #include "ecore_gtt_reg_addr.h"
   37 #include "ecore.h"
   38 #include "ecore_chain.h"
   39 #include "ecore_status.h"
   40 #include "ecore_hw.h"
   41 #include "ecore_rt_defs.h"
   42 #include "ecore_init_ops.h"
   43 #include "ecore_int.h"
   44 #include "ecore_cxt.h"
   45 #include "ecore_spq.h"
   46 #include "ecore_init_fw_funcs.h"
   47 #include "ecore_sp_commands.h"
   48 #include "ecore_dev_api.h"
   49 #include "ecore_sriov.h"
   50 #include "ecore_vf.h"
   51 #include "ecore_ll2.h"
   52 #include "ecore_fcoe.h"
   53 #include "ecore_iscsi.h"
   54 #include "ecore_ooo.h"
   55 #include "ecore_mcp.h"
   56 #include "ecore_hw_defs.h"
   57 #include "mcp_public.h"
   58 #include "ecore_rdma.h"
   59 #include "ecore_iro.h"
   60 #include "nvm_cfg.h"
   62 #include "ecore_dcbx.h"
   63 #include "pcics_reg_driver.h"
   64 #include "ecore_l2.h"
   65 #ifndef LINUX_REMOVE
   66 #include "ecore_tcp_ip.h"
   67 #endif
   68 
   69 #ifdef _NTDDK_
   70 #pragma warning(push)
   71 #pragma warning(disable : 28167)
   72 #pragma warning(disable : 28123)
   73 #endif
   74 
    75 /* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
    76  * registers involved are not split and thus configuration is a race where
    77  * some of the PFs' configuration might be lost.
    78  * Eventually, this needs to move into an MFW-covered HW-lock as an arbitration
    79  * mechanism, as this doesn't cover some cases [e.g., PDA, or scenarios where
    80  * there's more than a single compiled ecore component in the system].
    81  */
   82 static osal_spinlock_t qm_lock;
   83 static u32 qm_lock_ref_cnt;
   84 
   85 void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size)
   86 {
   87         p_dev->ilt_page_size = ilt_page_size;
   88 }
   89 
   90 /******************** Doorbell Recovery *******************/
   91 /* The doorbell recovery mechanism consists of a list of entries which represent
   92  * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
   93  * entity needs to register with the mechanism and provide the parameters
    94  * describing its doorbell, including a location where the last used doorbell data
   95  * can be found. The doorbell execute function will traverse the list and
   96  * doorbell all of the registered entries.
   97  */
   98 struct ecore_db_recovery_entry {
   99         osal_list_entry_t       list_entry;
  100         void OSAL_IOMEM         *db_addr;
  101         void                    *db_data;
  102         enum ecore_db_rec_width db_width;
  103         enum ecore_db_rec_space db_space;
  104         u8                      hwfn_idx;
  105 };
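
/* Illustrative sketch (not part of the driver): how a doorbelling entity
 * would typically use this mechanism. The ecore_db_recovery_add/del/execute()
 * calls below match the functions in this file; the "my_queue" struct is
 * hypothetical, and DB_REC_WIDTH_64B/DB_REC_KERNEL are assumed to be the
 * 64-bit/kernel-space counterparts of the DB_REC_WIDTH_32B/DB_REC_USER
 * values used elsewhere in this file.
 */
#if 0
struct my_queue {                       /* hypothetical doorbelling entity */
	void OSAL_IOMEM *db_addr;       /* doorbell address within the BAR */
	u64 db_data;                    /* shadow of the last doorbell value */
};

static void my_queue_db_recovery_sketch(struct ecore_dev *p_dev,
					struct ecore_hwfn *p_hwfn,
					struct my_queue *q)
{
	/* register once, when the entity is created */
	ecore_db_recovery_add(p_dev, q->db_addr, &q->db_data,
			      DB_REC_WIDTH_64B, DB_REC_KERNEL);

	/* after the doorbell BAR recovers, replay all registered doorbells
	 * from their shadow data
	 */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	/* unregister when the entity is destroyed */
	ecore_db_recovery_del(p_dev, q->db_addr, &q->db_data);
}
#endif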
  106 
  107 /* display a single doorbell recovery entry */
  108 static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
  109                                 struct ecore_db_recovery_entry *db_entry,
  110                                 char *action)
  111 {
  112         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
  113                    action, db_entry, db_entry->db_addr, db_entry->db_data,
  114                    db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
  115                    db_entry->db_space == DB_REC_USER ? "user" : "kernel",
  116                    db_entry->hwfn_idx);
  117 }
  118 
  119 /* doorbell address sanity (address within doorbell bar range) */
  120 static bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
  121                          void *db_data)
  122 {
   123         /* make sure doorbell address is within the doorbell bar */
  124         if (db_addr < p_dev->doorbells || (u8 *)db_addr >
  125                         (u8 *)p_dev->doorbells + p_dev->db_size) {
  126                 OSAL_WARN(true,
  127                           "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
  128                           db_addr, p_dev->doorbells,
  129                           (u8 *)p_dev->doorbells + p_dev->db_size);
  130                 return false;
  131         }
  132 
  133         /* make sure doorbell data pointer is not null */
  134         if (!db_data) {
  135                 OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
  136                 return false;
  137         }
  138 
  139         return true;
  140 }
  141 
  142 /* find hwfn according to the doorbell address */
  143 static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
  144                                           void OSAL_IOMEM *db_addr)
  145 {
  146         struct ecore_hwfn *p_hwfn;
  147 
   148         /* in CMT the doorbell bar is split down the middle between engine 0 and engine 1 */
  149         if (ECORE_IS_CMT(p_dev))
  150                 p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
  151                         &p_dev->hwfns[0] : &p_dev->hwfns[1];
  152         else
  153                 p_hwfn = ECORE_LEADING_HWFN(p_dev);
  154 
  155         return p_hwfn;
  156 }
  157 
  158 /* add a new entry to the doorbell recovery mechanism */
  159 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
  160                                            void OSAL_IOMEM *db_addr,
  161                                            void *db_data,
  162                                            enum ecore_db_rec_width db_width,
  163                                            enum ecore_db_rec_space db_space)
  164 {
  165         struct ecore_db_recovery_entry *db_entry;
  166         struct ecore_hwfn *p_hwfn;
  167 
   168         /* short-circuit VFs, for now */
  169         if (IS_VF(p_dev)) {
  170                 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
  171                 return ECORE_SUCCESS;
  172         }
  173 
  174         /* sanitize doorbell address */
  175         if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
  176                 return ECORE_INVAL;
  177 
  178         /* obtain hwfn from doorbell address */
  179         p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
  180 
  181         /* create entry */
  182         db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
  183         if (!db_entry) {
  184                 DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
  185                 return ECORE_NOMEM;
  186         }
  187 
  188         /* populate entry */
  189         db_entry->db_addr = db_addr;
  190         db_entry->db_data = db_data;
  191         db_entry->db_width = db_width;
  192         db_entry->db_space = db_space;
  193         db_entry->hwfn_idx = p_hwfn->my_id;
  194 
  195         /* display */
  196         ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
  197 
  198         /* protect the list */
  199         OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
  200         OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
  201                             &p_hwfn->db_recovery_info.list);
  202         OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
  203 
  204         return ECORE_SUCCESS;
  205 }
  206 
  207 /* remove an entry from the doorbell recovery mechanism */
  208 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
  209                                            void OSAL_IOMEM *db_addr,
  210                                            void *db_data)
  211 {
  212         struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
  213         enum _ecore_status_t rc = ECORE_INVAL;
  214         struct ecore_hwfn *p_hwfn;
  215 
   216         /* short-circuit VFs, for now */
  217         if (IS_VF(p_dev)) {
  218                 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
  219                 return ECORE_SUCCESS;
  220         }
  221 
  222         /* sanitize doorbell address */
  223         if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
  224                 return ECORE_INVAL;
  225 
  226         /* obtain hwfn from doorbell address */
  227         p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
  228 
  229         /* protect the list */
  230         OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
  231         OSAL_LIST_FOR_EACH_ENTRY(db_entry,
  232                                  &p_hwfn->db_recovery_info.list,
  233                                  list_entry,
  234                                  struct ecore_db_recovery_entry) {
  235                 /* search according to db_data addr since db_addr is not unique (roce) */
  236                 if (db_entry->db_data == db_data) {
  237                         ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
  238                         OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
  239                                                &p_hwfn->db_recovery_info.list);
  240                         rc = ECORE_SUCCESS;
  241                         break;
  242                 }
  243         }
  244 
  245         OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
  246 
  247         if (rc == ECORE_INVAL) {
  248                 /*OSAL_WARN(true,*/
  249                 DP_NOTICE(p_hwfn, false,
  250                           "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
  251                           db_data, db_addr);
  252         } else
  253                 OSAL_FREE(p_dev, db_entry);
  254 
  255         return rc;
  256 }
  257 
  258 /* initialize the doorbell recovery mechanism */
  259 static enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
  260 {
  261         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
  262 
  263         /* make sure db_size was set in p_dev */
  264         if (!p_hwfn->p_dev->db_size) {
  265                 DP_ERR(p_hwfn->p_dev, "db_size not set\n");
  266                 return ECORE_INVAL;
  267         }
  268 
  269         OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
  270 #ifdef CONFIG_ECORE_LOCK_ALLOC
  271         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
  272                 return ECORE_NOMEM;
  273 #endif
  274         OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
  275         p_hwfn->db_recovery_info.db_recovery_counter = 0;
  276 
  277         return ECORE_SUCCESS;
  278 }
  279 
  280 /* destroy the doorbell recovery mechanism */
  281 static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
  282 {
  283         struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
  284 
  285         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
  286         if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
   287                 DP_VERBOSE(p_hwfn, false, "Doorbell recovery teardown found the doorbell recovery list not empty (expected in a disorderly driver unload, e.g. recovery; otherwise some flow probably forgot to call db_recovery_del). Purging the doorbell recovery list...\n");
  288                 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
  289                         db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list,
  290                                                          struct ecore_db_recovery_entry,
  291                                                          list_entry);
  292                         ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
  293                         OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
  294                                                &p_hwfn->db_recovery_info.list);
  295                         OSAL_FREE(p_hwfn->p_dev, db_entry);
  296                 }
  297         }
  298 #ifdef CONFIG_ECORE_LOCK_ALLOC
  299         OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
  300 #endif
  301         p_hwfn->db_recovery_info.db_recovery_counter = 0;
  302 }
  303 
  304 /* print the content of the doorbell recovery mechanism */
  305 void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
  306 {
  307         struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
  308 
  309         DP_NOTICE(p_hwfn, false,
   310                   "Displaying doorbell recovery database. Counter was %d\n",
  311                   p_hwfn->db_recovery_info.db_recovery_counter);
  312 
  313         /* protect the list */
  314         OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
  315         OSAL_LIST_FOR_EACH_ENTRY(db_entry,
  316                                  &p_hwfn->db_recovery_info.list,
  317                                  list_entry,
  318                                  struct ecore_db_recovery_entry) {
  319                 ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
  320         }
  321 
  322         OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
  323 }
  324 
  325 /* ring the doorbell of a single doorbell recovery entry */
  326 static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
  327                             struct ecore_db_recovery_entry *db_entry,
  328                             enum ecore_db_rec_exec db_exec)
  329 {
  330         if (db_exec != DB_REC_ONCE) {
  331                 /* Print according to width */
  332                 if (db_entry->db_width == DB_REC_WIDTH_32B)
  333                         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
  334                                    "%s doorbell address %p data %x\n",
  335                                    db_exec == DB_REC_DRY_RUN ?
  336                                    "would have rung" : "ringing",
  337                                    db_entry->db_addr,
  338                                    *(u32 *)db_entry->db_data);
  339                 else
  340                         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
  341                                    "%s doorbell address %p data %llx\n",
  342                                    db_exec == DB_REC_DRY_RUN ?
  343                                    "would have rung" : "ringing",
  344                                    db_entry->db_addr,
  345                                    (unsigned long long)*(u64 *)(db_entry->db_data));
  346         }
  347 
  348         /* Sanity */
  349         if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
  350                                  db_entry->db_data))
  351                 return;
  352 
   353         /* Flush the write-combined buffer. Since there are multiple doorbelling
  354          * entities using the same address, if we don't flush, a transaction
  355          * could be lost.
  356          */
  357         OSAL_WMB(p_hwfn->p_dev);
  358 
  359         /* Ring the doorbell */
  360         if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
  361                 if (db_entry->db_width == DB_REC_WIDTH_32B)
  362                         DIRECT_REG_WR(p_hwfn, db_entry->db_addr, *(u32 *)(db_entry->db_data));
  363                 else
  364                         DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, *(u64 *)(db_entry->db_data));
  365         }
  366 
   367         /* Flush the write-combined buffer. Next doorbell may come from a
  368          * different entity to the same address...
  369          */
  370         OSAL_WMB(p_hwfn->p_dev);
  371 }
  372 
  373 /* traverse the doorbell recovery entry list and ring all the doorbells */
  374 void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
  375                                enum ecore_db_rec_exec db_exec)
  376 {
  377         struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
  378 
  379         if (db_exec != DB_REC_ONCE) {
  380                 DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
  381                           p_hwfn->db_recovery_info.db_recovery_counter);
  382 
   383                 /* track the number of times recovery was executed */
  384                 p_hwfn->db_recovery_info.db_recovery_counter++;
  385         }
  386 
  387         /* protect the list */
  388         OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
  389         OSAL_LIST_FOR_EACH_ENTRY(db_entry,
  390                                  &p_hwfn->db_recovery_info.list,
  391                                  list_entry,
  392                                  struct ecore_db_recovery_entry) {
  393                 ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
  394                 if (db_exec == DB_REC_ONCE)
  395                         break;
  396         }
  397 
  398         OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
  399 }
  400 /******************** Doorbell Recovery end ****************/
  401 
  402 /********************************** NIG LLH ***********************************/
  403 
  404 enum ecore_llh_filter_type {
  405         ECORE_LLH_FILTER_TYPE_MAC,
  406         ECORE_LLH_FILTER_TYPE_PROTOCOL,
  407 };
  408 
  409 struct ecore_llh_mac_filter {
  410         u8 addr[ETH_ALEN];
  411 };
  412 
  413 struct ecore_llh_protocol_filter {
  414         enum ecore_llh_prot_filter_type_t type;
  415         u16 source_port_or_eth_type;
  416         u16 dest_port;
  417 };
  418 
  419 union ecore_llh_filter {
  420         struct ecore_llh_mac_filter mac;
  421         struct ecore_llh_protocol_filter protocol;
  422 };
  423 
  424 struct ecore_llh_filter_info {
  425         bool b_enabled;
  426         u32 ref_cnt;
  427         enum ecore_llh_filter_type type;
  428         union ecore_llh_filter filter;
  429 };
  430 
  431 struct ecore_llh_info {
   432         /* Number of LLH filter banks */
  433         u8 num_ppfid;
  434 
  435 #define MAX_NUM_PPFID   8
  436         u8 ppfid_array[MAX_NUM_PPFID];
  437 
   438         /* Array of filter arrays:
   439          * "num_ppfid" elements of filter banks, where each is an array of
  440          * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
  441          */
  442         struct ecore_llh_filter_info **pp_filters;
  443 };
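
/* Illustrative access pattern (not part of the driver): the shadow above is
 * indexed first by relative ppfid and then by filter index, while
 * ppfid_array[] maps a relative ppfid to its absolute value:
 *
 *	struct ecore_llh_filter_info *p_filters =
 *		p_dev->p_llh_info->pp_filters[rel_ppfid];
 *
 *	if (p_filters[filter_idx].b_enabled)
 *		abs_ppfid = p_dev->p_llh_info->ppfid_array[rel_ppfid];
 */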
  444 
  445 static void ecore_llh_free(struct ecore_dev *p_dev)
  446 {
  447         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  448         u32 i;
  449 
  450         if (p_llh_info != OSAL_NULL) {
  451                 if (p_llh_info->pp_filters != OSAL_NULL) {
  452                         for (i = 0; i < p_llh_info->num_ppfid; i++)
  453                                 OSAL_FREE(p_dev, p_llh_info->pp_filters[i]);
  454                 }
  455 
  456                 OSAL_FREE(p_dev, p_llh_info->pp_filters);
  457         }
  458 
  459         OSAL_FREE(p_dev, p_llh_info);
  460         p_dev->p_llh_info = OSAL_NULL;
  461 }
  462 
  463 static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev)
  464 {
  465         struct ecore_llh_info *p_llh_info;
  466         u32 size; u8 i;
  467 
  468         p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info));
  469         if (!p_llh_info)
  470                 return ECORE_NOMEM;
  471         p_dev->p_llh_info = p_llh_info;
  472 
  473         for (i = 0; i < MAX_NUM_PPFID; i++) {
  474                 if (!(p_dev->ppfid_bitmap & (0x1 << i)))
  475                         continue;
  476 
  477                 p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
  478                 DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n",
  479                            p_llh_info->num_ppfid, i);
  480                 p_llh_info->num_ppfid++;
  481         }
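        /* e.g. a ppfid_bitmap of 0x5 (bits 0 and 2 set) yields
         * ppfid_array = {0, 2} and num_ppfid = 2
         */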
  482 
  483         size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
  484         p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size);
  485         if (!p_llh_info->pp_filters)
  486                 return ECORE_NOMEM;
  487 
  488         size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
  489                sizeof(**p_llh_info->pp_filters);
  490         for (i = 0; i < p_llh_info->num_ppfid; i++) {
  491                 p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL,
  492                                                         size);
  493                 if (!p_llh_info->pp_filters[i])
  494                         return ECORE_NOMEM;
  495         }
  496 
  497         return ECORE_SUCCESS;
  498 }
  499 
  500 static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev,
  501                                                     u8 ppfid, u8 filter_idx,
  502                                                     const char *action)
  503 {
  504         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  505 
  506         if (ppfid >= p_llh_info->num_ppfid) {
  507                 DP_NOTICE(p_dev, false,
  508                           "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
  509                           action, ppfid, p_llh_info->num_ppfid);
  510                 return ECORE_INVAL;
  511         }
  512 
  513         if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
  514                 DP_NOTICE(p_dev, false,
  515                           "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
  516                           action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
  517                 return ECORE_INVAL;
  518         }
  519 
  520         return ECORE_SUCCESS;
  521 }
  522 
  523 #define ECORE_LLH_INVALID_FILTER_IDX    0xff
  524 
  525 static enum _ecore_status_t
  526 ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid,
  527                                union ecore_llh_filter *p_filter,
  528                                u8 *p_filter_idx)
  529 {
  530         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  531         struct ecore_llh_filter_info *p_filters;
  532         enum _ecore_status_t rc;
  533         u8 i;
  534 
  535         rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search");
  536         if (rc != ECORE_SUCCESS)
  537                 return rc;
  538 
  539         *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
  540 
  541         p_filters = p_llh_info->pp_filters[ppfid];
  542         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
  543                 if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter,
  544                                  sizeof(*p_filter))) {
  545                         *p_filter_idx = i;
  546                         break;
  547                 }
  548         }
  549 
  550         return ECORE_SUCCESS;
  551 }
  552 
  553 static enum _ecore_status_t
  554 ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid,
  555                               u8 *p_filter_idx)
  556 {
  557         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  558         struct ecore_llh_filter_info *p_filters;
  559         enum _ecore_status_t rc;
  560         u8 i;
  561 
  562         rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx");
  563         if (rc != ECORE_SUCCESS)
  564                 return rc;
  565 
  566         *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
  567 
  568         p_filters = p_llh_info->pp_filters[ppfid];
  569         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
  570                 if (!p_filters[i].b_enabled) {
  571                         *p_filter_idx = i;
  572                         break;
  573                 }
  574         }
  575 
  576         return ECORE_SUCCESS;
  577 }
  578 
  579 static enum _ecore_status_t
  580 __ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx,
  581                               enum ecore_llh_filter_type type,
  582                               union ecore_llh_filter *p_filter, u32 *p_ref_cnt)
  583 {
  584         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  585         struct ecore_llh_filter_info *p_filters;
  586         enum _ecore_status_t rc;
  587 
  588         rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add");
  589         if (rc != ECORE_SUCCESS)
  590                 return rc;
  591 
  592         p_filters = p_llh_info->pp_filters[ppfid];
  593         if (!p_filters[filter_idx].ref_cnt) {
  594                 p_filters[filter_idx].b_enabled = true;
  595                 p_filters[filter_idx].type = type;
  596                 OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter,
  597                             sizeof(p_filters[filter_idx].filter));
  598         }
  599 
  600         *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
  601 
  602         return ECORE_SUCCESS;
  603 }
  604 
  605 static enum _ecore_status_t
  606 ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid,
  607                             enum ecore_llh_filter_type type,
  608                             union ecore_llh_filter *p_filter,
  609                             u8 *p_filter_idx, u32 *p_ref_cnt)
  610 {
  611         enum _ecore_status_t rc;
  612 
   613         /* Check if the same filter already exists */
  614         rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
  615                                             p_filter_idx);
  616         if (rc != ECORE_SUCCESS)
  617                 return rc;
  618 
  619         /* Find a new entry in case of a new filter */
  620         if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
  621                 rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx);
  622                 if (rc != ECORE_SUCCESS)
  623                         return rc;
  624         }
  625 
  626         /* No free entry was found */
  627         if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
  628                 DP_NOTICE(p_dev, false,
  629                           "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
  630                           ppfid);
  631                 return ECORE_NORESOURCES;
  632         }
  633 
  634         return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type,
  635                                              p_filter, p_ref_cnt);
  636 }
  637 
  638 static enum _ecore_status_t
  639 __ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
  640                                  u8 filter_idx, u32 *p_ref_cnt)
  641 {
  642         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  643         struct ecore_llh_filter_info *p_filters;
  644         enum _ecore_status_t rc;
  645 
  646         rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove");
  647         if (rc != ECORE_SUCCESS)
  648                 return rc;
  649 
  650         p_filters = p_llh_info->pp_filters[ppfid];
  651         if (!p_filters[filter_idx].ref_cnt) {
  652                 DP_NOTICE(p_dev, false,
  653                           "LLH shadow: trying to remove a filter with ref_cnt=0\n");
  654                 return ECORE_INVAL;
  655         }
  656 
  657         *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
  658         if (!p_filters[filter_idx].ref_cnt)
  659                 OSAL_MEM_ZERO(&p_filters[filter_idx],
  660                               sizeof(p_filters[filter_idx]));
  661 
  662         return ECORE_SUCCESS;
  663 }
  664 
  665 static enum _ecore_status_t
  666 ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
  667                                union ecore_llh_filter *p_filter,
  668                                u8 *p_filter_idx, u32 *p_ref_cnt)
  669 {
  670         enum _ecore_status_t rc;
  671 
  672         rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
  673                                             p_filter_idx);
  674         if (rc != ECORE_SUCCESS)
  675                 return rc;
  676 
  677         /* No matching filter was found */
  678         if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
  679                 DP_NOTICE(p_dev, false,
  680                           "Failed to find a filter in the LLH shadow\n");
  681                 return ECORE_INVAL;
  682         }
  683 
  684         return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx,
  685                                                 p_ref_cnt);
  686 }
  687 
  688 static enum _ecore_status_t
  689 ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
  690 {
  691         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  692         struct ecore_llh_filter_info *p_filters;
  693         enum _ecore_status_t rc;
  694 
  695         rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all");
  696         if (rc != ECORE_SUCCESS)
  697                 return rc;
  698 
  699         p_filters = p_llh_info->pp_filters[ppfid];
  700         OSAL_MEM_ZERO(p_filters,
  701                       NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters));
  702 
  703         return ECORE_SUCCESS;
  704 }
  705 
  706 static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
  707                                             u8 rel_ppfid, u8 *p_abs_ppfid)
  708 {
  709         struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
  710 
  711         if (rel_ppfid >= p_llh_info->num_ppfid) {
  712                 DP_NOTICE(p_dev, false,
  713                           "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
  714                           rel_ppfid, (u8)(p_llh_info->num_ppfid - 1));
  715                 return ECORE_INVAL;
  716         }
  717 
  718         *p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid];
  719 
  720         return ECORE_SUCCESS;
  721 }
  722 
  723 static enum _ecore_status_t
  724 __ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
  725 {
  726         struct ecore_dev *p_dev = p_hwfn->p_dev;
  727         enum ecore_eng eng;
  728         u8 ppfid;
  729         enum _ecore_status_t rc;
  730 
  731         rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
  732         if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
  733                 DP_NOTICE(p_hwfn, false,
  734                           "Failed to get the engine affinity configuration\n");
  735                 return rc;
  736         }
  737 
  738         /* RoCE PF is bound to a single engine */
  739         if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
  740                 eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
  741                 rc = ecore_llh_set_roce_affinity(p_dev, eng);
  742                 if (rc != ECORE_SUCCESS) {
  743                         DP_NOTICE(p_dev, false,
  744                                   "Failed to set the RoCE engine affinity\n");
  745                         return rc;
  746                 }
  747 
  748                 DP_VERBOSE(p_dev, ECORE_MSG_SP,
  749                            "LLH: Set the engine affinity of RoCE packets as %d\n",
  750                            eng);
  751         }
  752 
  753         /* Storage PF is bound to a single engine while L2 PF uses both */
  754         if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
  755             ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
  756                 eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
  757         else /* L2_PERSONALITY */
  758                 eng = ECORE_BOTH_ENG;
  759 
  760         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
  761                 rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
  762                 if (rc != ECORE_SUCCESS) {
  763                         DP_NOTICE(p_dev, false,
  764                                   "Failed to set the engine affinity of ppfid %d\n",
  765                                   ppfid);
  766                         return rc;
  767                 }
  768         }
  769 
  770         DP_VERBOSE(p_dev, ECORE_MSG_SP,
  771                    "LLH: Set the engine affinity of non-RoCE packets as %d\n",
  772                    eng);
  773 
  774         return ECORE_SUCCESS;
  775 }
  776 
  777 static enum _ecore_status_t
  778 ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
  779                            bool avoid_eng_affin)
  780 {
  781         struct ecore_dev *p_dev = p_hwfn->p_dev;
  782         enum _ecore_status_t rc;
  783 
  784         /* Backwards compatible mode:
  785          * - RoCE packets     - Use engine 0.
  786          * - Non-RoCE packets - Use connection based classification for L2 PFs,
  787          *                      and engine 0 otherwise.
  788          */
  789         if (avoid_eng_affin) {
  790                 enum ecore_eng eng;
  791                 u8 ppfid;
  792 
  793                 if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
  794                         eng = ECORE_ENG0;
  795                         rc = ecore_llh_set_roce_affinity(p_dev, eng);
  796                         if (rc != ECORE_SUCCESS) {
  797                                 DP_NOTICE(p_dev, false,
  798                                           "Failed to set the RoCE engine affinity\n");
  799                                 return rc;
  800                         }
  801 
  802                         DP_VERBOSE(p_dev, ECORE_MSG_SP,
  803                                    "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n",
  804                                    eng);
  805                 }
  806 
  807                 eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
  808                        ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0
  809                                                            : ECORE_BOTH_ENG;
  810                 for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
  811                         rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
  812                         if (rc != ECORE_SUCCESS) {
  813                                 DP_NOTICE(p_dev, false,
  814                                           "Failed to set the engine affinity of ppfid %d\n",
  815                                           ppfid);
  816                                 return rc;
  817                         }
  818                 }
  819 
  820                 DP_VERBOSE(p_dev, ECORE_MSG_SP,
  821                            "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n",
  822                            eng);
  823 
  824                 return ECORE_SUCCESS;
  825         }
  826 
  827         return __ecore_llh_set_engine_affin(p_hwfn, p_ptt);
  828 }
  829 
  830 static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
  831                                                  struct ecore_ptt *p_ptt,
  832                                                  bool avoid_eng_affin)
  833 {
  834         struct ecore_dev *p_dev = p_hwfn->p_dev;
  835         u8 ppfid, abs_ppfid;
  836         enum _ecore_status_t rc;
  837 
  838         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
  839                 u32 addr;
  840 
  841                 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
  842                 if (rc != ECORE_SUCCESS)
  843                         return rc;
  844 
  845                 addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
  846                 ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
  847         }
  848 
  849         if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
  850             !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
  851                 rc = ecore_llh_add_mac_filter(p_dev, 0,
  852                                               p_hwfn->hw_info.hw_mac_addr);
  853                 if (rc != ECORE_SUCCESS)
  854                         DP_NOTICE(p_dev, false,
  855                                   "Failed to add an LLH filter with the primary MAC\n");
  856         }
  857 
  858         if (ECORE_IS_CMT(p_dev)) {
  859                 rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin);
  860                 if (rc != ECORE_SUCCESS)
  861                         return rc;
  862         }
  863 
  864         return ECORE_SUCCESS;
  865 }
  866 
  867 u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev)
  868 {
  869         return p_dev->p_llh_info->num_ppfid;
  870 }
  871 
  872 enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
  873 {
  874         return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
  875 }
  876 
  877 /* TBD - should be removed when these definitions are available in reg_addr.h */
  878 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK             0x3
  879 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT            0
  880 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK         0x3
  881 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT        2
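
/* A worked example of how these mask/shift pairs are consumed, assuming the
 * usual ecore SET_FIELD(value, name, val) definition, which clears
 * (name##_MASK << name##_SHIFT) in value and ORs in (val << name##_SHIFT):
 * writing eng_sel = 2 (both engines) into the non-RoCE field clears bits
 * [3:2] and sets them to 0b10, i.e. val = (val & ~0xc) | 0x8.
 */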
  882 
  883 enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
  884                                                   u8 ppfid, enum ecore_eng eng)
  885 {
  886         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
  887         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
  888         u32 addr, val, eng_sel;
  889         enum _ecore_status_t rc = ECORE_SUCCESS;
  890         u8 abs_ppfid;
  891 
  892         if (p_ptt == OSAL_NULL)
  893                 return ECORE_AGAIN;
  894 
  895         if (!ECORE_IS_CMT(p_dev))
  896                 goto out;
  897 
  898         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
  899         if (rc != ECORE_SUCCESS)
  900                 goto out;
  901 
  902         switch (eng) {
  903         case ECORE_ENG0:
  904                 eng_sel = 0;
  905                 break;
  906         case ECORE_ENG1:
  907                 eng_sel = 1;
  908                 break;
  909         case ECORE_BOTH_ENG:
  910                 eng_sel = 2;
  911                 break;
  912         default:
  913                 DP_NOTICE(p_dev, false,
  914                           "Invalid affinity value for ppfid [%d]\n", eng);
  915                 rc = ECORE_INVAL;
  916                 goto out;
  917         }
  918 
  919         addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
  920         val = ecore_rd(p_hwfn, p_ptt, addr);
  921         SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
  922         ecore_wr(p_hwfn, p_ptt, addr, val);
  923 
  924         /* The iWARP affinity is set as the affinity of ppfid 0 */
  925         if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn))
  926                 p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0;
  927 out:
  928         ecore_ptt_release(p_hwfn, p_ptt);
  929 
  930         return rc;
  931 }
  932 
  933 enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
  934                                                  enum ecore_eng eng)
  935 {
  936         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
  937         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
  938         u32 addr, val, eng_sel;
  939         enum _ecore_status_t rc = ECORE_SUCCESS;
  940         u8 ppfid, abs_ppfid;
  941 
  942         if (p_ptt == OSAL_NULL)
  943                 return ECORE_AGAIN;
  944 
  945         if (!ECORE_IS_CMT(p_dev))
  946                 goto out;
  947 
  948         switch (eng) {
  949         case ECORE_ENG0:
  950                 eng_sel = 0;
  951                 break;
  952         case ECORE_ENG1:
  953                 eng_sel = 1;
  954                 break;
  955         case ECORE_BOTH_ENG:
  956                 eng_sel = 2;
  957                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
  958                          0xf /* QP bit 15 */);
  959                 break;
  960         default:
  961                 DP_NOTICE(p_dev, false,
  962                           "Invalid affinity value for RoCE [%d]\n", eng);
  963                 rc = ECORE_INVAL;
  964                 goto out;
  965         }
  966 
  967         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
  968                 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
  969                 if (rc != ECORE_SUCCESS)
  970                         goto out;
  971 
  972                 addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
  973                 val = ecore_rd(p_hwfn, p_ptt, addr);
  974                 SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
  975                 ecore_wr(p_hwfn, p_ptt, addr, val);
  976         }
  977 out:
  978         ecore_ptt_release(p_hwfn, p_ptt);
  979 
  980         return rc;
  981 }
  982 
  983 struct ecore_llh_filter_e4_details {
  984         u64 value;
  985         u32 mode;
  986         u32 protocol_type;
  987         u32 hdr_sel;
  988         u32 enable;
  989 };
  990 
  991 static enum _ecore_status_t
  992 ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
  993                            struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
  994                            struct ecore_llh_filter_e4_details *p_details,
  995                            bool b_write_access)
  996 {
  997         u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
  998         struct ecore_dmae_params params;
  999         enum _ecore_status_t rc;
 1000         u32 addr;
 1001 
 1002         /* The NIG/LLH registers that are accessed in this function have only 16
  1003          * rows which are exposed to a PF, i.e. only the 16 filters of its
  1004          * default ppfid.
 1005          * Accessing filters of other ppfids requires pretending to other PFs,
 1006          * and thus the usage of the ecore_ppfid_rd/wr() functions.
 1007          */
 1008 
 1009         /* Filter enable - should be done first when removing a filter */
 1010         if (b_write_access && !p_details->enable) {
 1011                 addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
 1012                 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
 1013                                p_details->enable);
 1014         }
 1015 
 1016         /* Filter value */
 1017         addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
 1018         OSAL_MEMSET(&params, 0, sizeof(params));
 1019 
 1020         if (b_write_access) {
 1021                 params.flags = ECORE_DMAE_FLAG_PF_DST;
 1022                 params.dst_pfid = pfid;
 1023                 rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 1024                                          (u64)(osal_uintptr_t)&p_details->value,
 1025                                          addr, 2 /* size_in_dwords */, &params);
 1026         } else {
 1027                 params.flags = ECORE_DMAE_FLAG_PF_SRC |
 1028                                ECORE_DMAE_FLAG_COMPLETION_DST;
 1029                 params.src_pfid = pfid;
 1030                 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
 1031                                          (u64)(osal_uintptr_t)&p_details->value,
 1032                                          2 /* size_in_dwords */, &params);
 1033         }
 1034 
 1035         if (rc != ECORE_SUCCESS)
 1036                 return rc;
 1037 
 1038         /* Filter mode */
 1039         addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
 1040         if (b_write_access)
 1041                 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
 1042         else
 1043                 p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
 1044                                                  addr);
 1045 
 1046         /* Filter protocol type */
 1047         addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
 1048         if (b_write_access)
 1049                 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
 1050                                p_details->protocol_type);
 1051         else
 1052                 p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt,
 1053                                                           abs_ppfid, addr);
 1054 
 1055         /* Filter header select */
 1056         addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
 1057         if (b_write_access)
 1058                 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
 1059                                p_details->hdr_sel);
 1060         else
 1061                 p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
 1062                                                     addr);
 1063 
 1064         /* Filter enable - should be done last when adding a filter */
 1065         if (!b_write_access || p_details->enable) {
 1066                 addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
 1067                 if (b_write_access)
 1068                         ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
 1069                                        p_details->enable);
 1070                 else
 1071                         p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt,
 1072                                                            abs_ppfid, addr);
 1073         }
 1074 
 1075         return ECORE_SUCCESS;
 1076 }
 1077 
 1078 static enum _ecore_status_t
 1079 ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 1080                         u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
 1081                         u32 high, u32 low)
 1082 {
 1083         struct ecore_llh_filter_e4_details filter_details;
 1084 
 1085         filter_details.enable = 1;
 1086         filter_details.value = ((u64)high << 32) | low;
 1087         filter_details.hdr_sel = 0;
 1088         filter_details.protocol_type = filter_prot_type;
 1089         filter_details.mode = filter_prot_type ?
 1090                               1 : /* protocol-based classification */
 1091                               0;  /* MAC-address based classification */
 1092 
 1093         return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
 1094                                           &filter_details,
 1095                                           true /* write access */);
 1096 }
 1097 
 1098 static enum _ecore_status_t
 1099 ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
 1100                            struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
 1101 {
 1102         struct ecore_llh_filter_e4_details filter_details;
 1103 
 1104         OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
 1105 
 1106         return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
 1107                                           &filter_details,
 1108                                           true /* write access */);
 1109 }
 1110 
  1111 /* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
 1112  * Should be removed when the function is implemented.
 1113  */
 1114 static enum _ecore_status_t
 1115 ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
 1116                         struct ecore_ptt OSAL_UNUSED *p_ptt,
 1117                         u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx,
 1118                         u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high,
 1119                         u32 OSAL_UNUSED low)
 1120 {
 1121         ECORE_E5_MISSING_CODE;
 1122 
 1123         return ECORE_NOTIMPL;
 1124 }
 1125 
  1126 /* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
 1127  * Should be removed when the function is implemented.
 1128  */
 1129 static enum _ecore_status_t
 1130 ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
 1131                            struct ecore_ptt OSAL_UNUSED *p_ptt,
 1132                            u8 OSAL_UNUSED abs_ppfid,
 1133                            u8 OSAL_UNUSED filter_idx)
 1134 {
 1135         ECORE_E5_MISSING_CODE;
 1136 
 1137         return ECORE_NOTIMPL;
 1138 }
 1139 
 1140 static enum _ecore_status_t
 1141 ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 1142                      u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
 1143                      u32 low)
 1144 {
 1145         if (ECORE_IS_E4(p_hwfn->p_dev))
 1146                 return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
 1147                                                filter_idx, filter_prot_type,
 1148                                                high, low);
 1149         else /* E5 */
 1150                 return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid,
 1151                                                filter_idx, filter_prot_type,
 1152                                                high, low);
 1153 }
 1154 
 1155 static enum _ecore_status_t
 1156 ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 1157                         u8 abs_ppfid, u8 filter_idx)
 1158 {
 1159         if (ECORE_IS_E4(p_hwfn->p_dev))
 1160                 return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
 1161                                                   filter_idx);
 1162         else /* E5 */
 1163                 return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid,
 1164                                                   filter_idx);
 1165 }
 1166 
 1167 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
 1168                                               u8 mac_addr[ETH_ALEN])
 1169 {
 1170         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1171         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1172         union ecore_llh_filter filter;
 1173         u8 filter_idx, abs_ppfid;
 1174         u32 high, low, ref_cnt;
 1175         enum _ecore_status_t rc = ECORE_SUCCESS;
 1176 
 1177         if (p_ptt == OSAL_NULL)
 1178                 return ECORE_AGAIN;
 1179 
 1180         if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 1181                 goto out;
 1182 
 1183         OSAL_MEM_ZERO(&filter, sizeof(filter));
 1184         OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
 1185         rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
 1186                                          ECORE_LLH_FILTER_TYPE_MAC,
 1187                                          &filter, &filter_idx, &ref_cnt);
 1188         if (rc != ECORE_SUCCESS)
 1189                 goto err;
 1190 
 1191         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1192         if (rc != ECORE_SUCCESS)
 1193                 goto err;
 1194 
  1195         /* Configure the LLH only in case of a new filter */
 1196         if (ref_cnt == 1) {
 1197                 high = mac_addr[1] | (mac_addr[0] << 8);
 1198                 low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
 1199                       (mac_addr[2] << 24);
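                /* e.g. for MAC 00:11:22:33:44:55 this yields high = 0x0011 and
                 * low = 0x22334455, i.e. the address as a big-endian integer
                 * split across the two 32-bit filter-value registers
                 */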
 1200                 rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
 1201                                           0, high, low);
 1202                 if (rc != ECORE_SUCCESS)
 1203                         goto err;
 1204         }
 1205 
 1206         DP_VERBOSE(p_dev, ECORE_MSG_SP,
 1207                    "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
 1208                    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
 1209                    mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
 1210                    ref_cnt);
 1211 
 1212         goto out;
 1213 
 1214 err:
 1215         DP_NOTICE(p_dev, false,
 1216                   "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n",
 1217                   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
 1218                   mac_addr[4], mac_addr[5], ppfid);
 1219 out:
 1220         ecore_ptt_release(p_hwfn, p_ptt);
 1221 
 1222         return rc;
 1223 }
 1224 
 1225 static enum _ecore_status_t
 1226 ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev,
 1227                                     enum ecore_llh_prot_filter_type_t type,
 1228                                     u16 source_port_or_eth_type, u16 dest_port,
 1229                                     u8 *str, osal_size_t str_len)
 1230 {
 1231         switch (type) {
 1232         case ECORE_LLH_FILTER_ETHERTYPE:
 1233                 OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x",
 1234                               source_port_or_eth_type);
 1235                 break;
 1236         case ECORE_LLH_FILTER_TCP_SRC_PORT:
 1237                 OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x",
 1238                               source_port_or_eth_type);
 1239                 break;
 1240         case ECORE_LLH_FILTER_UDP_SRC_PORT:
 1241                 OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x",
 1242                               source_port_or_eth_type);
 1243                 break;
 1244         case ECORE_LLH_FILTER_TCP_DEST_PORT:
 1245                 OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port);
 1246                 break;
 1247         case ECORE_LLH_FILTER_UDP_DEST_PORT:
 1248                 OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port);
 1249                 break;
 1250         case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
 1251                 OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
 1252                               source_port_or_eth_type, dest_port);
 1253                 break;
 1254         case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
 1255                 OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
 1256                               source_port_or_eth_type, dest_port);
 1257                 break;
 1258         default:
 1259                 DP_NOTICE(p_dev, true,
  1260                   "Invalid LLH protocol filter type %d\n", type);
 1261                 return ECORE_INVAL;
 1262         }
 1263 
 1264         return ECORE_SUCCESS;
 1265 }
 1266 
 1267 static enum _ecore_status_t
 1268 ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
 1269                                   enum ecore_llh_prot_filter_type_t type,
 1270                                   u16 source_port_or_eth_type, u16 dest_port,
 1271                                   u32 *p_high, u32 *p_low)
 1272 {
 1273         *p_high = 0;
 1274         *p_low = 0;
 1275 
 1276         switch (type) {
 1277         case ECORE_LLH_FILTER_ETHERTYPE:
 1278                 *p_high = source_port_or_eth_type;
 1279                 break;
 1280         case ECORE_LLH_FILTER_TCP_SRC_PORT:
 1281         case ECORE_LLH_FILTER_UDP_SRC_PORT:
 1282                 *p_low = source_port_or_eth_type << 16;
 1283                 break;
 1284         case ECORE_LLH_FILTER_TCP_DEST_PORT:
 1285         case ECORE_LLH_FILTER_UDP_DEST_PORT:
 1286                 *p_low = dest_port;
 1287                 break;
 1288         case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
 1289         case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
 1290                 *p_low = (source_port_or_eth_type << 16) | dest_port;
 1291                 break;
 1292         default:
 1293                 DP_NOTICE(p_dev, true,
 1294                           "Invalid LLH protocol filter type %d\n", type);
 1295                 return ECORE_INVAL;
 1296         }
 1297 
 1298         return ECORE_SUCCESS;
 1299 }
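
      /* Worked example (illustrative): an
       * ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT filter with source port 0x1234
       * and destination port 0x5678 yields
       *   *p_high = 0;
       *   *p_low  = (0x1234 << 16) | 0x5678 = 0x12345678;
       * i.e. the source port occupies the upper 16 bits of the low register
       * and the destination port the lower 16 bits.
       */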
 1300 
 1301 enum _ecore_status_t
 1302 ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 1303                               enum ecore_llh_prot_filter_type_t type,
 1304                               u16 source_port_or_eth_type, u16 dest_port)
 1305 {
 1306         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1307         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1308         u8 filter_idx, abs_ppfid, str[32], type_bitmap;
 1309         union ecore_llh_filter filter;
 1310         u32 high, low, ref_cnt;
 1311         enum _ecore_status_t rc = ECORE_SUCCESS;
 1312 
 1313         if (p_ptt == OSAL_NULL)
 1314                 return ECORE_AGAIN;
 1315 
 1316         if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
 1317                 goto out;
 1318 
 1319         rc = ecore_llh_protocol_filter_stringify(p_dev, type,
 1320                                                  source_port_or_eth_type,
 1321                                                  dest_port, str, sizeof(str));
 1322         if (rc != ECORE_SUCCESS)
 1323                 goto err;
 1324 
 1325         OSAL_MEM_ZERO(&filter, sizeof(filter));
 1326         filter.protocol.type = type;
 1327         filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
 1328         filter.protocol.dest_port = dest_port;
 1329         rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
 1330                                          ECORE_LLH_FILTER_TYPE_PROTOCOL,
 1331                                          &filter, &filter_idx, &ref_cnt);
 1332         if (rc != ECORE_SUCCESS)
 1333                 goto err;
 1334 
 1335         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1336         if (rc != ECORE_SUCCESS)
 1337                 goto err;
 1338 
 1339         /* Configure the LLH only in case of a new filter */
 1340         if (ref_cnt == 1) {
 1341                 rc = ecore_llh_protocol_filter_to_hilo(p_dev, type,
 1342                                                        source_port_or_eth_type,
 1343                                                        dest_port, &high, &low);
 1344                 if (rc != ECORE_SUCCESS)
 1345                         goto err;
 1346 
 1347                 type_bitmap = 0x1 << type;
 1348                 rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
 1349                                           type_bitmap, high, low);
 1350                 if (rc != ECORE_SUCCESS)
 1351                         goto err;
 1352         }
 1353 
 1354         DP_VERBOSE(p_dev, ECORE_MSG_SP,
 1355                    "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
 1356                    str, ppfid, abs_ppfid, filter_idx, ref_cnt);
 1357 
 1358         goto out;
 1359 
 1360 err:
 1361         DP_NOTICE(p_hwfn, false,
 1362                   "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
 1363                   str, ppfid);
 1364 out:
 1365         ecore_ptt_release(p_hwfn, p_ptt);
 1366 
 1367         return rc;
 1368 }
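
      /* Usage sketch (hypothetical; the ECORE_EXAMPLE_SKETCHES guard keeps it
       * out of the build): steering TCP traffic destined to the iSCSI port,
       * 3260, to ppfid 0 through the public API above.
       */
      #ifdef ECORE_EXAMPLE_SKETCHES
      static enum _ecore_status_t
      example_add_iscsi_port_filter(struct ecore_dev *p_dev)
      {
              return ecore_llh_add_protocol_filter(p_dev, 0 /* ppfid */,
                                                   ECORE_LLH_FILTER_TCP_DEST_PORT,
                                                   0 /* src port - unused */,
                                                   3260 /* dst port */);
      }
      #endif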
 1369 
 1370 void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
 1371                                  u8 mac_addr[ETH_ALEN])
 1372 {
 1373         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1374         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1375         union ecore_llh_filter filter;
 1376         u8 filter_idx, abs_ppfid;
 1377         enum _ecore_status_t rc = ECORE_SUCCESS;
 1378         u32 ref_cnt;
 1379 
 1380         if (p_ptt == OSAL_NULL)
 1381                 return;
 1382 
 1383         if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 1384                 goto out;
 1385 
 1386         OSAL_MEM_ZERO(&filter, sizeof(filter));
 1387         OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
 1388         rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
 1389                                             &ref_cnt);
 1390         if (rc != ECORE_SUCCESS)
 1391                 goto err;
 1392 
 1393         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1394         if (rc != ECORE_SUCCESS)
 1395                 goto err;
 1396 
 1397         /* Remove from the LLH in case the filter is not in use */
 1398         if (!ref_cnt) {
 1399                 rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
 1400                                              filter_idx);
 1401                 if (rc != ECORE_SUCCESS)
 1402                         goto err;
 1403         }
 1404 
 1405         DP_VERBOSE(p_dev, ECORE_MSG_SP,
 1406                    "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
 1407                    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
 1408                    mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
 1409                    ref_cnt);
 1410 
 1411         goto out;
 1412 
 1413 err:
 1414         DP_NOTICE(p_dev, false,
 1415                   "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n",
 1416                   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
 1417                   mac_addr[4], mac_addr[5], ppfid);
 1418 out:
 1419         ecore_ptt_release(p_hwfn, p_ptt);
 1420 }
 1421 
 1422 void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
 1423                                       enum ecore_llh_prot_filter_type_t type,
 1424                                       u16 source_port_or_eth_type,
 1425                                       u16 dest_port)
 1426 {
 1427         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1428         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1429         u8 filter_idx, abs_ppfid, str[32];
 1430         union ecore_llh_filter filter;
 1431         enum _ecore_status_t rc = ECORE_SUCCESS;
 1432         u32 ref_cnt;
 1433 
 1434         if (p_ptt == OSAL_NULL)
 1435                 return;
 1436 
 1437         if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
 1438                 goto out;
 1439 
 1440         rc = ecore_llh_protocol_filter_stringify(p_dev, type,
 1441                                                  source_port_or_eth_type,
 1442                                                  dest_port, str, sizeof(str));
 1443         if (rc != ECORE_SUCCESS)
 1444                 goto err;
 1445 
 1446         OSAL_MEM_ZERO(&filter, sizeof(filter));
 1447         filter.protocol.type = type;
 1448         filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
 1449         filter.protocol.dest_port = dest_port;
 1450         rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
 1451                                             &ref_cnt);
 1452         if (rc != ECORE_SUCCESS)
 1453                 goto err;
 1454 
 1455         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1456         if (rc != ECORE_SUCCESS)
 1457                 goto err;
 1458 
 1459         /* Remove from the LLH in case the filter is not in use */
 1460         if (!ref_cnt) {
 1461                 rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
 1462                                              filter_idx);
 1463                 if (rc != ECORE_SUCCESS)
 1464                         goto err;
 1465         }
 1466 
 1467         DP_VERBOSE(p_dev, ECORE_MSG_SP,
 1468                    "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
 1469                    str, ppfid, abs_ppfid, filter_idx, ref_cnt);
 1470 
 1471         goto out;
 1472 
 1473 err:
 1474         DP_NOTICE(p_dev, false,
 1475                   "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
 1476                   str, ppfid);
 1477 out:
 1478         ecore_ptt_release(p_hwfn, p_ptt);
 1479 }
 1480 
 1481 void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
 1482 {
 1483         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1484         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1485         u8 filter_idx, abs_ppfid;
 1486         enum _ecore_status_t rc = ECORE_SUCCESS;
 1487 
 1488         if (p_ptt == OSAL_NULL)
 1489                 return;
 1490 
 1491         if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
 1492             !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 1493                 goto out;
 1494 
 1495         rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1496         if (rc != ECORE_SUCCESS)
 1497                 goto out;
 1498 
 1499         rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid);
 1500         if (rc != ECORE_SUCCESS)
 1501                 goto out;
 1502 
 1503         for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
 1504              filter_idx++) {
 1505                 if (ECORE_IS_E4(p_dev))
 1506                         rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
 1507                                                         abs_ppfid, filter_idx);
 1508                 else /* E5 */
 1509                         rc = ecore_llh_remove_filter_e5(p_hwfn, p_ptt,
 1510                                                         abs_ppfid, filter_idx);
 1511                 if (rc != ECORE_SUCCESS)
 1512                         goto out;
 1513         }
 1514 out:
 1515         ecore_ptt_release(p_hwfn, p_ptt);
 1516 }
 1517 
 1518 void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
 1519 {
 1520         u8 ppfid;
 1521 
 1522         if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
 1523             !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
 1524                 return;
 1525 
 1526         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
 1527                 ecore_llh_clear_ppfid_filters(p_dev, ppfid);
 1528 }
 1529 
 1530 enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
 1531                                          struct ecore_ptt *p_ptt, u32 addr,
 1532                                          u32 val)
 1533 {
 1534         struct ecore_dev *p_dev = p_hwfn->p_dev;
 1535         u8 ppfid, abs_ppfid;
 1536         enum _ecore_status_t rc;
 1537 
 1538         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
 1539                 rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
 1540                 if (rc != ECORE_SUCCESS)
 1541                         return rc;
 1542 
 1543                 ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val);
 1544         }
 1545 
 1546         return ECORE_SUCCESS;
 1547 }
 1548 
 1549 static enum _ecore_status_t
 1550 ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 1551                         u8 ppfid)
 1552 {
 1553         struct ecore_llh_filter_e4_details filter_details;
 1554         u8 abs_ppfid, filter_idx;
 1555         u32 addr;
 1556         enum _ecore_status_t rc;
 1557 
 1558         rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
 1559         if (rc != ECORE_SUCCESS)
 1560                 return rc;
 1561 
 1562         addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
 1563         DP_NOTICE(p_hwfn, false,
 1564                   "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n",
 1565                   p_hwfn->rel_pf_id, ppfid, abs_ppfid,
 1566                   ecore_rd(p_hwfn, p_ptt, addr));
 1567 
 1568         for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
 1569              filter_idx++) {
 1570                 OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
 1571                 rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
 1572                                                 filter_idx, &filter_details,
 1573                                                 false /* read access */);
 1574                 if (rc != ECORE_SUCCESS)
 1575                         return rc;
 1576 
 1577                 DP_NOTICE(p_hwfn, false,
 1578                           "filter %2hhd: enable %d, value 0x%016llx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
 1579                           filter_idx, filter_details.enable,
 1580                           (unsigned long long)filter_details.value, filter_details.mode,
 1581                           filter_details.protocol_type, filter_details.hdr_sel);
 1582         }
 1583 
 1584         return ECORE_SUCCESS;
 1585 }
 1586 
 1587 static enum _ecore_status_t
 1588 ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
 1589                         struct ecore_ptt OSAL_UNUSED *p_ptt,
 1590                         u8 OSAL_UNUSED ppfid)
 1591 {
 1592         ECORE_E5_MISSING_CODE;
 1593 
 1594         return ECORE_NOTIMPL;
 1595 }
 1596 
 1597 enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
 1598 {
 1599         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 1600         struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 1601         enum _ecore_status_t rc;
 1602 
 1603         if (p_ptt == OSAL_NULL)
 1604                 return ECORE_AGAIN;
 1605 
 1606         if (ECORE_IS_E4(p_dev))
 1607                 rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
 1608         else /* E5 */
 1609                 rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid);
 1610 
 1611         ecore_ptt_release(p_hwfn, p_ptt);
 1612 
 1613         return rc;
 1614 }
 1615 
 1616 enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
 1617 {
 1618         u8 ppfid;
 1619         enum _ecore_status_t rc;
 1620 
 1621         for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
 1622                 rc = ecore_llh_dump_ppfid(p_dev, ppfid);
 1623                 if (rc != ECORE_SUCCESS)
 1624                         return rc;
 1625         }
 1626 
 1627         return ECORE_SUCCESS;
 1628 }
 1629 
 1630 /******************************* NIG LLH - End ********************************/
 1631 
 1632 /* Configurable */
 1633 #define ECORE_MIN_DPIS          (4)  /* The minimal number of DPIs required to
 1634                                       * load the driver. The number was
 1635                                       * arbitrarily set.
 1636                                       */
 1637 
 1638 /* Derived */
 1639 #define ECORE_MIN_PWM_REGION    (ECORE_WID_SIZE * ECORE_MIN_DPIS)
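
      /* Worked value (illustrative): assuming the 1kB WID size defined in
       * ecore.h (outside this excerpt), the minimal PWM region comes out to
       * 1024 * 4 = 4kB of the doorbell BAR.
       */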
 1640 
 1641 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 1642                              struct ecore_ptt *p_ptt,
 1643                              enum BAR_ID bar_id)
 1644 {
 1645         u32 bar_reg = (bar_id == BAR_ID_0 ?
 1646                        PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
 1647         u32 val;
 1648 
 1649         if (IS_VF(p_hwfn->p_dev))
 1650                 return ecore_vf_hw_bar_size(p_hwfn, bar_id);
 1651 
 1652         val = ecore_rd(p_hwfn, p_ptt, bar_reg);
 1653         if (val)
 1654                 return 1 << (val + 15);
 1655 
 1656         /* In the past the above registers were updated only in CMT mode.
 1657          * Since they proved useful, the MFW updates them as of 8.7.7.0.
 1658          * In older MFW versions they are set to 0, i.e. not configured.
 1659          */
 1660         if (ECORE_IS_CMT(p_hwfn->p_dev)) {
 1661                 DP_INFO(p_hwfn,
 1662                         "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
 1663                 return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
 1664         } else {
 1665                 DP_INFO(p_hwfn,
 1666                         "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
 1667                 return 512 * 1024;
 1668         }
 1669 }
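
      /* Worked example (illustrative): the register encodes the BAR size as
       * 32kB << val, hence the "1 << (val + 15)" above:
       *   val = 1  ->  1 << 16 =  64kB
       *   val = 4  ->  1 << 19 = 512kB
       * val = 0 means the MFW did not report a size, so the CMT/non-CMT
       * fallbacks above apply.
       */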
 1670 
 1671 void ecore_init_dp(struct ecore_dev     *p_dev,
 1672                    u32                  dp_module,
 1673                    u8                   dp_level,
 1674                    void                 *dp_ctx)
 1675 {
 1676         u32 i;
 1677 
 1678         p_dev->dp_level = dp_level;
 1679         p_dev->dp_module = dp_module;
 1680         p_dev->dp_ctx = dp_ctx;
 1681         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
 1682                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 1683 
 1684                 p_hwfn->dp_level = dp_level;
 1685                 p_hwfn->dp_module = dp_module;
 1686                 p_hwfn->dp_ctx = dp_ctx;
 1687         }
 1688 }
 1689 
 1690 enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
 1691 {
 1692         u8 i;
 1693 
 1694         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
 1695                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 1696 
 1697                 p_hwfn->p_dev = p_dev;
 1698                 p_hwfn->my_id = i;
 1699                 p_hwfn->b_active = false;
 1700 
 1701 #ifdef CONFIG_ECORE_LOCK_ALLOC
 1702                 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
 1703                         goto handle_err;
 1704 #endif
 1705                 OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
 1706         }
 1707 
 1708         /* hwfn 0 is always active */
 1709         p_dev->hwfns[0].b_active = true;
 1710 
 1711         /* set the default cache alignment to 128 (may be overridden later) */
 1712         p_dev->cache_shift = 7;
 1713 
 1714         p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE;
 1715 
 1716         return ECORE_SUCCESS;
 1717 #ifdef CONFIG_ECORE_LOCK_ALLOC
 1718 handle_err:
 1719         while (i--) {
 1720                 struct ecore_hwfn *p_hwfn = OSAL_NULL;
 1721 
 1722                 p_hwfn = &p_dev->hwfns[i];
 1723                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
 1724         }
 1725         return ECORE_NOMEM;
 1726 #endif
 1727 }
 1728 
 1729 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
 1730 {
 1731         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 1732 
 1733         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
 1734         qm_info->qm_pq_params = OSAL_NULL;
 1735         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
 1736         qm_info->qm_vport_params = OSAL_NULL;
 1737         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
 1738         qm_info->qm_port_params = OSAL_NULL;
 1739         OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
 1740         qm_info->wfq_data = OSAL_NULL;
 1741 }
 1742 
 1743 void ecore_resc_free(struct ecore_dev *p_dev)
 1744 {
 1745         int i;
 1746 
 1747         if (IS_VF(p_dev)) {
 1748                 for_each_hwfn(p_dev, i)
 1749                         ecore_l2_free(&p_dev->hwfns[i]);
 1750                 return;
 1751         }
 1752 
 1753         OSAL_FREE(p_dev, p_dev->fw_data);
 1754         p_dev->fw_data = OSAL_NULL;
 1755 
 1756         OSAL_FREE(p_dev, p_dev->reset_stats);
 1757         p_dev->reset_stats = OSAL_NULL;
 1758 
 1759         ecore_llh_free(p_dev);
 1760 
 1761         for_each_hwfn(p_dev, i) {
 1762                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 1763 
 1764                 ecore_cxt_mngr_free(p_hwfn);
 1765                 ecore_qm_info_free(p_hwfn);
 1766                 ecore_spq_free(p_hwfn);
 1767                 ecore_eq_free(p_hwfn);
 1768                 ecore_consq_free(p_hwfn);
 1769                 ecore_int_free(p_hwfn);
 1770 #ifdef CONFIG_ECORE_LL2
 1771                 ecore_ll2_free(p_hwfn);
 1772 #endif
 1773                 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
 1774                         ecore_fcoe_free(p_hwfn);
 1775 
 1776                 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
 1777                         ecore_iscsi_free(p_hwfn);
 1778                         ecore_ooo_free(p_hwfn);
 1779                 }
 1780 
 1781 #ifdef CONFIG_ECORE_ROCE
 1782                 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
 1783                         ecore_rdma_info_free(p_hwfn);
 1784 #endif
 1785                 ecore_iov_free(p_hwfn);
 1786                 ecore_l2_free(p_hwfn);
 1787                 ecore_dmae_info_free(p_hwfn);
 1788                 ecore_dcbx_info_free(p_hwfn);
 1789                 /* @@@TBD Flush work-queue ?*/
 1790 
 1791                 /* destroy doorbell recovery mechanism */
 1792                 ecore_db_recovery_teardown(p_hwfn);
 1793         }
 1794 }
 1795 
 1796 /******************** QM initialization *******************/
 1797 /* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
 1798 #define ACTIVE_TCS_BMAP 0x9f /* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
 1799 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
 1800 
 1801 /* determines the physical queue flags for a given PF. */
 1802 static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
 1803 {
 1804         u32 flags;
 1805 
 1806         /* common flags */
 1807         flags = PQ_FLAGS_LB;
 1808 
 1809         /* feature flags */
 1810         if (IS_ECORE_SRIOV(p_hwfn->p_dev))
 1811                 flags |= PQ_FLAGS_VFS;
 1812         if (IS_ECORE_DCQCN(p_hwfn))
 1813                 flags |= PQ_FLAGS_RLS;
 1814 
 1815         /* protocol flags */
 1816         switch (p_hwfn->hw_info.personality) {
 1817         case ECORE_PCI_ETH:
 1818                 flags |= PQ_FLAGS_MCOS;
 1819                 break;
 1820         case ECORE_PCI_FCOE:
 1821                 flags |= PQ_FLAGS_OFLD;
 1822                 break;
 1823         case ECORE_PCI_ISCSI:
 1824                 flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
 1825                 break;
 1826         case ECORE_PCI_ETH_ROCE:
 1827                 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
 1828                 break;
 1829         case ECORE_PCI_ETH_IWARP:
 1830                 flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
 1831                 break;
 1832         default:
 1833                 DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
 1834                 return 0;
 1835         }
 1836 
 1837         return flags;
 1838 }
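
      /* Example (derived from the switch above): an iSCSI function on an
       * SR-IOV capable device with DCQCN disabled ends up with
       *   flags = PQ_FLAGS_LB | PQ_FLAGS_VFS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
       *           PQ_FLAGS_OFLD;
       * i.e. the common loopback PQ, per-VF PQs, and the ack/ooo/offload PQs
       * that the storage personality requires.
       */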
 1839 
 1840 /* Getters for resource amounts necessary for qm initialization */
 1841 u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
 1842 {
 1843         return p_hwfn->hw_info.num_hw_tc;
 1844 }
 1845 
 1846 u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
 1847 {
 1848         return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
 1849 }
 1850 
 1851 #define NUM_DEFAULT_RLS 1
 1852 
 1853 u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
 1854 {
 1855         u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
 1856 
 1857         /* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */
 1858         num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
 1859                                      (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT),
 1860                                                      ROCE_DCQCN_RP_MAX_QPS));
 1861 
 1862         /* make sure after we reserve the default and VF rls we'll have something left */
 1863         if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
 1864                 if (IS_ECORE_DCQCN(p_hwfn))
 1865                         DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
 1866                 return 0;
 1867         }
 1868 
 1869         /* subtract rls necessary for VFs and one default one for the PF */
 1870         num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
 1871 
 1872         return num_pf_rls;
 1873 }
 1874 
 1875 u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
 1876 {
 1877         u32 pq_flags = ecore_get_pq_flags(p_hwfn);
 1878 
 1879         /* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
 1880         return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
 1881                (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
 1882 }
 1883 
 1884 /* calc amount of PQs according to the requested flags */
 1885 u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
 1886 {
 1887         u32 pq_flags = ecore_get_pq_flags(p_hwfn);
 1888 
 1889         return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
 1890                (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
 1891                (!!(PQ_FLAGS_LB & pq_flags)) +
 1892                (!!(PQ_FLAGS_OOO & pq_flags)) +
 1893                (!!(PQ_FLAGS_ACK & pq_flags)) +
 1894                (!!(PQ_FLAGS_OFLD & pq_flags)) +
 1895                (!!(PQ_FLAGS_LLT & pq_flags)) +
 1896                (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn);
 1897 }
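
      /* Worked example (illustrative): an ETH personality with 4 TCs, 8 VFs
       * and no rate limiters has pq_flags = PQ_FLAGS_LB | PQ_FLAGS_MCOS |
       * PQ_FLAGS_VFS, so the getters above yield
       *   num_pqs    = 4 (mcos) + 1 (lb) + 8 (vfs) = 13
       *   num_vports = 8 (vfs) + 1 (shared)        =  9
       */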
 1898 
 1899 /* initialize the top level QM params */
 1900 static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
 1901 {
 1902         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 1903         bool four_port;
 1904 
 1905         /* pq and vport bases for this PF */
 1906         qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
 1907         qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
 1908 
 1909         /* rate limiting and weighted fair queueing are always enabled */
 1910         qm_info->vport_rl_en = 1;
 1911         qm_info->vport_wfq_en = 1;
 1912 
 1913         /* TC config is different for AH 4 port */
 1914         four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
 1915 
 1916         /* in AH 4 port we have fewer TCs per port */
 1917         qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
 1918 
 1919         /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
 1920         if (!qm_info->ooo_tc)
 1921                 qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
 1922 }
 1923 
 1924 /* initialize qm vport params */
 1925 static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
 1926 {
 1927         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 1928         u8 i;
 1929 
 1930         /* all vports participate in weighted fair queueing */
 1931         for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
 1932                 qm_info->qm_vport_params[i].vport_wfq = 1;
 1933 }
 1934 
 1935 /* initialize qm port params */
 1936 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
 1937 {
 1938         /* Initialize qm port parameters */
 1939         u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
 1940 
 1941         /* indicate how ooo and high pri traffic is dealt with */
 1942         active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
 1943                 ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
 1944 
 1945         for (i = 0; i < num_ports; i++) {
 1946                 struct init_qm_port_params *p_qm_port =
 1947                         &p_hwfn->qm_info.qm_port_params[i];
 1948 
 1949                 p_qm_port->active = 1;
 1950                 p_qm_port->active_phys_tcs = active_phys_tcs;
 1951                 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
 1952                 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
 1953         }
 1954 }
 1955 
 1956 /* Reset the params which must be reset for qm init. QM init may be called as
 1957  * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 1958  * params may be affected by the init but would simply recalculate to the same
 1959  * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
 1960  * affected as these amounts stay the same.
 1961  */
 1962 static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
 1963 {
 1964         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 1965 
 1966         qm_info->num_pqs = 0;
 1967         qm_info->num_vports = 0;
 1968         qm_info->num_pf_rls = 0;
 1969         qm_info->num_vf_pqs = 0;
 1970         qm_info->first_vf_pq = 0;
 1971         qm_info->first_mcos_pq = 0;
 1972         qm_info->first_rl_pq = 0;
 1973 }
 1974 
 1975 static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
 1976 {
 1977         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 1978 
 1979         qm_info->num_vports++;
 1980 
 1981         if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
 1982                 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
 1983 }
 1984 
 1985 /* initialize a single pq and manage qm_info resource accounting.
 1986  * The pq_init_flags param determines whether the PQ is rate limited (for a VF or the PF)
 1987  * and whether a new vport is allocated to the pq (otherwise the vport is shared).
 1988  */
 1989 
 1990 /* flags for pq init */
 1991 #define PQ_INIT_SHARE_VPORT     (1 << 0)
 1992 #define PQ_INIT_PF_RL           (1 << 1)
 1993 #define PQ_INIT_VF_RL           (1 << 2)
 1994 
 1995 /* defines for pq init */
 1996 #define PQ_INIT_DEFAULT_WRR_GROUP       1
 1997 #define PQ_INIT_DEFAULT_TC              0
 1998 #define PQ_INIT_OFLD_TC                 (p_hwfn->hw_info.offload_tc)
 1999 
 2000 static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
 2001                              struct ecore_qm_info *qm_info,
 2002                              u8 tc, u32 pq_init_flags)
 2003 {
 2004         u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);
 2005 
 2006         if (pq_idx >= max_pq)
 2007                 DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
 2008 
 2009         /* init pq params */
 2010         qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
 2011         qm_info->qm_pq_params[pq_idx].tc_id = tc;
 2012         qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
 2013         qm_info->qm_pq_params[pq_idx].rl_valid =
 2014                 (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
 2015 
 2016         /* qm params accounting */
 2017         qm_info->num_pqs++;
 2018         if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
 2019                 qm_info->num_vports++;
 2020 
 2021         if (pq_init_flags & PQ_INIT_PF_RL)
 2022                 qm_info->num_pf_rls++;
 2023 
 2024         if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
 2025                 DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
 2026 
 2027         if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
 2028                 DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
 2029 }
 2030 
 2031 /* get pq index according to PQ_FLAGS */
 2032 static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
 2033                                              u32 pq_flags)
 2034 {
 2035         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2036 
 2037         /* Can't have multiple flags set here */
 2038         if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
 2039                 goto err;
 2040 
 2041         switch (pq_flags) {
 2042         case PQ_FLAGS_RLS:
 2043                 return &qm_info->first_rl_pq;
 2044         case PQ_FLAGS_MCOS:
 2045                 return &qm_info->first_mcos_pq;
 2046         case PQ_FLAGS_LB:
 2047                 return &qm_info->pure_lb_pq;
 2048         case PQ_FLAGS_OOO:
 2049                 return &qm_info->ooo_pq;
 2050         case PQ_FLAGS_ACK:
 2051                 return &qm_info->pure_ack_pq;
 2052         case PQ_FLAGS_OFLD:
 2053                 return &qm_info->offload_pq;
 2054         case PQ_FLAGS_LLT:
 2055                 return &qm_info->low_latency_pq;
 2056         case PQ_FLAGS_VFS:
 2057                 return &qm_info->first_vf_pq;
 2058         default:
 2059                 goto err;
 2060         }
 2061 
 2062 err:
 2063         DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
 2064         return OSAL_NULL;
 2065 }
 2066 
 2067 /* save pq index in qm info */
 2068 static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
 2069                                   u32 pq_flags, u16 pq_val)
 2070 {
 2071         u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
 2072 
 2073         *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
 2074 }
 2075 
 2076 /* get tx pq index, with the PQ TX base already set (ready for context init) */
 2077 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
 2078 {
 2079         u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
 2080 
 2081         return *base_pq_idx + CM_TX_PQ_BASE;
 2082 }
 2083 
 2084 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
 2085 {
 2086         u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
 2087 
 2088         if (tc >= max_tc)
 2089                 DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
 2090 
 2091         return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
 2092 }
 2093 
 2094 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
 2095 {
 2096         u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
 2097 
 2098         if (vf >= max_vf)
 2099                 DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
 2100 
 2101         return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
 2102 }
 2103 
 2104 u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
 2105 {
 2106         u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
 2107 
 2108         if (rl >= max_rl)
 2109                 DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
 2110 
 2111         return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
 2112 }
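
      /* Example (illustrative): if the MCOS base was saved as
       * qm_info->first_mcos_pq = 104, then
       *   ecore_get_cm_pq_idx_mcos(p_hwfn, 2) == 104 + CM_TX_PQ_BASE + 2
       * where CM_TX_PQ_BASE is the fixed offset of TX PQs within the
       * connection manager's PQ address space.
       */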
 2113 
 2114 /* Functions for creating specific types of pqs */
 2115 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
 2116 {
 2117         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2118 
 2119         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
 2120                 return;
 2121 
 2122         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
 2123         ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
 2124 }
 2125 
 2126 static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
 2127 {
 2128         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2129 
 2130         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
 2131                 return;
 2132 
 2133         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
 2134         ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
 2135 }
 2136 
 2137 static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
 2138 {
 2139         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2140 
 2141         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
 2142                 return;
 2143 
 2144         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
 2145         ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
 2146 }
 2147 
 2148 static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
 2149 {
 2150         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2151 
 2152         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
 2153                 return;
 2154 
 2155         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
 2156         ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
 2157 }
 2158 
 2159 static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
 2160 {
 2161         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2162 
 2163         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
 2164                 return;
 2165 
 2166         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
 2167         ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
 2168 }
 2169 
 2170 static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
 2171 {
 2172         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2173         u8 tc_idx;
 2174 
 2175         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
 2176                 return;
 2177 
 2178         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
 2179         for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
 2180                 ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
 2181 }
 2182 
 2183 static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
 2184 {
 2185         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2186         u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
 2187 
 2188         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
 2189                 return;
 2190 
 2191         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
 2192         qm_info->num_vf_pqs = num_vfs;
 2193         for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
 2194                 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
 2195 }
 2196 
 2197 static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
 2198 {
 2199         u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
 2200         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2201 
 2202         if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
 2203                 return;
 2204 
 2205         ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
 2206         for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
 2207                 ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
 2208 }
 2209 
 2210 static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
 2211 {
 2212         /* rate limited pqs, must come first (FW assumption) */
 2213         ecore_init_qm_rl_pqs(p_hwfn);
 2214 
 2215         /* pqs for multi cos */
 2216         ecore_init_qm_mcos_pqs(p_hwfn);
 2217 
 2218         /* pure loopback pq */
 2219         ecore_init_qm_lb_pq(p_hwfn);
 2220 
 2221         /* out of order pq */
 2222         ecore_init_qm_ooo_pq(p_hwfn);
 2223 
 2224         /* pure ack pq */
 2225         ecore_init_qm_pure_ack_pq(p_hwfn);
 2226 
 2227         /* pq for offloaded protocol */
 2228         ecore_init_qm_offload_pq(p_hwfn);
 2229 
 2230         /* low latency pq */
 2231         ecore_init_qm_low_latency_pq(p_hwfn);
 2232 
 2233         /* done sharing vports */
 2234         ecore_init_qm_advance_vport(p_hwfn);
 2235 
 2236         /* pqs for vfs */
 2237         ecore_init_qm_vf_pqs(p_hwfn);
 2238 }
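
      /* Resulting PQ layout (illustrative) for an iSCSI PF with 8 VFs and no
       * PF rate limiters:
       *   idx 0: pure loopback        idx 2: pure ack
       *   idx 1: out of order         idx 3: offload
       *   idx 4..11: one PQ per VF, each with its own vport and a VF RL
       * Rate-limited PF PQs, when present, always occupy the lowest indices,
       * per the FW assumption noted above.
       */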
 2239 
 2240 /* compare values of getters against resource amounts */
 2241 static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
 2242 {
 2243         if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
 2244                 DP_ERR(p_hwfn, "requested number of vports exceeds available resources\n");
 2245                 return ECORE_INVAL;
 2246         }
 2247 
 2248         if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
 2249                 DP_ERR(p_hwfn, "requested number of pqs exceeds available resources\n");
 2250                 return ECORE_INVAL;
 2251         }
 2252 
 2253         return ECORE_SUCCESS;
 2254 }
 2255 
 2256 /*
 2257  * Function for verbose printing of the qm initialization results
 2258  */
 2259 static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
 2260 {
 2261         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2262         struct init_qm_vport_params *vport;
 2263         struct init_qm_port_params *port;
 2264         struct init_qm_pq_params *pq;
 2265         int i, tc;
 2266 
 2267         /* top level params */
 2268         DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
 2269                    qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq);
 2270         DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
 2271                    qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port);
 2272         DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
 2273                    qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
 2274 
 2275         /* port table */
 2276         for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
 2277                 port = &(qm_info->qm_port_params[i]);
 2278                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
 2279                            i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved);
 2280         }
 2281 
 2282         /* vport table */
 2283         for (i = 0; i < qm_info->num_vports; i++) {
 2284                 vport = &(qm_info->qm_vport_params[i]);
 2285                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
 2286                            qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq);
 2287                 for (tc = 0; tc < NUM_OF_TCS; tc++)
 2288                         DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
 2289                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
 2290         }
 2291 
 2292         /* pq table */
 2293         for (i = 0; i < qm_info->num_pqs; i++) {
 2294                 pq = &(qm_info->qm_pq_params[i]);
 2295                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
 2296                            qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid);
 2297         }
 2298 }
 2299 
 2300 static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
 2301 {
 2302         /* reset params required for init run */
 2303         ecore_init_qm_reset_params(p_hwfn);
 2304 
 2305         /* init QM top level params */
 2306         ecore_init_qm_params(p_hwfn);
 2307 
 2308         /* init QM port params */
 2309         ecore_init_qm_port_params(p_hwfn);
 2310 
 2311         /* init QM vport params */
 2312         ecore_init_qm_vport_params(p_hwfn);
 2313 
 2314         /* init QM physical queue params */
 2315         ecore_init_qm_pq_params(p_hwfn);
 2316 
 2317         /* display all that init */
 2318         ecore_dp_init_qm_params(p_hwfn);
 2319 }
 2320 
 2321 /* This function reconfigures the QM pf on the fly.
 2322  * For this purpose we:
 2323  * 1. reconfigure the QM database
 2324  * 2. set new values to runtime array
 2325  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 2326  * 4. activate init tool in QM_PF stage
 2327  * 5. send an sdm_qm_cmd through rbc interface to release the QM
 2328  */
 2329 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 2330                                      struct ecore_ptt *p_ptt)
 2331 {
 2332         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2333         bool b_rc;
 2334         enum _ecore_status_t rc;
 2335 
 2336         /* initialize ecore's qm data structure */
 2337         ecore_init_qm_info(p_hwfn);
 2338 
 2339         /* stop PF's qm queues */
 2340         OSAL_SPIN_LOCK(&qm_lock);
 2341         b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
 2342                                       qm_info->start_pq, qm_info->num_pqs);
 2343         OSAL_SPIN_UNLOCK(&qm_lock);
 2344         if (!b_rc)
 2345                 return ECORE_INVAL;
 2346 
 2347         /* clear the QM_PF runtime phase leftovers from previous init */
 2348         ecore_init_clear_rt_data(p_hwfn);
 2349 
 2350         /* prepare QM portion of runtime array */
 2351         ecore_qm_init_pf(p_hwfn, p_ptt, false);
 2352 
 2353         /* activate init tool on runtime array */
 2354         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
 2355                             p_hwfn->hw_info.hw_mode);
 2356         if (rc != ECORE_SUCCESS)
 2357                 return rc;
 2358 
 2359         /* start PF's qm queues */
 2360         OSAL_SPIN_LOCK(&qm_lock);
 2361         b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
 2362                                       qm_info->start_pq, qm_info->num_pqs);
 2363         OSAL_SPIN_UNLOCK(&qm_lock);
 2364         if (!b_rc)
 2365                 return ECORE_INVAL;
 2366 
 2367         return ECORE_SUCCESS;
 2368 }
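
      /* Usage sketch (hypothetical; guarded out of the build): reconfiguring
       * the QM after, e.g., a DCBX renegotiation changed the TC setup. A PTT
       * window must be held across the call.
       */
      #ifdef ECORE_EXAMPLE_SKETCHES
      static enum _ecore_status_t example_reconf_qm(struct ecore_hwfn *p_hwfn)
      {
              struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
              enum _ecore_status_t rc;

              if (p_ptt == OSAL_NULL)
                      return ECORE_AGAIN;

              rc = ecore_qm_reconf(p_hwfn, p_ptt);
              ecore_ptt_release(p_hwfn, p_ptt);

              return rc;
      }
      #endif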
 2369 
 2370 static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
 2371 {
 2372         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2373         enum _ecore_status_t rc;
 2374 
 2375         rc = ecore_init_qm_sanity(p_hwfn);
 2376         if (rc != ECORE_SUCCESS)
 2377                 goto alloc_err;
 2378 
 2379         qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 2380                                             sizeof(struct init_qm_pq_params) *
 2381                                             ecore_init_qm_get_num_pqs(p_hwfn));
 2382         if (!qm_info->qm_pq_params)
 2383                 goto alloc_err;
 2384 
 2385         qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 2386                                                sizeof(struct init_qm_vport_params) *
 2387                                                ecore_init_qm_get_num_vports(p_hwfn));
 2388         if (!qm_info->qm_vport_params)
 2389                 goto alloc_err;
 2390 
 2391         qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 2392                                               sizeof(struct init_qm_port_params) *
 2393                                               p_hwfn->p_dev->num_ports_in_engine);
 2394         if (!qm_info->qm_port_params)
 2395                 goto alloc_err;
 2396 
 2397         qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 2398                                         sizeof(struct ecore_wfq_data) *
 2399                                         ecore_init_qm_get_num_vports(p_hwfn));
 2400         if (!qm_info->wfq_data)
 2401                 goto alloc_err;
 2402 
 2403         return ECORE_SUCCESS;
 2404 
 2405 alloc_err:
 2406         DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
 2407         ecore_qm_info_free(p_hwfn);
 2408         return ECORE_NOMEM;
 2409 }
 2410 /******************** End QM initialization ***************/
 2411 
 2412 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 2413 {
 2414         u32 rdma_tasks, excess_tasks;
 2415         u32 line_count;
 2416         enum _ecore_status_t rc = ECORE_SUCCESS;
 2417         int i;
 2418 
 2419         if (IS_VF(p_dev)) {
 2420                 for_each_hwfn(p_dev, i) {
 2421                         rc = ecore_l2_alloc(&p_dev->hwfns[i]);
 2422                         if (rc != ECORE_SUCCESS)
 2423                                 return rc;
 2424                 }
 2425                 return rc;
 2426         }
 2427 
 2428         p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
 2429                                      sizeof(*p_dev->fw_data));
 2430         if (!p_dev->fw_data)
 2431                 return ECORE_NOMEM;
 2432 
 2433         for_each_hwfn(p_dev, i) {
 2434                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 2435                 u32 n_eqes, num_cons;
 2436 
 2437                 /* initialize the doorbell recovery mechanism */
 2438                 rc = ecore_db_recovery_setup(p_hwfn);
 2439                 if (rc)
 2440                         goto alloc_err;
 2441 
 2442                 /* First allocate the context manager structure */
 2443                 rc = ecore_cxt_mngr_alloc(p_hwfn);
 2444                 if (rc)
 2445                         goto alloc_err;
 2446 
 2447                 /* Set the HW cid/tid numbers (in the context manager)
 2448                  * Must be done prior to any further computations.
 2449                  */
 2450                 rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
 2451                 if (rc)
 2452                         goto alloc_err;
 2453 
 2454                 rc = ecore_alloc_qm_data(p_hwfn);
 2455                 if (rc)
 2456                         goto alloc_err;
 2457 
 2458                 /* init qm info */
 2459                 ecore_init_qm_info(p_hwfn);
 2460 
 2461                 /* Compute the ILT client partition */
 2462                 rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
 2463                 if (rc) {
 2464                         DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with fewer lines\n");
 2465                         /* In case there are not enough ILT lines we reduce the
 2466                          * number of RDMA tasks and re-compute.
 2467                          */
 2468                         excess_tasks = ecore_cxt_cfg_ilt_compute_excess(
 2469                                         p_hwfn, line_count);
 2470                         if (!excess_tasks)
 2471                                 goto alloc_err;
 2472 
 2473                         rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
 2474                         rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks);
 2475                         if (rc)
 2476                                 goto alloc_err;
 2477 
 2478                         rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
 2479                         if (rc) {
 2480                                 DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n",
 2481                                        line_count);
 2482 
 2483                                 goto alloc_err;
 2484                         }
 2485                 }
 2486 
 2487                 /* CID map / ILT shadow table / T2
 2488                  * The table sizes are determined by the computations above
 2489                  */
 2490                 rc = ecore_cxt_tables_alloc(p_hwfn);
 2491                 if (rc)
 2492                         goto alloc_err;
 2493 
 2494                 /* SPQ, must follow ILT because it initializes the SPQ context */
 2495                 rc = ecore_spq_alloc(p_hwfn);
 2496                 if (rc)
 2497                         goto alloc_err;
 2498 
 2499                 /* SP status block allocation */
 2500                 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
 2501                                                            RESERVED_PTT_DPC);
 2502 
 2503                 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
 2504                 if (rc)
 2505                         goto alloc_err;
 2506 
 2507                 rc = ecore_iov_alloc(p_hwfn);
 2508                 if (rc)
 2509                         goto alloc_err;
 2510 
 2511                 /* EQ */
 2512                 n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
 2513                 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
 2514                         u32 n_srq = ecore_cxt_get_total_srq_count(p_hwfn);
 2515 
 2516                         /* Calculate the EQ size
 2517                          * ---------------------
 2518                          * Each ICID may generate up to one event at a time i.e.
 2519                          * the event must be handled/cleared before a new one
 2520                          * can be generated. We calculate the sum of events per
 2521                          * protocol and create an EQ deep enough to handle the
 2522                          * worst case:
 2523                          * - Core - according to SPQ.
 2524                          * - RoCE - per QP there are a couple of ICIDs, one
 2525                          *        responder and one requester, each can
 2526                          *        generate max 2 EQE (err+qp_destroyed) =>
 2527                          *        n_eqes_qp = 4 * n_qp.
 2528                          *        Each CQ can generate an EQE. There are 2 CQs
 2529                          *        per QP => n_eqes_cq = 2 * n_qp.
 2530                          *        Hence the RoCE total is 6 * n_qp or
 2531                          *        3 * num_cons.
 2532                          *        On top of that one EQE should be added
 2533                          *        for each XRC SRQ and SRQ.
 2534                          * - iWARP - can generate three async events per
 2535                          *        QP (error detected and qp in error) and
 2536                          *        an additional error per CQ => 4 * num_cons.
 2537                          *        On top of that one EQE should be added
 2538                          *        for each SRQ and XRC SRQ.
 2539                          * - ENet - There can be up to two events per VF. One
 2540                          *        for VF-PF channel and another for VF FLR
 2541                          *        initial cleanup. The number of VFs is
 2542                          *        bounded by MAX_NUM_VFS_BB, and is much
 2543                          *        smaller than RoCE's so we avoid exact
 2544                          *        calculation.
 2545                          */
 2546                         if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
 2547                                 num_cons = ecore_cxt_get_proto_cid_count(
 2548                                         p_hwfn, PROTOCOLID_ROCE, OSAL_NULL);
 2549                                 num_cons *= 3;
 2550                         } else {
 2551                                 num_cons = ecore_cxt_get_proto_cid_count(
 2552                                                 p_hwfn, PROTOCOLID_IWARP,
 2553                                                 OSAL_NULL);
 2554                                 num_cons *= 4;
 2555                         }
 2556                         n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
 2557                 } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
 2558                         num_cons = ecore_cxt_get_proto_cid_count(
 2559                                         p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL);
 2560                         n_eqes += 2 * num_cons;
 2561                 }
 2562 
 2563                 if (n_eqes > 0xFF00) {
 2564                         DP_ERR(p_hwfn, "EQs maxing out at 0xFF00 elements\n");
 2565                         n_eqes = 0xFF00;
 2566                 }
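
                      /* Worked example (illustrative): a RoCE PF with 1k QPs,
                       * i.e. num_cons = 2048 ICIDs -> 3 * 2048 = 6144 EQEs,
                       * plus 2 * MAX_NUM_VFS_BB VF events and one EQE per SRQ,
                       * stays well below the 0xFF00 clamp above.
                       */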
 2567 
 2568                 rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
 2569                 if (rc)
 2570                         goto alloc_err;
 2571 
 2572                 rc = ecore_consq_alloc(p_hwfn);
 2573                 if (rc)
 2574                         goto alloc_err;
 2575 
 2576                 rc = ecore_l2_alloc(p_hwfn);
 2577                 if (rc != ECORE_SUCCESS)
 2578                         goto alloc_err;
 2579 
 2580 #ifdef CONFIG_ECORE_LL2
 2581                 if (p_hwfn->using_ll2) {
 2582                         rc = ecore_ll2_alloc(p_hwfn);
 2583                         if (rc)
 2584                                 goto alloc_err;
 2585                 }
 2586 #endif
 2587                 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
 2588                         rc = ecore_fcoe_alloc(p_hwfn);
 2589                         if (rc)
 2590                                 goto alloc_err;
 2591                 }
 2592 
 2593                 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
 2594                         rc = ecore_iscsi_alloc(p_hwfn);
 2595                         if (rc)
 2596                                 goto alloc_err;
 2597 
 2598                         rc = ecore_ooo_alloc(p_hwfn);
 2599                         if (rc)
 2600                                 goto alloc_err;
 2601                 }
 2602 #ifdef CONFIG_ECORE_ROCE
 2603                 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
 2604                         rc = ecore_rdma_info_alloc(p_hwfn);
 2605                         if (rc)
 2606                                 goto alloc_err;
 2607                 }
 2608 #endif
 2609 
 2610                 /* DMA info initialization */
 2611                 rc = ecore_dmae_info_alloc(p_hwfn);
 2612                 if (rc) {
 2613                         DP_NOTICE(p_hwfn, false,
 2614                                   "Failed to allocate memory for dmae_info structure\n");
 2615                         goto alloc_err;
 2616                 }
 2617 
 2618                 /* DCBX initialization */
 2619                 rc = ecore_dcbx_info_alloc(p_hwfn);
 2620                 if (rc) {
 2621                         DP_NOTICE(p_hwfn, false,
 2622                                   "Failed to allocate memory for dcbx structure\n");
 2623                         goto alloc_err;
 2624                 }
 2625         }
 2626 
 2627         rc = ecore_llh_alloc(p_dev);
 2628         if (rc != ECORE_SUCCESS) {
 2629                 DP_NOTICE(p_dev, false,
 2630                           "Failed to allocate memory for the llh_info structure\n");
 2631                 goto alloc_err;
 2632         }
 2633 
 2634         p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
 2635                                          sizeof(*p_dev->reset_stats));
 2636         if (!p_dev->reset_stats) {
 2637                 DP_NOTICE(p_dev, false,
 2638                           "Failed to allocate reset statistics\n");
 2639                 goto alloc_no_mem;
 2640         }
 2641 
 2642         return ECORE_SUCCESS;
 2643 
 2644 alloc_no_mem:
 2645         rc = ECORE_NOMEM;
 2646 alloc_err:
 2647         ecore_resc_free(p_dev);
 2648         return rc;
 2649 }
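
/*
 * Illustrative sketch (compiled out; not part of the driver): the EQ-depth
 * arithmetic documented in the comment above, pulled out as standalone C.
 * The inputs are hypothetical placeholders; in the driver num_cons comes
 * from ecore_cxt_get_proto_cid_count().
 */
#if 0
static u32 example_eq_depth(u32 num_cons, u32 n_srq, u32 max_vfs,
                            bool is_roce)
{
        /* RoCE contributes 3 EQEs per connection, iWARP 4 */
        u32 n_eqes = num_cons * (is_roce ? 3 : 4);

        /* One EQE per SRQ/XRC SRQ, plus up to two ENet events per VF */
        n_eqes += n_srq + 2 * max_vfs;

        /* The element count is handed to ecore_eq_alloc() as a u16 */
        return (n_eqes > 0xFF00) ? 0xFF00 : n_eqes;
}
#endif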
 2650 
 2651 void ecore_resc_setup(struct ecore_dev *p_dev)
 2652 {
 2653         int i;
 2654 
 2655         if (IS_VF(p_dev)) {
 2656                 for_each_hwfn(p_dev, i)
 2657                         ecore_l2_setup(&p_dev->hwfns[i]);
 2658                 return;
 2659         }
 2660 
 2661         for_each_hwfn(p_dev, i) {
 2662                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 2663 
 2664                 ecore_cxt_mngr_setup(p_hwfn);
 2665                 ecore_spq_setup(p_hwfn);
 2666                 ecore_eq_setup(p_hwfn);
 2667                 ecore_consq_setup(p_hwfn);
 2668 
 2669                 /* Read shadow of current MFW mailbox */
 2670                 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
 2671                 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
 2672                             p_hwfn->mcp_info->mfw_mb_cur,
 2673                             p_hwfn->mcp_info->mfw_mb_length);
 2674 
 2675                 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
 2676 
 2677                 ecore_l2_setup(p_hwfn);
 2678                 ecore_iov_setup(p_hwfn);
 2679 #ifdef CONFIG_ECORE_LL2
 2680                 if (p_hwfn->using_ll2)
 2681                         ecore_ll2_setup(p_hwfn);
 2682 #endif
 2683                 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
 2684                         ecore_fcoe_setup(p_hwfn);
 2685 
 2686                 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
 2687                         ecore_iscsi_setup(p_hwfn);
 2688                         ecore_ooo_setup(p_hwfn);
 2689                 }
 2690         }
 2691 }
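
/*
 * Illustrative usage sketch (compiled out, hypothetical caller): resource
 * handling is split into two passes; the allocation routine above may fail
 * and must complete before ecore_resc_setup() touches any hwfn.
 */
#if 0
static enum _ecore_status_t example_resc_bringup(struct ecore_dev *p_dev)
{
        enum _ecore_status_t rc;

        rc = ecore_resc_alloc(p_dev);   /* frees everything via alloc_err */
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_resc_setup(p_dev);        /* void: pure initialization */

        return ECORE_SUCCESS;
}
#endif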
 2692 
 2693 #define FINAL_CLEANUP_POLL_CNT  (100)
 2694 #define FINAL_CLEANUP_POLL_TIME (10)
 2695 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
 2696                                          struct ecore_ptt *p_ptt,
 2697                                          u16 id, bool is_vf)
 2698 {
 2699         u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
 2700         enum _ecore_status_t rc = ECORE_TIMEOUT;
 2701 
 2702 #ifndef ASIC_ONLY
 2703         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
 2704             CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
 2705                 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
 2706                 return ECORE_SUCCESS;
 2707         }
 2708 #endif
 2709 
 2710         addr = GTT_BAR0_MAP_REG_USDM_RAM +
 2711                USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
 2712 
 2713         if (is_vf)
 2714                 id += 0x10;
 2715 
 2716         command |= X_FINAL_CLEANUP_AGG_INT <<
 2717                    SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
 2718         command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
 2719         command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
 2720         command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
 2721 
 2722         /* Make sure notification is not set before initiating final cleanup */
 2723         if (REG_RD(p_hwfn, addr)) {
 2724                 DP_NOTICE(p_hwfn, false,
 2725                           "Unexpected; Found final cleanup notification before initiating final cleanup\n");
 2726                 REG_WR(p_hwfn, addr, 0);
 2727         }
 2728 
 2729         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2730                    "Sending final cleanup for PFVF[%d] [Command %08x]\n",
 2731                    id, command);
 2732 
 2733         ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
 2734 
 2735         /* Poll until completion */
 2736         while (!REG_RD(p_hwfn, addr) && count--)
 2737                 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
 2738 
 2739         if (REG_RD(p_hwfn, addr))
 2740                 rc = ECORE_SUCCESS;
 2741         else
 2742                 DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n");
 2743 
 2744         /* Cleanup afterwards */
 2745         REG_WR(p_hwfn, addr, 0);
 2746 
 2747         return rc;
 2748 }
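
/*
 * Worked example of the SDM command composed above (shift names
 * abbreviated): for PF id 3,
 *   command = (X_FINAL_CLEANUP_AGG_INT << AGG_INT_INDEX_SHIFT) |
 *             (1 << AGG_VECTOR_ENABLE_SHIFT) |
 *             (3 << AGG_VECTOR_BIT_SHIFT) |
 *             (SDM_COMP_TYPE_AGG_INT << COMP_TYPE_SHIFT);
 * A VF with relative id 3 would use 3 + 0x10 = 0x13 instead. Completion
 * is then polled for at most FINAL_CLEANUP_POLL_CNT polls of
 * FINAL_CLEANUP_POLL_TIME ms each, i.e. 100 * 10 ms = 1 second.
 */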
 2749 
 2750 static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 2751 {
 2752         int hw_mode = 0;
 2753 
 2754         if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
 2755                 hw_mode |= 1 << MODE_BB;
 2756         } else if (ECORE_IS_AH(p_hwfn->p_dev)) {
 2757                 hw_mode |= 1 << MODE_K2;
 2758         } else if (ECORE_IS_E5(p_hwfn->p_dev)) {
 2759                 hw_mode |= 1 << MODE_E5;
 2760         } else {
 2761                 DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
 2762                           p_hwfn->p_dev->type);
 2763                 return ECORE_INVAL;
 2764         }
 2765 
 2766         /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/
 2767         switch (p_hwfn->p_dev->num_ports_in_engine) {
 2768         case 1:
 2769                 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
 2770                 break;
 2771         case 2:
 2772                 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
 2773                 break;
 2774         case 4:
 2775                 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
 2776                 break;
 2777         default:
 2778                 DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n",
 2779                           p_hwfn->p_dev->num_ports_in_engine);
 2780                 return ECORE_INVAL;
 2781         }
 2782 
 2783         if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
 2784                           &p_hwfn->p_dev->mf_bits))
 2785                 hw_mode |= 1 << MODE_MF_SD;
 2786         else
 2787                 hw_mode |= 1 << MODE_MF_SI;
 2788 
 2789 #ifndef ASIC_ONLY
 2790         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
 2791                 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
 2792                         hw_mode |= 1 << MODE_FPGA;
 2793                 } else {
 2794                         if (p_hwfn->p_dev->b_is_emul_full)
 2795                                 hw_mode |= 1 << MODE_EMUL_FULL;
 2796                         else
 2797                                 hw_mode |= 1 << MODE_EMUL_REDUCED;
 2798                 }
 2799         } else
 2800 #endif
 2801         hw_mode |= 1 << MODE_ASIC;
 2802 
 2803         if (ECORE_IS_CMT(p_hwfn->p_dev))
 2804                 hw_mode |= 1 << MODE_100G;
 2805 
 2806         p_hwfn->hw_info.hw_mode = hw_mode;
 2807 
 2808         DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
 2809                    "Configuring function for hw_mode: 0x%08x\n",
 2810                    p_hwfn->hw_info.hw_mode);
 2811 
 2812         return ECORE_SUCCESS;
 2813 }
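
/*
 * Illustrative sketch (compiled out): hw_mode is a bit-mask of MODE_*
 * flags, so consumers test individual bits rather than comparing whole
 * values; ecore_hw_init_pf() below does exactly this for MODE_MF_SD.
 */
#if 0
static bool example_is_mf_sd(struct ecore_hwfn *p_hwfn)
{
        return !!(p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD));
}
#endif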
 2814 
 2815 #ifndef ASIC_ONLY
 2816 /* MFW-replacement initializations for non-ASIC */
 2817 static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
 2818                                                struct ecore_ptt *p_ptt)
 2819 {
 2820         struct ecore_dev *p_dev = p_hwfn->p_dev;
 2821         u32 pl_hv = 1;
 2822         int i;
 2823 
 2824         if (CHIP_REV_IS_EMUL(p_dev)) {
 2825                 if (ECORE_IS_AH(p_dev))
 2826                         pl_hv |= 0x600;
 2827                 else if (ECORE_IS_E5(p_dev))
 2828                         ECORE_E5_MISSING_CODE;
 2829         }
 2830 
 2831         ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
 2832 
 2833         if (CHIP_REV_IS_EMUL(p_dev) &&
 2834             (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)))
 2835                 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
 2836                          0x3ffffff);
 2837 
 2838         /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
 2839         /* CNIG_REG_NW_PORT_MODE is the same for A0 and B0 */
 2840         if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
 2841                 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
 2842 
 2843         if (CHIP_REV_IS_EMUL(p_dev)) {
 2844                 if (ECORE_IS_AH(p_dev)) {
 2845                         /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
 2846                         ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
 2847                                  (p_dev->num_ports_in_engine >> 1));
 2848 
 2849                         ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
 2850                                  p_dev->num_ports_in_engine == 4 ? 0 : 3);
 2851                 } else if (ECORE_IS_E5(p_dev)) {
 2852                         ECORE_E5_MISSING_CODE;
 2853                 }
 2854 
 2855                 /* Poll on RBC */
 2856                 ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
 2857                 for (i = 0; i < 100; i++) {
 2858                         OSAL_UDELAY(50);
 2859                         if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
 2860                                 break;
 2861                 }
 2862                 if (i == 100)
 2863                         DP_NOTICE(p_hwfn, true,
 2864                                   "RBC done failed to complete in PSWRQ2\n");
 2865         }
 2866 
 2867         return ECORE_SUCCESS;
 2868 }
 2869 #endif
 2870 
 2871 /* Init run time data for all PFs and their VFs on an engine.
 2872  * TBD for VFs: pending availability of parent-PF info for each VF in
 2873  * shmem, since CAU requires knowledge of the parent PF for each VF.
 2874  */
 2875 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
 2876 {
 2877         u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
 2878         int i, igu_sb_id;
 2879 
 2880         for_each_hwfn(p_dev, i) {
 2881                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 2882                 struct ecore_igu_info *p_igu_info;
 2883                 struct ecore_igu_block *p_block;
 2884                 struct cau_sb_entry sb_entry;
 2885 
 2886                 p_igu_info = p_hwfn->hw_info.p_igu_info;
 2887 
 2888                 for (igu_sb_id = 0;
 2889                      igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
 2890                      igu_sb_id++) {
 2891                         p_block = &p_igu_info->entry[igu_sb_id];
 2892 
 2893                         if (!p_block->is_pf)
 2894                                 continue;
 2895 
 2896                         ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
 2897                                                 p_block->function_id,
 2898                                                 0, 0);
 2899                         STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
 2900                                          sb_entry);
 2901                 }
 2902         }
 2903 }
 2904 
 2905 static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
 2906                                        struct ecore_ptt *p_ptt)
 2907 {
 2908         u32 val, wr_mbs, cache_line_size;
 2909 
 2910         val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
 2911         switch (val) {
 2912         case 0:
 2913                 wr_mbs = 128;
 2914                 break;
 2915         case 1:
 2916                 wr_mbs = 256;
 2917                 break;
 2918         case 2:
 2919                 wr_mbs = 512;
 2920                 break;
 2921         default:
 2922                 DP_INFO(p_hwfn,
 2923                         "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
 2924                         val);
 2925                 return;
 2926         }
 2927 
 2928         cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
 2929         switch (cache_line_size) {
 2930         case 32:
 2931                 val = 0;
 2932                 break;
 2933         case 64:
 2934                 val = 1;
 2935                 break;
 2936         case 128:
 2937                 val = 2;
 2938                 break;
 2939         case 256:
 2940                 val = 3;
 2941                 break;
 2942         default:
 2943                 DP_INFO(p_hwfn,
 2944                         "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
 2945                         cache_line_size);
 2946         }
 2947 
 2948         if (OSAL_CACHE_LINE_SIZE > wr_mbs)
 2949                 DP_INFO(p_hwfn,
 2950                         "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
 2951                         OSAL_CACHE_LINE_SIZE, wr_mbs);
 2952 
 2953         STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
 2954         if (val > 0) {
 2955                 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
 2956                 STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
 2957         }
 2958 }
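
/*
 * Worked example of the encoding above: with a 64-byte OS cache line and
 * PSWRQ2_REG_WR_MBS0 == 2 (wr_mbs = 512), cache_line_size becomes
 * min(64, 512) = 64, which encodes to val = 1; since val > 0, the same
 * value is mirrored into the PSWRQ2 DRAM_ALIGN_WR/RD runtime registers.
 */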
 2959 
 2960 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
 2961                                                  struct ecore_ptt *p_ptt,
 2962                                                  int hw_mode)
 2963 {
 2964         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
 2965         struct ecore_dev *p_dev = p_hwfn->p_dev;
 2966         u8 vf_id, max_num_vfs;
 2967         u16 num_pfs, pf_id;
 2968         u32 concrete_fid;
 2969         enum _ecore_status_t rc = ECORE_SUCCESS;
 2970 
 2971         ecore_init_cau_rt_data(p_dev);
 2972 
 2973         /* Program GTT windows */
 2974         ecore_gtt_init(p_hwfn, p_ptt);
 2975 
 2976 #ifndef ASIC_ONLY
 2977         if (CHIP_REV_IS_EMUL(p_dev)) {
 2978                 rc = ecore_hw_init_chip(p_hwfn, p_ptt);
 2979                 if (rc != ECORE_SUCCESS)
 2980                         return rc;
 2981         }
 2982 #endif
 2983 
 2984         if (p_hwfn->mcp_info) {
 2985                 if (p_hwfn->mcp_info->func_info.bandwidth_max)
 2986                         qm_info->pf_rl_en = 1;
 2987                 if (p_hwfn->mcp_info->func_info.bandwidth_min)
 2988                         qm_info->pf_wfq_en = 1;
 2989         }
 2990 
 2991         ecore_qm_common_rt_init(p_hwfn,
 2992                                 p_dev->num_ports_in_engine,
 2993                                 qm_info->max_phys_tcs_per_port,
 2994                                 qm_info->pf_rl_en, qm_info->pf_wfq_en,
 2995                                 qm_info->vport_rl_en, qm_info->vport_wfq_en,
 2996                                 qm_info->qm_port_params);
 2997 
 2998         ecore_cxt_hw_init_common(p_hwfn);
 2999 
 3000         ecore_init_cache_line_size(p_hwfn, p_ptt);
 3001 
 3002         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
 3003                             hw_mode);
 3004         if (rc != ECORE_SUCCESS)
 3005                 return rc;
 3006 
 3007         /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
 3008          * need to decide with which value, maybe runtime
 3009          */
 3010         ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
 3011         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
 3012 
 3013         if (ECORE_IS_BB(p_dev)) {
 3014                 /* Workaround: clear ROCE search for all functions, to keep
 3015                  * uninitialized functions out of RoCE packet processing.
 3016                  */
 3017                 num_pfs = NUM_OF_ENG_PFS(p_dev);
 3018                 for (pf_id = 0; pf_id < num_pfs; pf_id++) {
 3019                         ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
 3020                         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
 3021                         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
 3022                 }
 3023                 /* pretend to original PF */
 3024                 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 3025         }
 3026 
 3027         /* Workaround for avoiding CCFC execution error when getting packets
 3028          * with CRC errors, and allowing instead the invoking of the FW error
 3029          * handler.
 3030          * This is not done inside the init tool since it currently can't
 3031          * perform a pretending to VFs.
 3032          */
 3033         max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
 3034         for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
 3035                 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
 3036                 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
 3037                 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
 3038                 ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
 3039                 ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
 3040                 ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
 3041         }
 3042         /* pretend to original PF */
 3043         ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 3044 
 3045         return rc;
 3046 }
 3047 
 3048 #ifndef ASIC_ONLY
 3049 #define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4)
 3050 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5)
 3051 
 3052 #define PMEG_IF_BYTE_COUNT      8
 3053 
 3054 static void ecore_wr_nw_port(struct ecore_hwfn  *p_hwfn,
 3055                              struct ecore_ptt   *p_ptt,
 3056                              u32                addr,
 3057                              u64                data,
 3058                              u8                 reg_type,
 3059                              u8                 port)
 3060 {
 3061         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 3062                    "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
 3063                    ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
 3064                    (8 << PMEG_IF_BYTE_COUNT),
 3065                    (reg_type << 25) | (addr << 8) | port,
 3066                    (u32)((data >> 32) & 0xffffffff),
 3067                    (u32)(data & 0xffffffff));
 3068 
 3069         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
 3070                  (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
 3071                   0xffff00fe) |
 3072                  (8 << PMEG_IF_BYTE_COUNT));
 3073         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
 3074                  (reg_type << 25) | (addr << 8) | port);
 3075         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
 3076         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
 3077                  (data >> 32) & 0xffffffff);
 3078 }
 3079 
 3080 #define XLPORT_MODE_REG (0x20a)
 3081 #define XLPORT_MAC_CONTROL (0x210)
 3082 #define XLPORT_FLOW_CONTROL_CONFIG (0x207)
 3083 #define XLPORT_ENABLE_REG (0x20b)
 3084 
 3085 #define XLMAC_CTRL (0x600)
 3086 #define XLMAC_MODE (0x601)
 3087 #define XLMAC_RX_MAX_SIZE (0x608)
 3088 #define XLMAC_TX_CTRL (0x604)
 3089 #define XLMAC_PAUSE_CTRL (0x60d)
 3090 #define XLMAC_PFC_CTRL (0x60e)
 3091 
 3092 static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
 3093                                     struct ecore_ptt *p_ptt)
 3094 {
 3095         u8 loopback = 0, port = p_hwfn->port_id * 2;
 3096 
 3097         DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
 3098 
 3099         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG,
 3100                          (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */
 3101         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
 3102         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
 3103                          0x40, 0, port); /*XLMAC: SOFT RESET */
 3104         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE,
 3105                          0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */
 3106         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE,
 3107                          0x3fff, 0, port); /* XLMAC: Max Size */
 3108         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
 3109                          0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
 3110                          0, port);
 3111         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL,
 3112                          0x7c000, 0, port);
 3113         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
 3114                          0x30ffffc000ULL, 0, port);
 3115         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
 3116                          0, port); /* XLMAC: TX_EN, RX_EN */
 3117         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
 3118                          0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
 3119         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG,
 3120                          1, 0, port); /* Enabled Parallel PFC interface */
 3121         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG,
 3122                          0xf, 1, port); /* XLPORT port enable */
 3123 }
 3124 
 3125 static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
 3126                                        struct ecore_ptt *p_ptt)
 3127 {
 3128         u8 port = p_hwfn->port_id;
 3129         u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
 3130 
 3131         DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
 3132 
 3133         ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
 3134                  (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
 3135                  (port <<
 3136                   CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
 3137                  (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
 3138 
 3139         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
 3140                  1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
 3141 
 3142         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
 3143                  9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
 3144 
 3145         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
 3146                  0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
 3147 
 3148         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
 3149                  8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
 3150 
 3151         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
 3152                  (0xA <<
 3153                   ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
 3154                  (8 <<
 3155                   ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
 3156 
 3157         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
 3158                  0xa853);
 3159 }
 3160 
 3161 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
 3162                                  struct ecore_ptt *p_ptt)
 3163 {
 3164         if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev))
 3165                 ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
 3166         else /* BB */
 3167                 ecore_emul_link_init_bb(p_hwfn, p_ptt);
 3168 
 3169         return;
 3170 }
 3171 
 3172 static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
 3173                                struct ecore_ptt *p_ptt,  u8 port)
 3174 {
 3175         int port_offset = port ? 0x800 : 0;
 3176         u32 xmac_rxctrl = 0;
 3177 
 3178         /* Reset of XMAC */
 3179         /* FIXME: move to common start */
 3180         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32),
 3181                  MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
 3182         OSAL_MSLEEP(1);
 3183         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
 3184                  MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
 3185 
 3186         ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
 3187 
 3188         /* Set the number of ports on the Warp Core to 10G */
 3189         ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
 3190 
 3191         /* Soft reset of XMAC */
 3192         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
 3193                  MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
 3194         OSAL_MSLEEP(1);
 3195         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
 3196                  MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
 3197 
 3198         /* FIXME: move to common end */
 3199         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
 3200                 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
 3201 
 3202         /* Set Max packet size: initialize XMAC block register for port 0 */
 3203         ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
 3204 
 3205         /* CRC append for Tx packets: init XMAC block register for port 1 */
 3206         ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
 3207 
 3208         /* Enable TX and RX: initialize XMAC block register for port 1 */
 3209         ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
 3210                  XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
 3211         xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
 3212                                XMAC_REG_RX_CTRL_BB + port_offset);
 3213         xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
 3214         ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
 3215 }
 3216 #endif
 3217 
 3218 static enum _ecore_status_t
 3219 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
 3220                        struct ecore_ptt *p_ptt,
 3221                        u32 pwm_region_size,
 3222                        u32 n_cpus)
 3223 {
 3224         u32 dpi_bit_shift, dpi_count, dpi_page_size;
 3225         u32 min_dpis;
 3226         u32 n_wids;
 3227 
 3228         /* Calculate DPI size
 3229          * ------------------
 3230          * The PWM region contains Doorbell Pages. The first is reserved for
 3231          * the kernel for, e.g., L2. The others are free to be used by non-
 3232          * trusted applications, typically from user space. Each page, called
 3233          * a doorbell page, is sectioned into windows that allow doorbells to be
 3234          * issued in parallel by the kernel/application. The size of such a
 3235          * window (a.k.a. WID) is 1kB.
 3236          * Summary:
 3237          *    1kB WID x N WIDS = DPI page size
 3238          *    DPI page size x N DPIs = PWM region size
 3239          * Notes:
 3240          * The DPI page size must be a multiple of OSAL_PAGE_SIZE
 3241          * in order to ensure that two applications won't share the same page.
 3242          * It also must contain at least one WID per CPU to allow parallelism.
 3243          * It also must be a power of 2, since it is stored as a bit shift.
 3244          *
 3245          * The DPI page size is stored in a register as 'dpi_bit_shift' so that
 3246          * 0 is 4kB, 1 is 8kB, and so on. Hence the minimum size is 4,096
 3247          * containing 4 WIDs.
 3248          */
 3249         n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
 3250         dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
 3251         dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1);
 3252         dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
 3253         dpi_count = pwm_region_size / dpi_page_size;
 3254 
 3255         min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
 3256         min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
 3257 
 3258         /* Update hwfn */
 3259         p_hwfn->dpi_size = dpi_page_size;
 3260         p_hwfn->dpi_count = dpi_count;
 3261 
 3262         /* Update registers */
 3263         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
 3264 
 3265         if (dpi_count < min_dpis)
 3266                 return ECORE_NORESOURCES;
 3267 
 3268         return ECORE_SUCCESS;
 3269 }
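
/*
 * Worked example of the DPI sizing above, assuming 1kB WIDs, a 4kB
 * OSAL_PAGE_SIZE and ECORE_MIN_WIDS <= 6 (the CPU count is hypothetical):
 * with n_cpus = 6, n_wids rounds up to the next power of two, 8, giving
 * dpi_page_size = 8 * 1kB = 8kB (already page aligned). Then
 * dpi_bit_shift = log2(8192 / 4096) = 1, and a 2MB PWM region yields
 * dpi_count = 2MB / 8kB = 256 DPIs.
 */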
 3270 
 3271 enum ECORE_ROCE_EDPM_MODE {
 3272         ECORE_ROCE_EDPM_MODE_ENABLE     = 0,
 3273         ECORE_ROCE_EDPM_MODE_FORCE_ON   = 1,
 3274         ECORE_ROCE_EDPM_MODE_DISABLE    = 2,
 3275 };
 3276 
 3277 static enum _ecore_status_t
 3278 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
 3279                               struct ecore_ptt *p_ptt)
 3280 {
 3281         struct ecore_rdma_pf_params *p_rdma_pf_params;
 3282         u32 pwm_regsize, norm_regsize;
 3283         u32 non_pwm_conn, min_addr_reg1;
 3284         u32 db_bar_size, n_cpus = 1;
 3285         u32 roce_edpm_mode;
 3286         u32 pf_dems_shift;
 3287         enum _ecore_status_t rc = ECORE_SUCCESS;
 3288         u8 cond;
 3289 
 3290         db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
 3291         if (ECORE_IS_CMT(p_hwfn->p_dev))
 3292                 db_bar_size /= 2;
 3293 
 3294         /* Calculate doorbell regions
 3295          * -----------------------------------
 3296          * The doorbell BAR is made of two regions. The first is called normal
 3297          * region and the second is called PWM region. In the normal region
 3298          * each ICID has its own set of addresses so that writing to that
 3299          * specific address identifies the ICID. In the Process Window Mode
 3300          * region the ICID is given in the data written to the doorbell. The
 3301          * per-PF register DORQ_REG_PF_MIN_ADDR_REG1, written below, denotes
 3302          * the offset in the doorbell BAR at which the PWM region begins.
 3303          * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
 3304          * non-PWM connection. The calculation below computes the total non-PWM
 3305          * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
 3306          * in units of 4,096 bytes.
 3307          */
 3308         non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
 3309                        ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
 3310                                                      OSAL_NULL) +
 3311                        ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
 3312                                                      OSAL_NULL);
 3313         norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, OSAL_PAGE_SIZE);
 3314         min_addr_reg1 = norm_regsize / 4096;
 3315         pwm_regsize = db_bar_size - norm_regsize;
 3316 
 3317         /* Check that the normal and PWM sizes are valid */
 3318         if (db_bar_size < norm_regsize) {
 3319                 DP_ERR(p_hwfn->p_dev,
 3320                        "Doorbell BAR size 0x%x is too small (normal region is 0x%0x)\n",
 3321                        db_bar_size, norm_regsize);
 3322                 return ECORE_NORESOURCES;
 3323         }
 3324         if (pwm_regsize < ECORE_MIN_PWM_REGION) {
 3325                 DP_ERR(p_hwfn->p_dev,
 3326                        "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
 3327                        pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
 3328                        norm_regsize);
 3329                 return ECORE_NORESOURCES;
 3330         }
 3331 
 3332         p_rdma_pf_params = &p_hwfn->pf_params.rdma_pf_params;
 3333 
 3334         /* Calculate number of DPIs */
 3335         if (ECORE_IS_IWARP_PERSONALITY(p_hwfn))
 3336                 p_rdma_pf_params->roce_edpm_mode = ECORE_ROCE_EDPM_MODE_DISABLE;
 3337 
 3338         if (p_rdma_pf_params->roce_edpm_mode <= ECORE_ROCE_EDPM_MODE_DISABLE) {
 3339                 roce_edpm_mode = p_rdma_pf_params->roce_edpm_mode;
 3340         } else {
 3341                 DP_ERR(p_hwfn->p_dev,
 3342                        "roce edpm mode was configured to an illegal value of %u. Resetting it to 0-Enable EDPM if BAR size is adequate\n",
 3343                        p_rdma_pf_params->roce_edpm_mode);
 3344                 roce_edpm_mode = 0;
 3345         }
 3346 
 3347         if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
 3348             (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON)) {
 3349                 /* Either EDPM is mandatory, or we are attempting to allocate a
 3350                  * WID per CPU.
 3351                  */
 3352                 n_cpus = OSAL_NUM_CPUS();
 3353                 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
 3354         }
 3355 
 3356         cond = ((rc != ECORE_SUCCESS) &&
 3357                 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
 3358                 (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
 3359         if (cond || p_hwfn->dcbx_no_edpm) {
 3360                 /* Either EDPM is disabled from user configuration, or it is
 3361                  * disabled via DCBx, or it is not mandatory and we failed to
 3362                  * allocate a WID per CPU.
 3363                  */
 3364                 n_cpus = 1;
 3365                 rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
 3366 
 3367 #ifdef CONFIG_ECORE_ROCE
 3368                 /* If we entered this flow due to DCBX then the DPM register is
 3369                  * already configured.
 3370                  */
 3371                 if (cond)
 3372                         ecore_rdma_dpm_bar(p_hwfn, p_ptt);
 3373 #endif
 3374         }
 3375 
 3376         p_hwfn->wid_count = (u16)n_cpus;
 3377 
 3378         /* Check return codes from above calls */
 3379         if (rc != ECORE_SUCCESS) {
 3380 #ifndef LINUX_REMOVE
 3381                 DP_ERR(p_hwfn,
 3382                        "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via user configuration min_dpis or by disabling EDPM via user configuration roce_edpm_mode\n",
 3383                        p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
 3384                        ECORE_MIN_DPIS);
 3385 #else
 3386                 DP_ERR(p_hwfn,
 3387                        "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via the module parameter min_rdma_dpis or by disabling EDPM by setting the module parameter roce_edpm to 2\n",
 3388                        p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
 3389                        ECORE_MIN_DPIS);
 3390 #endif
 3391                 DP_ERR(p_hwfn,
 3392                        "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
 3393                        norm_regsize, pwm_regsize, p_hwfn->dpi_size,
 3394                        p_hwfn->dpi_count,
 3395                        ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
 3396                        "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);
 3397 
 3398                 return ECORE_NORESOURCES;
 3399         }
 3400 
 3401         DP_INFO(p_hwfn,
 3402                 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
 3403                 norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count,
 3404                 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
 3405                 "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);
 3406 
 3407         /* Update hwfn */
 3408         p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
 3409                                                   * calculate the doorbell
 3410                                                   * address
 3411                                                   */
 3412 
 3413         /* Update registers */
 3414         /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
 3415         pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
 3416         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
 3417         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
 3418 
 3419         return ECORE_SUCCESS;
 3420 }
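
/*
 * Worked example of the doorbell-BAR split above (connection count is
 * hypothetical, and ECORE_PF_DEMS_SIZE is assumed to be 4 bytes): with
 * non_pwm_conn = 1000, the normal region needs 4000 bytes, which
 * ROUNDUP() pads to one 4kB page, so norm_regsize = 4096 and
 * min_addr_reg1 = 4096 / 4096 = 1. A 2MB doorbell BAR then leaves
 * pwm_regsize = 2MB - 4kB for the PWM region, which
 * ecore_hw_init_dpi_size() carves into DPIs.
 */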
 3421 
 3422 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 3423                                                struct ecore_ptt *p_ptt,
 3424                                                int hw_mode)
 3425 {
 3426         enum _ecore_status_t rc = ECORE_SUCCESS;
 3427 
 3428         /* In CMT the gate should be cleared by the 2nd hwfn */
 3429         if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
 3430                 STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
 3431 
 3432         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
 3433                             hw_mode);
 3434         if (rc != ECORE_SUCCESS)
 3435                 return rc;
 3436 
 3437         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
 3438 
 3439 #ifndef ASIC_ONLY
 3440         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
 3441                 return ECORE_SUCCESS;
 3442 
 3443         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
 3444                 if (ECORE_IS_AH(p_hwfn->p_dev))
 3445                         return ECORE_SUCCESS;
 3446                 else if (ECORE_IS_BB(p_hwfn->p_dev))
 3447                         ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
 3448                 else /* E5 */
 3449                         ECORE_E5_MISSING_CODE;
 3450         } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
 3451                 if (ECORE_IS_CMT(p_hwfn->p_dev)) {
 3452                         /* Activate OPTE in CMT */
 3453                         u32 val;
 3454 
 3455                         val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
 3456                         val |= 0x10;
 3457                         ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
 3458                         ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
 3459                         ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
 3460                         ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
 3461                         ecore_wr(p_hwfn, p_ptt,
 3462                                  NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
 3463                         ecore_wr(p_hwfn, p_ptt,
 3464                                  NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
 3465                         ecore_wr(p_hwfn, p_ptt,
 3466                                  NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
 3467                                  0x55555555);
 3468                 }
 3469 
 3470                 ecore_emul_link_init(p_hwfn, p_ptt);
 3471         } else {
 3472                 DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
 3473         }
 3474 #endif
 3475 
 3476         return rc;
 3477 }
 3478 
 3479 static enum _ecore_status_t
 3480 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 3481                  int hw_mode, struct ecore_hw_init_params *p_params)
 3482 {
 3483         u8 rel_pf_id = p_hwfn->rel_pf_id;
 3484         u32 prs_reg;
 3485         enum _ecore_status_t rc = ECORE_SUCCESS;
 3486         u16 ctrl;
 3487         int pos;
 3488 
 3489         if (p_hwfn->mcp_info) {
 3490                 struct ecore_mcp_function_info *p_info;
 3491 
 3492                 p_info = &p_hwfn->mcp_info->func_info;
 3493                 if (p_info->bandwidth_min)
 3494                         p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
 3495 
 3496                 /* Update rate limit once we'll actually have a link */
 3497                 p_hwfn->qm_info.pf_rl = 100000;
 3498         }
 3499         ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
 3500 
 3501         ecore_int_igu_init_rt(p_hwfn);
 3502 
 3503         /* Set VLAN in NIG if needed */
 3504         if (hw_mode & (1 << MODE_MF_SD)) {
 3505                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
 3506                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
 3507                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
 3508                              p_hwfn->hw_info.ovlan);
 3509 
 3510                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
 3511                            "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
 3512                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
 3513                              1);
 3514         }
 3515 
 3516         /* Enable classification by MAC if needed */
 3517         if (hw_mode & (1 << MODE_MF_SI)) {
 3518                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n");
 3519                 STORE_RT_REG(p_hwfn,
 3520                              NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
 3521         }
 3522 
 3523         /* Protocol configuration - @@@TBD - should we set 0 otherwise? */
 3524         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
 3525                      (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
 3526         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
 3527                      (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
 3528         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
 3529 
 3530         /* perform debug configuration when chip is out of reset */
 3531         OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
 3532 
 3533         /* Sanity check before the PF init sequence that uses DMAE */
 3534         rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
 3535         if (rc)
 3536                 return rc;
 3537 
 3538         /* PF Init sequence */
 3539         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
 3540         if (rc)
 3541                 return rc;
 3542 
 3543         /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
 3544         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
 3545         if (rc)
 3546                 return rc;
 3547 
 3548         /* Pure runtime initializations - directly to the HW  */
 3549         ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 3550 
 3551         /* PCI relaxed ordering is generally beneficial for performance,
 3552          * but can hurt performance or lead to instability on some setups.
 3553          * If management FW is taking care of it go with that, otherwise
 3554          * disable to be on the safe side.
 3555          */
 3556         pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
 3557         if (!pos) {
 3558                 DP_NOTICE(p_hwfn, true,
 3559                           "Failed to find the PCI Express Capability structure in the PCI config space\n");
 3560                 return ECORE_IO;
 3561         }
 3562 
 3563         OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
 3564 
 3565         if (p_params->pci_rlx_odr_mode == ECORE_ENABLE_RLX_ODR) {
 3566                 ctrl |= PCI_EXP_DEVCTL_RELAX_EN;
 3567                 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
 3568                                            pos + PCI_EXP_DEVCTL, ctrl);
 3569         } else if (p_params->pci_rlx_odr_mode == ECORE_DISABLE_RLX_ODR) {
 3570                 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
 3571                 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
 3572                                            pos + PCI_EXP_DEVCTL, ctrl);
 3573         } else if (ecore_mcp_rlx_odr_supported(p_hwfn)) {
 3574                 DP_INFO(p_hwfn, "PCI relax ordering configured by MFW\n");
 3575         } else {
 3576                 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
 3577                 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
 3578                                            pos + PCI_EXP_DEVCTL, ctrl);
 3579         }
 3580 
 3581         rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
 3582         if (rc != ECORE_SUCCESS)
 3583                 return rc;
 3584 
 3585         /* Use the leading hwfn since in CMT only NIG #0 is operational */
 3586         if (IS_LEAD_HWFN(p_hwfn)) {
 3587                 rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt,
 3588                                           p_params->avoid_eng_affin);
 3589                 if (rc != ECORE_SUCCESS)
 3590                         return rc;
 3591         }
 3592 
 3593         if (p_params->b_hw_start) {
 3594                 /* enable interrupts */
 3595                 rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode);
 3596                 if (rc != ECORE_SUCCESS)
 3597                         return rc;
 3598 
 3599                 /* send function start command */
 3600                 rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
 3601                                        p_params->allow_npar_tx_switch);
 3602                 if (rc) {
 3603                         DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n");
 3604                         return rc;
 3605                 }
 3606                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
 3607                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3608                                 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
 3609 
 3610                 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
 3611                 {
 3612                         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
 3613                                         (1 << 2));
 3614                         ecore_wr(p_hwfn, p_ptt,
 3615                                         PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
 3616                                         0x100);
 3617                 }
 3618                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3619                                 "PRS_REG_SEARCH registers after start PFn\n");
 3620                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
 3621                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3622                                 "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
 3623                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
 3624                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3625                                 "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
 3626                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
 3627                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3628                                 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
 3629                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
 3630                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3631                                 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
 3632                 prs_reg = ecore_rd(p_hwfn, p_ptt,
 3633                                 PRS_REG_SEARCH_TCP_FIRST_FRAG);
 3634                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3635                                 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
 3636                                 prs_reg);
 3637                 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
 3638                 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
 3639                                 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
 3640         }
 3641         return ECORE_SUCCESS;
 3642 }
 3643 
 3644 enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
 3645                                                   struct ecore_ptt *p_ptt,
 3646                                                   bool b_enable)
 3647 {
 3648         u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
 3649 
 3650         /* Configure the PF's internal FID_enable for master transactions */
 3651         ecore_wr(p_hwfn, p_ptt,
 3652                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
 3653 
 3654         /* Wait until value is set - try for 1 second every 50us */
 3655         for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
 3656                 val = ecore_rd(p_hwfn, p_ptt,
 3657                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
 3658                 if (val == set_val)
 3659                         break;
 3660 
 3661                 OSAL_UDELAY(50);
 3662         }
 3663 
 3664         if (val != set_val) {
 3665                 DP_NOTICE(p_hwfn, true,
 3666                           "PFID_ENABLE_MASTER wasn't changed after a second\n");
 3667                 return ECORE_UNKNOWN_ERROR;
 3668         }
 3669 
 3670         return ECORE_SUCCESS;
 3671 }
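
/*
 * Illustrative sketch (compiled out): the wait loop above is a generic
 * write-then-poll pattern. Factored out it would look like this, with
 * the 1-second budget expressed as 20,000 polls of 50us each.
 */
#if 0
static bool example_poll_reg_eq(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                u32 addr, u32 expected)
{
        u32 i;

        for (i = 0; i < 20000; i++) {
                if (ecore_rd(p_hwfn, p_ptt, addr) == expected)
                        return true;
                OSAL_UDELAY(50);
        }

        return false;
}
#endif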
 3672 
 3673 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
 3674                         struct ecore_ptt *p_main_ptt)
 3675 {
 3676         /* Read shadow of current MFW mailbox */
 3677         ecore_mcp_read_mb(p_hwfn, p_main_ptt);
 3678         OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
 3679                     p_hwfn->mcp_info->mfw_mb_cur,
 3680                     p_hwfn->mcp_info->mfw_mb_length);
 3681 }
 3682 
 3683 static enum _ecore_status_t
 3684 ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
 3685                            struct ecore_load_req_params *p_load_req,
 3686                            struct ecore_drv_load_params *p_drv_load)
 3687 {
 3688         /* Make sure that if ecore-client didn't provide inputs, all the
 3689          * expected defaults are indeed zero.
 3690          */
 3691         OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
 3692         OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
 3693         OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
 3694 
 3695         OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
 3696 
 3697         if (p_drv_load == OSAL_NULL)
 3698                 goto out;
 3699 
 3700         p_load_req->drv_role = p_drv_load->is_crash_kernel ?
 3701                                ECORE_DRV_ROLE_KDUMP :
 3702                                ECORE_DRV_ROLE_OS;
 3703         p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
 3704         p_load_req->override_force_load = p_drv_load->override_force_load;
 3705 
 3706         /* Old MFW versions don't support timeout values other than default and
 3707          * none, so any other value is replaced according to the fall-back action.
 3708          */
 3709 
 3710         if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
 3711             p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
 3712             (p_hwfn->mcp_info->capabilities &
 3713              FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
 3714                 p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
 3715                 goto out;
 3716         }
 3717 
 3718         switch (p_drv_load->mfw_timeout_fallback) {
 3719         case ECORE_TO_FALLBACK_TO_NONE:
 3720                 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
 3721                 break;
 3722         case ECORE_TO_FALLBACK_TO_DEFAULT:
 3723                 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 3724                 break;
 3725         case ECORE_TO_FALLBACK_FAIL_LOAD:
 3726                 DP_NOTICE(p_hwfn, false,
 3727                           "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
 3728                           p_drv_load->mfw_timeout_val,
 3729                           ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
 3730                           ECORE_LOAD_REQ_LOCK_TO_NONE);
 3731                 return ECORE_ABORTED;
 3732         }
 3733 
 3734         DP_INFO(p_hwfn,
 3735                 "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
 3736                 p_drv_load->mfw_timeout_val,
 3737                 (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
 3738                 "default" : "none",
 3739                 p_load_req->timeout_val);
 3740 out:
 3741         return ECORE_SUCCESS;
 3742 }
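
/*
 * Example of the fallback above: a client asking for a custom (non-default,
 * non-none) mfw_timeout_val against an MFW that lacks
 * FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO cannot get that timeout; the
 * request is downgraded to the none/default value named by
 * mfw_timeout_fallback, or the load is refused with ECORE_ABORTED.
 */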
 3743 
 3744 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
 3745                                     struct ecore_hw_init_params *p_params)
 3746 {
 3747         if (p_params->p_tunn) {
 3748                 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
 3749                 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
 3750         }
 3751 
 3752         p_hwfn->b_int_enabled = 1;
 3753 
 3754         return ECORE_SUCCESS;
 3755 }
 3756 
 3757 static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
 3758                                    struct ecore_ptt *p_ptt)
 3759 {
 3760         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
 3761                  1 << p_hwfn->abs_pf_id);
 3762 }
 3763 
 3764 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 3765                                    struct ecore_hw_init_params *p_params)
 3766 {
 3767         struct ecore_load_req_params load_req_params;
 3768         u32 load_code, resp, param, drv_mb_param;
 3769         bool b_default_mtu = true;
 3770         struct ecore_hwfn *p_hwfn;
 3771         enum _ecore_status_t rc = ECORE_SUCCESS, cancel_load;
 3772         u16 ether_type;
 3773         int i;
 3774 
 3775         if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
 3776                 DP_NOTICE(p_dev, false,
 3777                           "MSI mode is not supported for CMT devices\n");
 3778                 return ECORE_INVAL;
 3779         }
 3780 
 3781         if (IS_PF(p_dev)) {
 3782                 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
 3783                 if (rc != ECORE_SUCCESS)
 3784                         return rc;
 3785         }
 3786 
 3787         for_each_hwfn(p_dev, i) {
 3788                 p_hwfn = &p_dev->hwfns[i];
 3789 
 3790                 /* If management didn't provide a default, set one of our own */
 3791                 if (!p_hwfn->hw_info.mtu) {
 3792                         p_hwfn->hw_info.mtu = 1500;
 3793                         b_default_mtu = false;
 3794                 }
 3795 
 3796                 if (IS_VF(p_dev)) {
 3797                         ecore_vf_start(p_hwfn, p_params);
 3798                         continue;
 3799                 }
 3800 
 3801                 rc = ecore_calc_hw_mode(p_hwfn);
 3802                 if (rc != ECORE_SUCCESS)
 3803                         return rc;
 3804 
 3805                 if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
 3806                                                    &p_dev->mf_bits) ||
 3807                                      OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
 3808                                                    &p_dev->mf_bits))) {
 3809                         if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
 3810                                           &p_dev->mf_bits))
 3811                                 ether_type = ETH_P_8021Q;
 3812                         else
 3813                                 ether_type = ETH_P_8021AD;
 3814                         STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
 3815                                      ether_type);
 3816                         STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
 3817                                      ether_type);
 3818                         STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
 3819                                      ether_type);
 3820                         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
 3821                                      ether_type);
 3822                 }
 3823 
 3824                 rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
 3825                                                 p_params->p_drv_load_params);
 3826                 if (rc != ECORE_SUCCESS)
 3827                         return rc;
 3828 
 3829                 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
 3830                                         &load_req_params);
 3831                 if (rc != ECORE_SUCCESS) {
 3832                         DP_NOTICE(p_hwfn, false,
 3833                                   "Failed sending a LOAD_REQ command\n");
 3834                         return rc;
 3835                 }
 3836 
 3837                 load_code = load_req_params.load_code;
 3838                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 3839                            "Load request was sent. Load code: 0x%x\n",
 3840                            load_code);
 3841 
 3842                 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
 3843 
 3844                 /* CQ75580:
 3845                  * When coming back from the hibernate state, the registers
 3846                  * from which the shadow is initially read are not yet
 3847                  * initialized. These registers get initialized during the
 3848                  * ecore_mcp_load_req request, so we need to re-read them
 3849                  * here to get the proper shadow register values.
 3850                  * Note: This is a workaround for the missing MFW
 3851                  * initialization. It may be removed once the implementation
 3852                  * is done.
 3853                  */
 3854                 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
 3855 
 3856                 /* Only relevant for recovery:
 3857                  * Clear the indication after the LOAD_REQ command has been
 3858                  * acknowledged by the MFW.
 3859                  */
 3860                 p_dev->recov_in_prog = false;
 3861 
 3862                 if (!qm_lock_ref_cnt) {
 3863 #ifdef CONFIG_ECORE_LOCK_ALLOC
 3864                         rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
 3865                         if (rc) {
 3866                                 DP_ERR(p_hwfn, "qm_lock allocation failed\n");
 3867                                 goto qm_lock_fail;
 3868                         }
 3869 #endif
 3870                         OSAL_SPIN_LOCK_INIT(&qm_lock);
 3871                 }
 3872                 ++qm_lock_ref_cnt;
 3873 
 3874                 /* Clean up the chip from a previous driver, if any remains
 3875                  * exist. This is not needed when the PF is the first one on
 3876                  * the engine, since afterwards we are going to init the FW.
 3877                  */
 3878                 if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
 3879                         rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
 3880                                                  p_hwfn->rel_pf_id, false);
 3881                         if (rc != ECORE_SUCCESS) {
 3882                                 ecore_hw_err_notify(p_hwfn,
 3883                                                     ECORE_HW_ERR_RAMROD_FAIL);
 3884                                 goto load_err;
 3885                         }
 3886                 }
 3887 
 3888                 /* Log and clear previous pglue_b errors if such exist */
 3889                 ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
 3890 
 3891                 /* Enable the PF's internal FID_enable in the PXP */
 3892                 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
 3893                                                   true);
 3894                 if (rc != ECORE_SUCCESS)
 3895                         goto load_err;
 3896 
 3897                 /* Clear the pglue_b was_error indication.
 3898                  * In E4 it must be done after the BME and the internal
 3899                  * FID_enable for the PF are set, since VDMs may cause the
 3900                  * indication to be set again.
 3901                  */
 3902                 ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
 3903 
 3904                 switch (load_code) {
 3905                 case FW_MSG_CODE_DRV_LOAD_ENGINE:
 3906                         rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
 3907                                                   p_hwfn->hw_info.hw_mode);
 3908                         if (rc != ECORE_SUCCESS)
 3909                                 break;
 3910                         /* Fall through */
 3911                 case FW_MSG_CODE_DRV_LOAD_PORT:
 3912                         rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
 3913                                                 p_hwfn->hw_info.hw_mode);
 3914                         if (rc != ECORE_SUCCESS)
 3915                                 break;
 3916                         /* Fall through */
 3917                 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 3918                         rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
 3919                                               p_hwfn->hw_info.hw_mode,
 3920                                               p_params);
 3921                         break;
 3922                 default:
 3923                         DP_NOTICE(p_hwfn, false,
 3924                                   "Unexpected load code [0x%08x]\n", load_code);
 3925                         rc = ECORE_NOTIMPL;
 3926                         break;
 3927                 }
 3928 
 3929                 if (rc != ECORE_SUCCESS) {
 3930                         DP_NOTICE(p_hwfn, false,
 3931                                   "init phase failed for loadcode 0x%x (rc %d)\n",
 3932                                   load_code, rc);
 3933                         goto load_err;
 3934                 }
 3935 
 3936                 rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
 3937                 if (rc != ECORE_SUCCESS) {
 3938                         DP_NOTICE(p_hwfn, false, "Sending load done failed, rc = %d\n", rc);
 3939                         if (rc == ECORE_NOMEM) {
 3940                                 DP_NOTICE(p_hwfn, false,
 3941                                           "Sending load done failed due to a memory allocation failure\n");
 3942                                 goto load_err;
 3943                         }
 3944                         return rc;
 3945                 }
 3946 
 3947                 /* send DCBX attention request command */
 3948                 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
 3949                            "sending phony dcbx set command to trigger DCBx attention handling\n");
 3950                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 3951                                    DRV_MSG_CODE_SET_DCBX,
 3952                                    1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
 3953                                    &param);
 3954                 if (rc != ECORE_SUCCESS) {
 3955                         DP_NOTICE(p_hwfn, false,
 3956                                   "Failed to send DCBX attention request\n");
 3957                         return rc;
 3958                 }
 3959 
 3960                 p_hwfn->hw_init_done = true;
 3961         }
 3962 
 3963         if (IS_PF(p_dev)) {
 3964                 /* Get pre-negotiated values for stag, bandwidth etc. */
 3965                 p_hwfn = ECORE_LEADING_HWFN(p_dev);
 3966                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
 3967                            "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
 3968                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 3969                                    DRV_MSG_CODE_GET_OEM_UPDATES,
 3970                                    1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
 3971                                    &resp, &param);
 3972                 if (rc != ECORE_SUCCESS)
 3973                         DP_NOTICE(p_hwfn, false,
 3974                                   "Failed to send GET_OEM_UPDATES attention request\n");
 3975         }
 3976 
 3977         if (IS_PF(p_dev)) {
 3978                 p_hwfn = ECORE_LEADING_HWFN(p_dev);
 3979                 drv_mb_param = STORM_FW_VERSION;
 3980                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 3981                                    DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
 3982                                    drv_mb_param, &resp, &param);
 3983                 if (rc != ECORE_SUCCESS)
 3984                         DP_INFO(p_hwfn, "Failed to update firmware version\n");
 3985 
 3986                 if (!b_default_mtu) {
 3987                         rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
 3988                                                       p_hwfn->hw_info.mtu);
 3989                         if (rc != ECORE_SUCCESS)
 3990                                 DP_INFO(p_hwfn, "Failed to update default mtu\n");
 3991                 }
 3992 
 3993                 rc = ecore_mcp_ov_update_driver_state(p_hwfn,
 3994                                                       p_hwfn->p_main_ptt,
 3995                                                       ECORE_OV_DRIVER_STATE_DISABLED);
 3996                 if (rc != ECORE_SUCCESS)
 3997                         DP_INFO(p_hwfn, "Failed to update driver state\n");
 3998 
 3999                 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
 4000                                                  ECORE_OV_ESWITCH_VEB);
 4001                 if (rc != ECORE_SUCCESS)
 4002                         DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
 4003         }
 4004 
 4005         return rc;
 4006 
 4007 load_err:
 4008         --qm_lock_ref_cnt;
 4009 #ifdef CONFIG_ECORE_LOCK_ALLOC
 4010         if (!qm_lock_ref_cnt)
 4011                 OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
 4012 qm_lock_fail:
 4013 #endif
 4014         /* The MFW load lock should also be released when initialization
 4015          * fails. If supported, send a cancel_load request to update the MFW
 4016          * with the load failure.
 4017          */
 4018         cancel_load = ecore_mcp_cancel_load_req(p_hwfn, p_hwfn->p_main_ptt);
 4019         if (cancel_load == ECORE_NOTIMPL) {
 4020                 DP_INFO(p_hwfn,
 4021                         "Send a load done request instead of cancel load\n");
 4022                 ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
 4023         }
 4024         return rc;
 4025 }
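
/* The load-code switch in ecore_hw_init() above is a deliberate fall-through
 * ladder: an ENGINE load runs common, port and function init; a PORT load
 * runs port and function init; a FUNCTION load runs function init only. A
 * minimal debug-helper sketch of that mapping follows; the helper name is
 * hypothetical and not part of the driver.
 */
static const char *ecore_load_code_to_str(u32 load_code)
{
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
                return "engine";        /* common + port + function init */
        case FW_MSG_CODE_DRV_LOAD_PORT:
                return "port";          /* port + function init */
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                return "function";      /* function init only */
        default:
                return "unknown";
        }
}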
 4026 
 4027 #define ECORE_HW_STOP_RETRY_LIMIT       (10)
 4028 static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
 4029                                  struct ecore_hwfn *p_hwfn,
 4030                                  struct ecore_ptt *p_ptt)
 4031 {
 4032         int i;
 4033 
 4034         /* close timers */
 4035         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
 4036         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
 4037         for (i = 0;
 4038              i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
 4039              i++) {
 4040                 if ((!ecore_rd(p_hwfn, p_ptt,
 4041                                TM_REG_PF_SCAN_ACTIVE_CONN)) &&
 4042                     (!ecore_rd(p_hwfn, p_ptt,
 4043                                TM_REG_PF_SCAN_ACTIVE_TASK)))
 4044                         break;
 4045 
 4046                 /* Depending on the number of connections/tasks, a 1 ms
 4047                  * sleep between polls may be required
 4048                  */
 4049                 OSAL_MSLEEP(1);
 4050         }
 4051 
 4052         if (i < ECORE_HW_STOP_RETRY_LIMIT)
 4053                 return;
 4054 
 4055         DP_NOTICE(p_hwfn, false,
 4056                   "Timer linear scans are not over [Connection %02x Tasks %02x]\n",
 4057                   (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
 4058                   (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
 4059 }
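
/* ecore_hw_timers_stop() above is an instance of a bounded register poll:
 * read the scan-active registers up to ECORE_HW_STOP_RETRY_LIMIT times,
 * sleeping 1 ms between reads. A minimal generic sketch of the same pattern,
 * assuming the caller supplies the register address and retry budget
 * (hypothetical helper, not part of the driver):
 */
static enum _ecore_status_t
ecore_poll_reg_until_zero(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                          u32 addr, u32 retry_limit)
{
        u32 i;

        for (i = 0; i < retry_limit; i++) {
                if (!ecore_rd(p_hwfn, p_ptt, addr))
                        return ECORE_SUCCESS; /* register drained to zero */
                OSAL_MSLEEP(1); /* give the HW time to make progress */
        }

        return ECORE_BUSY; /* still non-zero after retry_limit polls */
}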
 4060 
 4061 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
 4062 {
 4063         int j;
 4064 
 4065         for_each_hwfn(p_dev, j) {
 4066                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
 4067                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
 4068 
 4069                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
 4070         }
 4071 }
 4072 
 4073 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
 4074                                                  struct ecore_ptt *p_ptt,
 4075                                                  u32 addr, u32 expected_val)
 4076 {
 4077         u32 val = ecore_rd(p_hwfn, p_ptt, addr);
 4078 
 4079         if (val != expected_val) {
 4080                 DP_NOTICE(p_hwfn, true,
 4081                           "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
 4082                           addr, val, expected_val);
 4083                 return ECORE_UNKNOWN_ERROR;
 4084         }
 4085 
 4086         return ECORE_SUCCESS;
 4087 }
 4088 
 4089 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
 4090 {
 4091         struct ecore_hwfn *p_hwfn;
 4092         struct ecore_ptt *p_ptt;
 4093         enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
 4094         int j;
 4095 
 4096         for_each_hwfn(p_dev, j) {
 4097                 p_hwfn = &p_dev->hwfns[j];
 4098                 p_ptt = p_hwfn->p_main_ptt;
 4099 
 4100                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
 4101 
 4102                 if (IS_VF(p_dev)) {
 4103                         ecore_vf_pf_int_cleanup(p_hwfn);
 4104                         rc = ecore_vf_pf_reset(p_hwfn);
 4105                         if (rc != ECORE_SUCCESS) {
 4106                                 DP_NOTICE(p_hwfn, true,
 4107                                           "ecore_vf_pf_reset failed. rc = %d.\n",
 4108                                           rc);
 4109                                 rc2 = ECORE_UNKNOWN_ERROR;
 4110                         }
 4111                         continue;
 4112                 }
 4113 
 4114                 /* mark the hw as uninitialized... */
 4115                 p_hwfn->hw_init_done = false;
 4116 
 4117                 /* Send unload command to MCP */
 4118                 if (!p_dev->recov_in_prog) {
 4119                         rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
 4120                         if (rc != ECORE_SUCCESS) {
 4121                                 DP_NOTICE(p_hwfn, false,
 4122                                           "Failed sending a UNLOAD_REQ command. rc = %d.\n",
 4123                                           rc);
 4124                                 rc2 = ECORE_UNKNOWN_ERROR;
 4125                         }
 4126                 }
 4127 
 4128                 OSAL_DPC_SYNC(p_hwfn);
 4129 
 4130                 /* After this point no MFW attentions are expected, which
 4131                  * prevents, e.g., a race between PF stop and DCBx PF update.
 4132                  */
 4133 
 4134                 rc = ecore_sp_pf_stop(p_hwfn);
 4135                 if (rc != ECORE_SUCCESS) {
 4136                         DP_NOTICE(p_hwfn, false,
 4137                                   "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
 4138                                   rc);
 4139                         rc2 = ECORE_UNKNOWN_ERROR;
 4140                 }
 4141 
 4142                 /* perform debug action after PF stop was sent */
 4143                 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
 4144 
 4145                 /* close NIG to BRB gate */
 4146                 ecore_wr(p_hwfn, p_ptt,
 4147                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
 4148 
 4149                 /* close parser */
 4150                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
 4151                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
 4152                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
 4153                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
 4154                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
 4155 
 4156                 /* @@@TBD - clean transmission queues (5.b) */
 4157                 /* @@@TBD - clean BTB (5.c) */
 4158 
 4159                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
 4160 
 4161                 /* @@@TBD - verify DMAE requests are done (8) */
 4162 
 4163                 /* Disable Attention Generation */
 4164                 ecore_int_igu_disable_int(p_hwfn, p_ptt);
 4165                 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
 4166                 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
 4167                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
 4168                 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
 4169                 if (rc != ECORE_SUCCESS) {
 4170                         DP_NOTICE(p_hwfn, true,
 4171                                   "Failed to return IGU CAM to default\n");
 4172                         rc2 = ECORE_UNKNOWN_ERROR;
 4173                 }
 4174 
 4175                 /* Need to wait 1ms to guarantee SBs are cleared */
 4176                 OSAL_MSLEEP(1);
 4177 
 4178                 if (!p_dev->recov_in_prog) {
 4179                         ecore_verify_reg_val(p_hwfn, p_ptt,
 4180                                              QM_REG_USG_CNT_PF_TX, 0);
 4181                         ecore_verify_reg_val(p_hwfn, p_ptt,
 4182                                              QM_REG_USG_CNT_PF_OTHER, 0);
 4183                         /* @@@TBD - assert on incorrect xCFC values (10.b) */
 4184                 }
 4185 
 4186                 /* Disable PF in HW blocks */
 4187                 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
 4188                 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
 4189 
 4190                 if (IS_LEAD_HWFN(p_hwfn) &&
 4191                     OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
 4192                     !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
 4193                         ecore_llh_remove_mac_filter(p_dev, 0,
 4194                                                     p_hwfn->hw_info.hw_mac_addr);
 4195 
 4196                 --qm_lock_ref_cnt;
 4197 #ifdef CONFIG_ECORE_LOCK_ALLOC
 4198                 if (!qm_lock_ref_cnt)
 4199                         OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
 4200 #endif
 4201 
 4202                 if (!p_dev->recov_in_prog) {
 4203                         rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
 4204                         if (rc == ECORE_NOMEM) {
 4205                                 DP_NOTICE(p_hwfn, false,
 4206                                          "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
 4207                                 rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
 4208                         }
 4209                         if (rc != ECORE_SUCCESS) {
 4210                                 DP_NOTICE(p_hwfn, false,
 4211                                           "Failed sending a UNLOAD_DONE command. rc = %d.\n",
 4212                                           rc);
 4213                                 rc2 = ECORE_UNKNOWN_ERROR;
 4214                         }
 4215                 }
 4216         } /* hwfn loop */
 4217 
 4218         if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
 4219                 p_hwfn = ECORE_LEADING_HWFN(p_dev);
 4220                 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
 4221 
 4222                  /* Clear the PF's internal FID_enable in the PXP.
 4223                   * In CMT this should only be done for first hw-function, and
 4224                   * only after all transactions have stopped for all active
 4225                   * hw-functions.
 4226                   */
 4227                 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
 4228                                                   false);
 4229                 if (rc != ECORE_SUCCESS) {
 4230                         DP_NOTICE(p_hwfn, true,
 4231                                   "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
 4232                                   rc);
 4233                         rc2 = ECORE_UNKNOWN_ERROR;
 4234                 }
 4235         }
 4236 
 4237         return rc2;
 4238 }
 4239 
 4240 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
 4241 {
 4242         int j;
 4243 
 4244         for_each_hwfn(p_dev, j) {
 4245                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
 4246                 struct ecore_ptt *p_ptt;
 4247 
 4248                 if (IS_VF(p_dev)) {
 4249                         ecore_vf_pf_int_cleanup(p_hwfn);
 4250                         continue;
 4251                 }
 4252                 p_ptt = ecore_ptt_acquire(p_hwfn);
 4253                 if (!p_ptt)
 4254                         return ECORE_AGAIN;
 4255 
 4256                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n");
 4257 
 4258                 ecore_wr(p_hwfn, p_ptt,
 4259                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
 4260 
 4261                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
 4262                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
 4263                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
 4264                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
 4265                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
 4266 
 4267                 /* @@@TBD - clean transmission queues (5.b) */
 4268                 /* @@@TBD - clean BTB (5.c) */
 4269 
 4270                 /* @@@TBD - verify DMAE requests are done (8) */
 4271 
 4272                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
 4273                 /* Need to wait 1ms to guarantee SBs are cleared */
 4274                 OSAL_MSLEEP(1);
 4275                 ecore_ptt_release(p_hwfn, p_ptt);
 4276         }
 4277 
 4278         return ECORE_SUCCESS;
 4279 }
 4280 
 4281 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
 4282 {
 4283         struct ecore_ptt *p_ptt;
 4284 
 4285         if (IS_VF(p_hwfn->p_dev))
 4286                 return ECORE_SUCCESS;
 4287 
 4288         p_ptt = ecore_ptt_acquire(p_hwfn);
 4289         if (!p_ptt)
 4290                 return ECORE_AGAIN;
 4291 
 4292         /* If the RDMA info is allocated, RoCE is initialized and should
 4293          * be enabled in the searcher.
 4294          */
 4295         if (p_hwfn->p_rdma_info &&
 4296             p_hwfn->p_rdma_info->active &&
 4297             p_hwfn->b_rdma_enabled_in_prs)
 4298                 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
 4299 
 4300         /* Re-open incoming traffic */
 4301         ecore_wr(p_hwfn, p_ptt,
 4302                  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
 4303         ecore_ptt_release(p_hwfn, p_ptt);
 4304 
 4305         return ECORE_SUCCESS;
 4306 }
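
/* ecore_hw_stop_fastpath() and ecore_hw_start_fastpath() are paired around
 * the same NIG-to-BRB gate: writing 0x1 discards incoming traffic and 0x0
 * re-opens it. A minimal sketch that factors out the toggle (hypothetical
 * helper, not part of the driver):
 */
static void ecore_nig_brb_gate_set(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, bool b_close)
{
        /* 0x1 = do-not-forward (gate closed), 0x0 = forward (gate open) */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF,
                 b_close ? 0x1 : 0x0);
}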
 4307 
 4308 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx,
 4309                                         u32 pattern_size, u32 crc)
 4310 {
 4311         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 4312         enum _ecore_status_t rc = ECORE_SUCCESS;
 4313         struct ecore_ptt *p_ptt;
 4314         u32 reg_len = 0;
 4315         u32 reg_crc = 0;
 4316 
 4317         p_ptt = ecore_ptt_acquire(p_hwfn);
 4318         if (!p_ptt)
 4319                 return ECORE_AGAIN;
 4320 
 4321         /* Get length and CRC register offsets */
 4322         switch (reg_idx) {
 4324         case 0:
 4325                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB :
 4326                                 WOL_REG_ACPI_PAT_0_LEN_K2_E5;
 4327                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB :
 4328                                 WOL_REG_ACPI_PAT_0_CRC_K2_E5;
 4329                 break;
 4330         case 1:
 4331                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB :
 4332                                 WOL_REG_ACPI_PAT_1_LEN_K2_E5;
 4333                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB :
 4334                                 WOL_REG_ACPI_PAT_1_CRC_K2_E5;
 4335                 break;
 4336         case 2:
 4337                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB :
 4338                                 WOL_REG_ACPI_PAT_2_LEN_K2_E5;
 4339                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB :
 4340                                 WOL_REG_ACPI_PAT_2_CRC_K2_E5;
 4341                 break;
 4342         case 3:
 4343                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB :
 4344                                 WOL_REG_ACPI_PAT_3_LEN_K2_E5;
 4345                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB :
 4346                                 WOL_REG_ACPI_PAT_3_CRC_K2_E5;
 4347                 break;
 4348         case 4:
 4349                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB :
 4350                                 WOL_REG_ACPI_PAT_4_LEN_K2_E5;
 4351                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB :
 4352                                 WOL_REG_ACPI_PAT_4_CRC_K2_E5;
 4353                 break;
 4354         case 5:
 4355                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB :
 4356                                 WOL_REG_ACPI_PAT_5_LEN_K2_E5;
 4357                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB :
 4358                                 WOL_REG_ACPI_PAT_5_CRC_K2_E5;
 4359                 break;
 4360         case 6:
 4361                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB :
 4362                                 WOL_REG_ACPI_PAT_6_LEN_K2_E5;
 4363                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB :
 4364                                 WOL_REG_ACPI_PAT_6_CRC_K2_E5;
 4365                 break;
 4366         case 7:
 4367                 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB :
 4368                                 WOL_REG_ACPI_PAT_7_LEN_K2_E5;
 4369                 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB :
 4370                                 WOL_REG_ACPI_PAT_7_CRC_K2_E5;
 4371                 break;
 4372         default:
 4373                 rc = ECORE_UNKNOWN_ERROR;
 4374                 goto out;
 4375         }
 4376 
 4377         /* Align the pattern size to 4 */
 4378         while (pattern_size % 4)
 4379                 pattern_size++;
 4380 
 4381         /* Write pattern length and crc value */
 4382         if (ECORE_IS_BB(p_dev)) {
 4383                 rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_len, pattern_size);
 4384                 if (rc != ECORE_SUCCESS) {
 4385                         DP_NOTICE(p_hwfn, false,
 4386                                   "Failed to update the ACPI pattern length\n");
 4387                         goto out;
 4388                 }
 4389 
 4390                 rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_crc, crc);
 4391                 if (rc != ECORE_SUCCESS) {
 4392                         DP_NOTICE(p_hwfn, false,
 4393                                   "Failed to update the ACPI pattern crc value\n");
 4394                         goto out;
 4395                 }
 4396         } else {
 4397                 ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_len, pattern_size);
 4398                 ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_crc, crc);
 4399         }
 4400 
 4401         DP_INFO(p_dev,
 4402                 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] "
 4403                 "reg_len[0x%x=0x%x]\n",
 4404                 reg_idx, reg_crc, crc, reg_len, pattern_size);
 4405 out:
 4406         ecore_ptt_release(p_hwfn, p_ptt);
 4407 
 4408         return rc;
 4409 }
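
/* The pattern-size alignment loop in ecore_set_nwuf_reg() increments byte by
 * byte until the size is a multiple of 4. An equivalent constant-time
 * round-up, shown as a sketch (hypothetical helper, not part of the driver):
 */
static u32 ecore_align4(u32 size)
{
        /* e.g. 5 -> 8, 7 -> 8, 8 -> 8; same result as the increment loop */
        return (size + 3) & ~(u32)0x3;
}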
 4410 
 4411 void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
 4412                             struct ecore_ptt *p_ptt)
 4413 {
 4414         const u32 wake_buffer_clear_offset =
 4415                 ECORE_IS_BB(p_hwfn->p_dev) ?
 4416                 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5;
 4417 
 4418         DP_INFO(p_hwfn->p_dev,
 4419                 "ecore_wol_buffer_clear: reset "
 4420                 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n",
 4421                 wake_buffer_clear_offset);
 4422 
 4423         if (ECORE_IS_BB(p_hwfn->p_dev)) {
 4424                 ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
 4425                 ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
 4426         } else {
 4427                 ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
 4428                 ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
 4429         }
 4430 }
 4431 
 4432 enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
 4433                                          struct ecore_ptt *p_ptt,
 4434                                          struct ecore_wake_info *wake_info)
 4435 {
 4436         struct ecore_dev *p_dev = p_hwfn->p_dev;
 4437         u32 *buf = OSAL_NULL;
 4438         u32 i    = 0;
 4439         const u32 reg_wake_buffer_offset =
 4440                 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB :
 4441                         WOL_REG_WAKE_BUFFER_K2_E5;
 4442 
 4443         wake_info->wk_info    = ecore_rd(p_hwfn, p_ptt,
 4444                                 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB :
 4445                                 WOL_REG_WAKE_INFO_K2_E5);
 4446         wake_info->wk_details = ecore_rd(p_hwfn, p_ptt,
 4447                                 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB :
 4448                                 WOL_REG_WAKE_DETAILS_K2_E5);
 4449         wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt,
 4450                                 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB :
 4451                                 WOL_REG_WAKE_PKT_LEN_K2_E5);
 4452 
 4453         DP_INFO(p_dev,
 4454                 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x "
 4455                 "REG_WAKE_DETAILS=0x%08x "
 4456                 "REG_WAKE_PKT_LEN=0x%08x\n",
 4457                 wake_info->wk_info,
 4458                 wake_info->wk_details,
 4459                 wake_info->wk_pkt_len);
 4460 
 4461         buf = (u32 *)wake_info->wk_buffer;
 4462 
 4463         for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++) {
 4464                 if ((i * sizeof(u32)) >= sizeof(wake_info->wk_buffer)) {
 4465                         DP_INFO(p_dev,
 4466                                 "ecore_get_wake_info: index %d exceeds the wake buffer size\n",
 4467                                 i);
 4468                         break;
 4469                 }
 4472                 buf[i] = ecore_rd(p_hwfn, p_ptt,
 4473                           reg_wake_buffer_offset + (i * sizeof(u32)));
 4474                 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n",
 4475                         i, buf[i]);
 4476         }
 4477 
 4478         ecore_wol_buffer_clear(p_hwfn, p_ptt);
 4479 
 4480         return ECORE_SUCCESS;
 4481 }
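
/* The copy loop above bounds the index both by wk_pkt_len and by the size of
 * wk_buffer. The same bound can be computed up front; a minimal sketch
 * (hypothetical helper, not part of the driver):
 */
static u32 ecore_wake_buf_words(struct ecore_wake_info *wake_info)
{
        /* number of u32 words to copy: whole packet, capped at buffer size */
        return OSAL_MIN_T(u32, wake_info->wk_pkt_len / sizeof(u32),
                          sizeof(wake_info->wk_buffer) / sizeof(u32));
}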
 4482 
 4483 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
 4484 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
 4485 {
 4486         ecore_ptt_pool_free(p_hwfn);
 4487         OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
 4488         p_hwfn->hw_info.p_igu_info = OSAL_NULL;
 4489 }
 4490 
 4491 /* Setup bar access */
 4492 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
 4493 {
 4494         /* clear indirect access */
 4495         if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
 4496                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4497                          PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
 4498                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4499                          PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
 4500                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4501                          PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
 4502                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4503                          PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
 4504         } else {
 4505                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4506                          PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
 4507                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4508                          PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
 4509                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4510                          PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
 4511                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4512                          PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
 4513         }
 4514 
 4515         /* Clean previous pglue_b errors if such exist */
 4516         ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
 4517 
 4518         /* enable internal target-read */
 4519         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 4520                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 4521 }
 4522 
 4523 static void get_function_id(struct ecore_hwfn *p_hwfn)
 4524 {
 4525         /* ME Register */
 4526         p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
 4527                                                   PXP_PF_ME_OPAQUE_ADDR);
 4528 
 4529         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
 4530 
 4531         /* Bits 16-19 from the ME registers are the pf_num */
 4532         p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
 4533         p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
 4534                                       PXP_CONCRETE_FID_PFID);
 4535         p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
 4536                                     PXP_CONCRETE_FID_PORT);
 4537 
 4538         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
 4539                    "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
 4540                    p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
 4541 }
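
/* Worked example (value hypothetical): with concrete_fid == 0x00052abc,
 * bits 16-19 are 0x5, so abs_pf_id == 5; rel_pf_id and port_id are extracted
 * the same way via GET_FIELD() and the PXP_CONCRETE_FID_* masks.
 */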
 4542 
 4543 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
 4544 {
 4545         u32 *feat_num = p_hwfn->hw_info.feat_num;
 4546         struct ecore_sb_cnt_info sb_cnt;
 4547         u32 non_l2_sbs = 0;
 4548 
 4549         OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
 4550         ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
 4551 
 4552 #ifdef CONFIG_ECORE_ROCE
 4553         /* Each RoCE CNQ requires one status block and one CNQ. We divide
 4554          * the status blocks equally between L2 and RoCE, but with
 4555          * consideration as to how many L2 queues / CNQs we actually have.
 4556          */
 4557         if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
 4558 #ifndef __EXTRACT__LINUX__THROW__
 4559                 u32 max_cnqs;
 4560 #endif
 4561 
 4562                 feat_num[ECORE_RDMA_CNQ] =
 4563                         OSAL_MIN_T(u32,
 4564                                    sb_cnt.cnt / 2,
 4565                                    RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM));
 4566 
 4567 #ifndef __EXTRACT__LINUX__THROW__
 4568                 /* Upper layer might require less */
 4569                 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs;
 4570                 if (max_cnqs) {
 4571                         if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE)
 4572                                 max_cnqs = 0;
 4573                         feat_num[ECORE_RDMA_CNQ] =
 4574                                 OSAL_MIN_T(u32,
 4575                                            feat_num[ECORE_RDMA_CNQ],
 4576                                            max_cnqs);
 4577                 }
 4578 #endif
 4579 
 4580                 non_l2_sbs = feat_num[ECORE_RDMA_CNQ];
 4581         }
 4582 #endif
 4583 
 4584         /* Each L2 queue requires one status block and one L2 queue */
 4585         if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
 4586                 /* Start by allocating VF queues, then PF's */
 4587                 feat_num[ECORE_VF_L2_QUE] =
 4588                         OSAL_MIN_T(u32,
 4589                                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
 4590                                    sb_cnt.iov_cnt);
 4591                 feat_num[ECORE_PF_L2_QUE] =
 4592                         OSAL_MIN_T(u32,
 4593                                    sb_cnt.cnt - non_l2_sbs,
 4594                                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
 4595                                    FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
 4596         }
 4597 
 4598         if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
 4599                 feat_num[ECORE_FCOE_CQ] =
 4600                         OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
 4601                                                              ECORE_CMDQS_CQS));
 4602 
 4603         if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
 4604                 feat_num[ECORE_ISCSI_CQ] =
 4605                         OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
 4606                                                              ECORE_CMDQS_CQS));
 4607 
 4608         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
 4609                    "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
 4610                    (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
 4611                    (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
 4612                    (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
 4613                    (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
 4614                    (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
 4615                    (int)sb_cnt.cnt);
 4616 }
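
/* Worked example (numbers hypothetical): on an RDMA personality with
 * sb_cnt.cnt == 64 and 32 ECORE_RDMA_CNQ_RAM resources,
 * feat_num[ECORE_RDMA_CNQ] = min(64 / 2, 32) = 32, so non_l2_sbs == 32 and
 * the PF L2 queues are capped at the remaining 64 - 32 = 32 status blocks.
 */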
 4617 
 4618 const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
 4619 {
 4620         switch (res_id) {
 4621         case ECORE_L2_QUEUE:
 4622                 return "L2_QUEUE";
 4623         case ECORE_VPORT:
 4624                 return "VPORT";
 4625         case ECORE_RSS_ENG:
 4626                 return "RSS_ENG";
 4627         case ECORE_PQ:
 4628                 return "PQ";
 4629         case ECORE_RL:
 4630                 return "RL";
 4631         case ECORE_MAC:
 4632                 return "MAC";
 4633         case ECORE_VLAN:
 4634                 return "VLAN";
 4635         case ECORE_RDMA_CNQ_RAM:
 4636                 return "RDMA_CNQ_RAM";
 4637         case ECORE_ILT:
 4638                 return "ILT";
 4639         case ECORE_LL2_QUEUE:
 4640                 return "LL2_QUEUE";
 4641         case ECORE_CMDQS_CQS:
 4642                 return "CMDQS_CQS";
 4643         case ECORE_RDMA_STATS_QUEUE:
 4644                 return "RDMA_STATS_QUEUE";
 4645         case ECORE_BDQ:
 4646                 return "BDQ";
 4647         case ECORE_SB:
 4648                 return "SB";
 4649         default:
 4650                 return "UNKNOWN_RESOURCE";
 4651         }
 4652 }
 4653 
 4654 static enum _ecore_status_t
 4655 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
 4656                               struct ecore_ptt *p_ptt,
 4657                               enum ecore_resources res_id,
 4658                               u32 resc_max_val,
 4659                               u32 *p_mcp_resp)
 4660 {
 4661         enum _ecore_status_t rc;
 4662 
 4663         rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
 4664                                         resc_max_val, p_mcp_resp);
 4665         if (rc != ECORE_SUCCESS) {
 4666                 DP_NOTICE(p_hwfn, false,
 4667                           "MFW response failure for a max value setting of resource %d [%s]\n",
 4668                           res_id, ecore_hw_get_resc_name(res_id));
 4669                 return rc;
 4670         }
 4671 
 4672         if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
 4673                 DP_INFO(p_hwfn,
 4674                         "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
 4675                         res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
 4676 
 4677         return ECORE_SUCCESS;
 4678 }
 4679 
 4680 static enum _ecore_status_t
 4681 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
 4682                             struct ecore_ptt *p_ptt)
 4683 {
 4684         bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
 4685         u32 resc_max_val, mcp_resp;
 4686         u8 res_id;
 4687         enum _ecore_status_t rc;
 4688 
 4689         for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
 4690                 switch (res_id) {
 4691                 case ECORE_LL2_QUEUE:
 4692                         resc_max_val = MAX_NUM_LL2_RX_QUEUES;
 4693                         break;
 4694                 case ECORE_RDMA_CNQ_RAM:
 4695                         /* No need for a case for ECORE_CMDQS_CQS since
 4696                          * CNQ/CMDQS are the same resource.
 4697                          */
 4698                         resc_max_val = NUM_OF_GLOBAL_QUEUES;
 4699                         break;
 4700                 case ECORE_RDMA_STATS_QUEUE:
 4701                         resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
 4702                                             : RDMA_NUM_STATISTIC_COUNTERS_BB;
 4703                         break;
 4704                 case ECORE_BDQ:
 4705                         resc_max_val = BDQ_NUM_RESOURCES;
 4706                         break;
 4707                 default:
 4708                         continue;
 4709                 }
 4710 
 4711                 rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
 4712                                                    resc_max_val, &mcp_resp);
 4713                 if (rc != ECORE_SUCCESS)
 4714                         return rc;
 4715 
 4716                 /* There's no point in continuing to the next resource if
 4717                  * the command is not supported by the MFW.
 4718                  * We do continue if the command is supported but the resource
 4719                  * is unknown to the MFW. Such a resource will be later
 4720                  * configured with the default allocation values.
 4721                  */
 4722                 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
 4723                         return ECORE_NOTIMPL;
 4724         }
 4725 
 4726         return ECORE_SUCCESS;
 4727 }
 4728 
 4729 static
 4730 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
 4731                                             enum ecore_resources res_id,
 4732                                             u32 *p_resc_num, u32 *p_resc_start)
 4733 {
 4734         u8 num_funcs = p_hwfn->num_funcs_on_engine;
 4735         bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
 4736 
 4737         switch (res_id) {
 4738         case ECORE_L2_QUEUE:
 4739                 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
 4740                                       MAX_NUM_L2_QUEUES_BB) / num_funcs;
 4741                 break;
 4742         case ECORE_VPORT:
 4743                 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
 4744                                       MAX_NUM_VPORTS_BB) / num_funcs;
 4745                 break;
 4746         case ECORE_RSS_ENG:
 4747                 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
 4748                                       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
 4749                 break;
 4750         case ECORE_PQ:
 4751                 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
 4752                                       MAX_QM_TX_QUEUES_BB) / num_funcs;
 4753                 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
 4754                 break;
 4755         case ECORE_RL:
 4756                 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
 4757                 break;
 4758         case ECORE_MAC:
 4759         case ECORE_VLAN:
 4760                 /* Each VFC resource can accommodate both a MAC and a VLAN */
 4761                 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
 4762                 break;
 4763         case ECORE_ILT:
 4764                 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
 4765                                       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
 4766                 break;
 4767         case ECORE_LL2_QUEUE:
 4768                 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
 4769                 break;
 4770         case ECORE_RDMA_CNQ_RAM:
 4771         case ECORE_CMDQS_CQS:
 4772                 /* CNQ/CMDQS are the same resource */
 4773                 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
 4774                 break;
 4775         case ECORE_RDMA_STATS_QUEUE:
 4776                 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
 4777                                       RDMA_NUM_STATISTIC_COUNTERS_BB) /
 4778                               num_funcs;
 4779                 break;
 4780         case ECORE_BDQ:
 4781                 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI &&
 4782                     p_hwfn->hw_info.personality != ECORE_PCI_FCOE)
 4783                         *p_resc_num = 0;
 4784                 else
 4785                         *p_resc_num = 1;
 4786                 break;
 4787         case ECORE_SB:
 4788                 /* Since we want its value to reflect whether the MFW
 4789                  * supports the new scheme, default to 0.
 4790                  */
 4791                 *p_resc_num = 0;
 4792                 break;
 4793         default:
 4794                 return ECORE_INVAL;
 4795         }
 4796 
 4797         switch (res_id) {
 4798         case ECORE_BDQ:
 4799                 if (!*p_resc_num)
 4800                         *p_resc_start = 0;
 4801                 else if (p_hwfn->p_dev->num_ports_in_engine == 4)
 4802                         *p_resc_start = p_hwfn->port_id;
 4803                 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
 4804                         *p_resc_start = p_hwfn->port_id;
 4805                 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
 4806                         *p_resc_start = p_hwfn->port_id + 2;
 4807                 break;
 4808         default:
 4809                 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
 4810                 break;
 4811         }
 4812 
 4813         return ECORE_SUCCESS;
 4814 }
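
/* Worked example (numbers hypothetical): with num_funcs == 4 and a 160-vport
 * device, each PF defaults to 160 / 4 == 40 vports, and the PF with
 * enabled_func_idx == 2 starts at vport 40 * 2 == 80.
 */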
 4815 
 4816 static enum _ecore_status_t
 4817 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
 4818                          bool drv_resc_alloc)
 4819 {
 4820         u32 dflt_resc_num = 0, dflt_resc_start = 0;
 4821         u32 mcp_resp, *p_resc_num, *p_resc_start;
 4822         enum _ecore_status_t rc;
 4823 
 4824         p_resc_num = &RESC_NUM(p_hwfn, res_id);
 4825         p_resc_start = &RESC_START(p_hwfn, res_id);
 4826 
 4827         rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
 4828                                     &dflt_resc_start);
 4829         if (rc != ECORE_SUCCESS) {
 4830                 DP_ERR(p_hwfn,
 4831                        "Failed to get default amount for resource %d [%s]\n",
 4832                         res_id, ecore_hw_get_resc_name(res_id));
 4833                 return rc;
 4834         }
 4835 
 4836 #ifndef ASIC_ONLY
 4837         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
 4838                 *p_resc_num = dflt_resc_num;
 4839                 *p_resc_start = dflt_resc_start;
 4840                 goto out;
 4841         }
 4842 #endif
 4843 
 4844         rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
 4845                                      &mcp_resp, p_resc_num, p_resc_start);
 4846         if (rc != ECORE_SUCCESS) {
 4847                 DP_NOTICE(p_hwfn, false,
 4848                           "MFW response failure for an allocation request for resource %d [%s]\n",
 4849                           res_id, ecore_hw_get_resc_name(res_id));
 4850                 return rc;
 4851         }
 4852 
 4853         /* Default driver values are applied in the following cases:
 4854          * - The resource allocation MB command is not supported by the MFW
 4855          * - There is an internal error in the MFW while processing the request
 4856          * - The resource ID is unknown to the MFW
 4857          */
 4858         if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
 4859                 DP_INFO(p_hwfn,
 4860                         "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
 4861                         res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
 4862                         dflt_resc_num, dflt_resc_start);
 4863                 *p_resc_num = dflt_resc_num;
 4864                 *p_resc_start = dflt_resc_start;
 4865                 goto out;
 4866         }
 4867 
 4868         if ((*p_resc_num != dflt_resc_num ||
 4869              *p_resc_start != dflt_resc_start) &&
 4870             res_id != ECORE_SB) {
 4871                 DP_INFO(p_hwfn,
 4872                         "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
 4873                         res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
 4874                         *p_resc_start, dflt_resc_num, dflt_resc_start,
 4875                         drv_resc_alloc ? " - Applying default values" : "");
 4876                 if (drv_resc_alloc) {
 4877                         *p_resc_num = dflt_resc_num;
 4878                         *p_resc_start = dflt_resc_start;
 4879                 }
 4880         }
 4881 out:
 4882         /* The number of PQs must be a multiple of 8 [the HW granularity].
 4883          * Reduce the number and the start so they fit.
 4884          */
 4885         if ((res_id == ECORE_PQ) &&
 4886             ((*p_resc_num % 8) || (*p_resc_start % 8))) {
 4887                 DP_INFO(p_hwfn,
 4888                         "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
 4889                         *p_resc_num, (*p_resc_num) & ~0x7,
 4890                         *p_resc_start, (*p_resc_start) & ~0x7);
 4891                 *p_resc_num &= ~0x7;
 4892                 *p_resc_start &= ~0x7;
 4893         }
 4894 
 4895         return ECORE_SUCCESS;
 4896 }
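
/* Worked example for the PQ alignment above (numbers hypothetical): an MFW
 * answer of num == 35 (0x23) and start == 29 (0x1d) is reduced by the
 * & ~0x7 masks to num == 32 (0x20) and start == 24 (0x18).
 */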
 4897 
 4898 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
 4899                                                    bool drv_resc_alloc)
 4900 {
 4901         enum _ecore_status_t rc;
 4902         u8 res_id;
 4903 
 4904         for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
 4905                 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
 4906                 if (rc != ECORE_SUCCESS)
 4907                         return rc;
 4908         }
 4909 
 4910         return ECORE_SUCCESS;
 4911 }
 4912 
 4913 static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
 4914                                                       struct ecore_ptt *p_ptt)
 4915 {
 4916         u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn);
 4917         struct ecore_dev *p_dev = p_hwfn->p_dev;
 4918         enum _ecore_status_t rc;
 4919 
 4920         rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
 4921         if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
 4922                 return rc;
 4923         else if (rc == ECORE_NOTIMPL)
 4924                 p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
 4925 
 4926         if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
 4927                 DP_INFO(p_hwfn,
 4928                         "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
 4929                         native_ppfid_idx, p_dev->ppfid_bitmap);
 4930                 p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
 4931         }
 4932 
 4933         return ECORE_SUCCESS;
 4934 }
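
/* Worked example (value hypothetical): if the MFW does not implement the
 * bitmap query and ECORE_PPFID_BY_PFID() yields 3, the fallback bitmap
 * becomes 0x1 << 3 == 0x08, i.e. only the native PPFID is usable.
 */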
 4935 
 4936 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 4937                                               struct ecore_ptt *p_ptt,
 4938                                               bool drv_resc_alloc)
 4939 {
 4940         struct ecore_resc_unlock_params resc_unlock_params;
 4941         struct ecore_resc_lock_params resc_lock_params;
 4942         bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
 4943         u8 res_id;
 4944         enum _ecore_status_t rc;
 4945 #ifndef ASIC_ONLY
 4946         u32 *resc_start = p_hwfn->hw_info.resc_start;
 4947         u32 *resc_num = p_hwfn->hw_info.resc_num;
 4948         /* For AH, an equal share of the ILT lines between the maximal number of
 4949          * PFs is not enough for RoCE. This would be solved by the future
 4950          * resource allocation scheme, but isn't currently present for
 4951          * FPGA/emulation. For now we keep a number that is sufficient for RoCE
 4952          * to work - the BB number of ILT lines divided by its max PFs number.
 4953          */
 4954         u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
 4955 #endif
 4956 
 4957         /* Setting the max values of the soft resources and the following
 4958          * resources allocation queries should be atomic. Since several PFs can
 4959          * run in parallel - a resource lock is needed.
 4960          * If either the resource lock or resource set value commands are not
 4961          * supported - skip the max values setting, release the lock if
 4962          * needed, and proceed to the queries. Other failures, including a
 4963          * failure to acquire the lock, will cause this function to fail.
 4964          * Old drivers that don't acquire the lock can run in parallel, and
 4965          * their allocation values won't be affected by the updated max values.
 4966          */
 4967 
 4968         ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
 4969                                          ECORE_RESC_LOCK_RESC_ALLOC, false);
 4970 
 4971         /* Changes on top of the default values to accommodate parallel attempts
 4972          * of several PFs.
 4973          * [10 x 10 msec by default ==> 20 x 50 msec]
 4974          */
 4975         resc_lock_params.retry_num *= 2;
 4976         resc_lock_params.retry_interval *= 5;
 4977 
 4978         rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
 4979         if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
 4980                 return rc;
 4981         } else if (rc == ECORE_NOTIMPL) {
 4982                 DP_INFO(p_hwfn,
 4983                         "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
 4984         } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
 4985                 DP_NOTICE(p_hwfn, false,
 4986                           "Failed to acquire the resource lock for the resource allocation commands\n");
 4987                 return ECORE_BUSY;
 4988         } else {
 4989                 rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
 4990                 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
 4991                         DP_NOTICE(p_hwfn, false,
 4992                                   "Failed to set the max values of the soft resources\n");
 4993                         goto unlock_and_exit;
 4994                 } else if (rc == ECORE_NOTIMPL) {
 4995                         DP_INFO(p_hwfn,
 4996                                 "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
 4997                         rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
 4998                                                    &resc_unlock_params);
 4999                         if (rc != ECORE_SUCCESS)
 5000                                 DP_INFO(p_hwfn,
 5001                                         "Failed to release the resource lock for the resource allocation commands\n");
 5002                 }
 5003         }
 5004 
 5005         rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
 5006         if (rc != ECORE_SUCCESS)
 5007                 goto unlock_and_exit;
 5008 
 5009         if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
 5010                 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
 5011                                            &resc_unlock_params);
 5012                 if (rc != ECORE_SUCCESS)
 5013                         DP_INFO(p_hwfn,
 5014                                 "Failed to release the resource lock for the resource allocation commands\n");
 5015         }
 5016 
 5017         /* PPFID bitmap */
 5018         if (IS_LEAD_HWFN(p_hwfn)) {
 5019                 rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
 5020                 if (rc != ECORE_SUCCESS)
 5021                         return rc;
 5022         }
 5023 
 5024 #ifndef ASIC_ONLY
 5025         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
 5026                 /* Reduced build contains less PQs */
 5027                 if (!(p_hwfn->p_dev->b_is_emul_full)) {
 5028                         resc_num[ECORE_PQ] = 32;
 5029                         resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
 5030                                                p_hwfn->enabled_func_idx;
 5031                 }
 5032 
  5033                 /* For AH emulation, since there can be at most 16 enabled
  5034                  * PFs, there may not be enough ILT lines for all of them -
  5035                  * allocate only the first PF as RoCE, and make all the
  5036                  * others ETH-only with fewer ILT lines.
 5037                  */
 5038                 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
 5039                         resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
 5040                                                          resc_num[ECORE_ILT],
 5041                                                          roce_min_ilt_lines);
 5042         }
 5043 
 5044         /* Correct the common ILT calculation if PF0 has more */
 5045         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
 5046             p_hwfn->p_dev->b_is_emul_full &&
 5047             p_hwfn->rel_pf_id &&
 5048             resc_num[ECORE_ILT] < roce_min_ilt_lines)
 5049                 resc_start[ECORE_ILT] += roce_min_ilt_lines -
 5050                                          resc_num[ECORE_ILT];
 5051 #endif
 5052 
 5053         /* Sanity for ILT */
 5054         if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
 5055             (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
 5056                 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
 5057                           RESC_START(p_hwfn, ECORE_ILT),
 5058                           RESC_END(p_hwfn, ECORE_ILT) - 1);
 5059                 return ECORE_INVAL;
 5060         }
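               /* (As its use above implies, RESC_END() denotes the first ILT
                * line past the hwfn's range - RESC_START() + RESC_NUM() -
                * hence the "- 1" in the printed range.)
                */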
 5061 
 5062         /* This will also learn the number of SBs from MFW */
 5063         if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
 5064                 return ECORE_INVAL;
 5065 
 5066         ecore_hw_set_feat(p_hwfn);
 5067 
 5068         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
 5069                    "The numbers for each resource are:\n");
 5070         for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
 5071                 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
 5072                            ecore_hw_get_resc_name(res_id),
 5073                            RESC_NUM(p_hwfn, res_id),
 5074                            RESC_START(p_hwfn, res_id));
 5075 
 5076         return ECORE_SUCCESS;
 5077 
 5078 unlock_and_exit:
 5079         if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
 5080                 ecore_mcp_resc_unlock(p_hwfn, p_ptt,
 5081                                       &resc_unlock_params);
 5082         return rc;
 5083 }
 5084 
 5085 static enum _ecore_status_t
 5086 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
 5087                       struct ecore_ptt *p_ptt,
 5088                       struct ecore_hw_prepare_params *p_params)
 5089 {
 5090         u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
 5091         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
 5092         struct ecore_mcp_link_capabilities *p_caps;
 5093         struct ecore_mcp_link_params *link;
 5094         enum _ecore_status_t rc;
 5095         u32 dcbx_mode;  /* __LINUX__THROW__ */
 5096 
 5097         /* Read global nvm_cfg address */
 5098         nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
 5099 
 5100         /* Verify MCP has initialized it */
 5101         if (!nvm_cfg_addr) {
 5102                 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
 5103                 if (p_params->b_relaxed_probe)
 5104                         p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
 5105                 return ECORE_INVAL;
 5106         }
 5107 
  5108         /* Read nvm_cfg1 (notice this is just an offset, not an offsize) (TBD) */
 5109         nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
 5110 
 5111         addr = MCP_REG_SCRATCH  + nvm_cfg1_offset +
 5112                    OFFSETOF(struct nvm_cfg1, glob) +
 5113                    OFFSETOF(struct nvm_cfg1_glob, core_cfg);
 5114 
 5115         core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
 5116 
 5117         switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
 5118                 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
 5119         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
 5120                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
 5121                 break;
 5122         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
 5123                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
 5124                 break;
 5125         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
 5126                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
 5127                 break;
 5128         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
 5129                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
 5130                 break;
 5131         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
 5132                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
 5133                 break;
 5134         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
 5135                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
 5136                 break;
 5137         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
 5138                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
 5139                 break;
 5140         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
 5141                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
 5142                 break;
 5143         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
 5144                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
 5145                 break;
 5146         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
 5147                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
 5148                 break;
 5149         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
 5150                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
 5151                 break;
 5152         default:
 5153                 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
 5154                           core_cfg);
 5155                 break;
 5156         }
 5157 
 5158 #ifndef __EXTRACT__LINUX__THROW__
 5159         /* Read DCBX configuration */
 5160         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
 5161                         OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
 5162         dcbx_mode = ecore_rd(p_hwfn, p_ptt,
 5163                              port_cfg_addr +
 5164                              OFFSETOF(struct nvm_cfg1_port, generic_cont0));
 5165         dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
 5166                 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
 5167         switch (dcbx_mode) {
 5168         case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
 5169                 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
 5170                 break;
 5171         case NVM_CFG1_PORT_DCBX_MODE_CEE:
 5172                 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
 5173                 break;
 5174         case NVM_CFG1_PORT_DCBX_MODE_IEEE:
 5175                 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
 5176                 break;
 5177         default:
 5178                 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
 5179         }
 5180 #endif
 5181 
 5182         /* Read default link configuration */
 5183         link = &p_hwfn->mcp_info->link_input;
 5184         p_caps = &p_hwfn->mcp_info->link_capabilities;
 5185         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
 5186                         OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
 5187         link_temp = ecore_rd(p_hwfn, p_ptt,
 5188                              port_cfg_addr +
 5189                              OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
 5190         link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
 5191         link->speed.advertised_speeds = link_temp;
 5192         p_caps->speed_capabilities = link->speed.advertised_speeds;
 5193 
 5194         link_temp = ecore_rd(p_hwfn, p_ptt,
 5195                                  port_cfg_addr +
 5196                                  OFFSETOF(struct nvm_cfg1_port, link_settings));
 5197         switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
 5198                 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
 5199         case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
 5200                 link->speed.autoneg = true;
 5201                 break;
 5202         case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
 5203                 link->speed.forced_speed = 1000;
 5204                 break;
 5205         case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
 5206                 link->speed.forced_speed = 10000;
 5207                 break;
 5208         case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
 5209                 link->speed.forced_speed = 20000;
 5210                 break;
 5211         case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
 5212                 link->speed.forced_speed = 25000;
 5213                 break;
 5214         case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
 5215                 link->speed.forced_speed = 40000;
 5216                 break;
 5217         case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
 5218                 link->speed.forced_speed = 50000;
 5219                 break;
 5220         case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
 5221                 link->speed.forced_speed = 100000;
 5222                 break;
 5223         default:
 5224                 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
 5225                           link_temp);
 5226         }
 5227 
 5228         p_caps->default_speed = link->speed.forced_speed; /* __LINUX__THROW__ */
 5229         p_caps->default_speed_autoneg = link->speed.autoneg;
 5230 
 5231         link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
 5232         link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
 5233         link->pause.autoneg = !!(link_temp &
 5234                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
 5235         link->pause.forced_rx = !!(link_temp &
 5236                                    NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
 5237         link->pause.forced_tx = !!(link_temp &
 5238                                    NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
 5239         link->loopback_mode = 0;
 5240 
 5241         if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
 5242                 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
 5243                                      OFFSETOF(struct nvm_cfg1_port, ext_phy));
 5244                 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
 5245                 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
 5246                 p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
 5247                 link->eee.enable = true;
 5248                 switch (link_temp) {
 5249                 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
 5250                         p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
 5251                         link->eee.enable = false;
 5252                         break;
 5253                 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
 5254                         p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
 5255                         break;
 5256                 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
 5257                         p_caps->eee_lpi_timer =
 5258                                 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
 5259                         break;
 5260                 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
 5261                         p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
 5262                         break;
 5263                 }
 5264 
 5265                 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
 5266                 link->eee.tx_lpi_enable = link->eee.enable;
 5267                 link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
 5268         } else {
 5269                 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
 5270         }
 5271 
 5272         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 5273                    "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
 5274                    link->speed.forced_speed, link->speed.advertised_speeds,
 5275                    link->speed.autoneg, link->pause.autoneg,
 5276                    p_caps->default_eee, p_caps->eee_lpi_timer);
 5277 
 5278         /* Read Multi-function information from shmem */
 5279         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
 5280                    OFFSETOF(struct nvm_cfg1, glob) +
 5281                    OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
 5282 
 5283         generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
 5284 
 5285         mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
 5286                   NVM_CFG1_GLOB_MF_MODE_OFFSET;
 5287 
 5288         switch (mf_mode) {
 5289         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
 5290                 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
 5291                 break;
 5292         case NVM_CFG1_GLOB_MF_MODE_UFP:
 5293                 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
 5294                                          1 << ECORE_MF_LLH_PROTO_CLSS |
 5295                                          1 << ECORE_MF_UFP_SPECIFIC |
 5296                                          1 << ECORE_MF_8021Q_TAGGING;
 5297                 break;
 5298         case NVM_CFG1_GLOB_MF_MODE_BD:
 5299                 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
 5300                                          1 << ECORE_MF_LLH_PROTO_CLSS |
 5301                                          1 << ECORE_MF_8021AD_TAGGING;
 5302                 break;
 5303         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
 5304                 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
 5305                                          1 << ECORE_MF_LLH_PROTO_CLSS |
 5306                                          1 << ECORE_MF_LL2_NON_UNICAST |
 5307                                          1 << ECORE_MF_INTER_PF_SWITCH |
 5308                                          1 << ECORE_MF_DISABLE_ARFS;
 5309                 break;
 5310         case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
 5311                 p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
 5312                                          1 << ECORE_MF_LLH_PROTO_CLSS |
 5313                                          1 << ECORE_MF_LL2_NON_UNICAST;
 5314                 if (ECORE_IS_BB(p_hwfn->p_dev))
 5315                         p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
 5316                 break;
 5317         }
 5318         DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
 5319                 p_hwfn->p_dev->mf_bits);
 5320 
 5321         if (ECORE_IS_CMT(p_hwfn->p_dev))
 5322                 p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
 5323 
 5324 #ifndef __EXTRACT__LINUX__THROW__
 5325         /* It's funny since we have another switch, but it's easier
 5326          * to throw this away in linux this way. Long term, it might be
  5327          * better to have getters for needed ECORE_MF_* fields,
 5328          * convert client code and eliminate this.
 5329          */
 5330         switch (mf_mode) {
 5331         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
 5332                 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
 5333                 break;
 5334         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
 5335                 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
 5336                 break;
 5337         case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
 5338                 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
 5339                 break;
 5340         case NVM_CFG1_GLOB_MF_MODE_UFP:
 5341                 p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
 5342                 break;
 5343         }
 5344 #endif
 5345 
  5346         /* Read device capabilities information from shmem */
 5347         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
 5348                    OFFSETOF(struct nvm_cfg1, glob) +
 5349                    OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
 5350 
 5351         device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
 5352         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
 5353                 OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
 5354                                  &p_hwfn->hw_info.device_capabilities);
 5355         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
 5356                 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
 5357                                  &p_hwfn->hw_info.device_capabilities);
 5358         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
 5359                 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
 5360                                  &p_hwfn->hw_info.device_capabilities);
 5361         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
 5362                 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
 5363                                  &p_hwfn->hw_info.device_capabilities);
 5364         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
 5365                 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
 5366                                  &p_hwfn->hw_info.device_capabilities);
 5367 
 5368         rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 5369         if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
 5370                 rc = ECORE_SUCCESS;
 5371                 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
 5372         }
 5373 
 5374         return rc;
 5375 }
 5376 
 5377 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
 5378                                 struct ecore_ptt *p_ptt)
 5379 {
 5380         u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
 5381         u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
 5382         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5383 
 5384         num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
 5385 
 5386         /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
 5387          * in the other bits are selected.
 5388          * Bits 1-15 are for functions 1-15, respectively, and their value is
  5389          * '0' only for enabled functions (function 0 always exists and
  5390          * is enabled).
 5391          * In case of CMT in BB, only the "even" functions are enabled, and thus
 5392          * the number of functions for both hwfns is learnt from the same bits.
 5393          */
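               /* Worked example with a hypothetical register value, assuming
                * an AH device where rel_pf_id == abs_pf_id:
                *   reg_function_hide = 0x0000fff5
                *     bit 0 = 1    -> the bypass values in bits 1-15 are valid
                *     bits 1,3 = 0 -> PFs 1 and 3 are enabled; PF 2 and
                *                     PFs 4-15 are hidden.
                *   tmp = ~0xfff5 & 0xfffe = 0x000a -> two more enabled PFs,
                *   so num_funcs = 1 (PF 0) + 2 = 3.
                *   For the PF with abs_pf_id 3: low_pfs_mask = 0x7, and
                *   0xfff5 & 0xfffe & 0x7 = 0x4 -> one hidden PF below it, so
                *   enabled_func_idx = 3 - 1 = 2.
                */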
 5394         reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
 5395 
 5396         if (reg_function_hide & 0x1) {
 5397                 if (ECORE_IS_BB(p_dev)) {
 5398                         if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
 5399                                 num_funcs = 0;
 5400                                 eng_mask = 0xaaaa;
 5401                         } else {
 5402                                 num_funcs = 1;
 5403                                 eng_mask = 0x5554;
 5404                         }
 5405                 } else {
 5406                         num_funcs = 1;
 5407                         eng_mask = 0xfffe;
 5408                 }
 5409 
 5410                 /* Get the number of the enabled functions on the engine */
 5411                 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
 5412                 while (tmp) {
 5413                         if (tmp & 0x1)
 5414                                 num_funcs++;
 5415                         tmp >>= 0x1;
 5416                 }
 5417 
 5418                 /* Get the PF index within the enabled functions */
 5419                 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
 5420                 tmp = reg_function_hide & eng_mask & low_pfs_mask;
 5421                 while (tmp) {
 5422                         if (tmp & 0x1)
 5423                                 enabled_func_idx--;
 5424                         tmp >>= 0x1;
 5425                 }
 5426         }
 5427 
 5428         p_hwfn->num_funcs_on_engine = num_funcs;
 5429         p_hwfn->enabled_func_idx = enabled_func_idx;
 5430 
 5431 #ifndef ASIC_ONLY
 5432         if (CHIP_REV_IS_FPGA(p_dev)) {
 5433                 DP_NOTICE(p_hwfn, false,
 5434                           "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
 5435                 p_hwfn->num_funcs_on_engine = 4;
 5436         }
 5437 #endif
 5438 
 5439         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
 5440                    "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
 5441                    p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
 5442                    p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
 5443 }
 5444 
 5445 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
 5446                                       struct ecore_ptt *p_ptt)
 5447 {
 5448         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5449         u32 port_mode;
 5450 
 5451 #ifndef ASIC_ONLY
 5452         /* Read the port mode */
 5453         if (CHIP_REV_IS_FPGA(p_dev))
 5454                 port_mode = 4;
 5455         else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
 5456                 /* In CMT on emulation, assume 1 port */
 5457                 port_mode = 1;
 5458         else
 5459 #endif
 5460         port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
 5461 
 5462         if (port_mode < 3) {
 5463                 p_dev->num_ports_in_engine = 1;
 5464         } else if (port_mode <= 5) {
 5465                 p_dev->num_ports_in_engine = 2;
 5466         } else {
 5467                 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
  5468                           port_mode);
 5469 
 5470                 /* Default num_ports_in_engine to something */
 5471                 p_dev->num_ports_in_engine = 1;
 5472         }
 5473 }
 5474 
 5475 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
 5476                                          struct ecore_ptt *p_ptt)
 5477 {
 5478         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5479         u32 port;
 5480         int i;
 5481 
 5482         p_dev->num_ports_in_engine = 0;
 5483 
 5484 #ifndef ASIC_ONLY
 5485         if (CHIP_REV_IS_EMUL(p_dev)) {
 5486                 port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
 5487                 switch ((port & 0xf000) >> 12) {
 5488                 case 1:
 5489                         p_dev->num_ports_in_engine = 1;
 5490                         break;
 5491                 case 3:
 5492                         p_dev->num_ports_in_engine = 2;
 5493                         break;
 5494                 case 0xf:
 5495                         p_dev->num_ports_in_engine = 4;
 5496                         break;
 5497                 default:
 5498                         DP_NOTICE(p_hwfn, false,
 5499                                   "Unknown port mode in ECO_RESERVED %08x\n",
 5500                                   port);
 5501                 }
 5502         } else
 5503 #endif
 5504         for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
 5505                 port = ecore_rd(p_hwfn, p_ptt,
 5506                                 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4));
 5507                 if (port & 1)
 5508                         p_dev->num_ports_in_engine++;
 5509         }
 5510 
 5511         if (!p_dev->num_ports_in_engine) {
 5512                 DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
 5513 
 5514                 /* Default num_ports_in_engine to something */
 5515                 p_dev->num_ports_in_engine = 1;
 5516         }
 5517 }
 5518 
 5519 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
 5520                                    struct ecore_ptt *p_ptt)
 5521 {
 5522         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5523 
 5524         /* Determine the number of ports per engine */
 5525         if (ECORE_IS_BB(p_dev))
 5526                 ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
 5527         else
 5528                 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
 5529 
 5530         /* Get the total number of ports of the device */
 5531         if (ECORE_IS_CMT(p_dev)) {
 5532                 /* In CMT there is always only one port */
 5533                 p_dev->num_ports = 1;
 5534 #ifndef ASIC_ONLY
 5535         } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
 5536                 p_dev->num_ports = p_dev->num_ports_in_engine *
 5537                                    ecore_device_num_engines(p_dev);
 5538 #endif
 5539         } else {
 5540                 u32 addr, global_offsize, global_addr;
 5541 
 5542                 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
 5543                                             PUBLIC_GLOBAL);
 5544                 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
 5545                 global_addr = SECTION_ADDR(global_offsize, 0);
 5546                 addr = global_addr + OFFSETOF(struct public_global, max_ports);
 5547                 p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
 5548         }
 5549 }
 5550 
 5551 static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
 5552                                    struct ecore_ptt *p_ptt)
 5553 {
 5554         struct ecore_mcp_link_capabilities *p_caps;
 5555         u32 eee_status;
 5556 
 5557         p_caps = &p_hwfn->mcp_info->link_capabilities;
 5558         if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
 5559                 return;
 5560 
 5561         p_caps->eee_speed_caps = 0;
 5562         eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
 5563                               OFFSETOF(struct public_port, eee_status));
 5564         eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
 5565                         EEE_SUPPORTED_SPEED_OFFSET;
 5566         if (eee_status & EEE_1G_SUPPORTED)
 5567                 p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
  5568         if (eee_status & EEE_10G_SUPPORTED)
 5569                 p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
 5570 }
 5571 
 5572 static enum _ecore_status_t
 5573 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
 5574                   enum ecore_pci_personality personality,
 5575                   struct ecore_hw_prepare_params *p_params)
 5576 {
 5577         bool drv_resc_alloc = p_params->drv_resc_alloc;
 5578         enum _ecore_status_t rc;
 5579 
  5580         /* Since all information is common, only the first hwfn should do this */
 5581         if (IS_LEAD_HWFN(p_hwfn)) {
 5582                 rc = ecore_iov_hw_info(p_hwfn);
 5583                 if (rc != ECORE_SUCCESS) {
 5584                         if (p_params->b_relaxed_probe)
 5585                                 p_params->p_relaxed_res =
 5586                                                 ECORE_HW_PREPARE_BAD_IOV;
 5587                         else
 5588                                 return rc;
 5589                 }
 5590         }
 5591 
 5592         if (IS_LEAD_HWFN(p_hwfn))
 5593                 ecore_hw_info_port_num(p_hwfn, p_ptt);
 5594 
 5595         ecore_mcp_get_capabilities(p_hwfn, p_ptt);
 5596 
 5597 #ifndef ASIC_ONLY
 5598         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
 5599 #endif
 5600         rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
 5601         if (rc != ECORE_SUCCESS)
 5602                 return rc;
 5603 #ifndef ASIC_ONLY
 5604         }
 5605 #endif
 5606 
 5607         rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
 5608         if (rc != ECORE_SUCCESS) {
 5609                 if (p_params->b_relaxed_probe)
 5610                         p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
 5611                 else
 5612                         return rc;
 5613         }
 5614 
 5615 #ifndef ASIC_ONLY
 5616         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
 5617 #endif
 5618         OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
 5619                     p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
 5620 #ifndef ASIC_ONLY
 5621         } else {
 5622                 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};
 5623 
 5624                 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
 5625                 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
 5626         }
 5627 #endif
 5628 
 5629         if (ecore_mcp_is_init(p_hwfn)) {
 5630                 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
 5631                         p_hwfn->hw_info.ovlan =
 5632                                 p_hwfn->mcp_info->func_info.ovlan;
 5633 
 5634                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
 5635 
 5636                 ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
 5637 
 5638                 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
 5639         }
 5640 
 5641         if (personality != ECORE_PCI_DEFAULT) {
 5642                 p_hwfn->hw_info.personality = personality;
 5643         } else if (ecore_mcp_is_init(p_hwfn)) {
 5644                 enum ecore_pci_personality protocol;
 5645 
 5646                 protocol = p_hwfn->mcp_info->func_info.protocol;
 5647                 p_hwfn->hw_info.personality = protocol;
 5648         }
 5649 
 5650 #ifndef ASIC_ONLY
  5651         /* To overcome ILT lack for emulation, at least until we have a
  5652          * definite answer from the system about it, allow only PF0 to be RoCE.
 5653          */
 5654         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
 5655                 if (!p_hwfn->rel_pf_id)
 5656                         p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
 5657                 else
 5658                         p_hwfn->hw_info.personality = ECORE_PCI_ETH;
 5659         }
 5660 #endif
 5661 
  5662         /* Although in BB some configurations may support more than 4 TCs,
  5663          * using them can incur a performance penalty in some cases. 4
 5664          * represents a good tradeoff between performance and flexibility.
 5665          */
 5666         p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
 5667 
 5668         /* start out with a single active tc. This can be increased either
  5669          * by dcbx negotiation or by the upper layer driver.
 5670          */
 5671         p_hwfn->hw_info.num_active_tc = 1;
 5672 
 5673         ecore_get_num_funcs(p_hwfn, p_ptt);
 5674 
 5675         if (ecore_mcp_is_init(p_hwfn))
 5676                 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
 5677 
 5678         /* In case of forcing the driver's default resource allocation, calling
 5679          * ecore_hw_get_resc() should come after initializing the personality
 5680          * and after getting the number of functions, since the calculation of
 5681          * the resources/features depends on them.
 5682          * This order is not harmful if not forcing.
 5683          */
 5684         rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
 5685         if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
 5686                 rc = ECORE_SUCCESS;
 5687                 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
 5688         }
 5689 
 5690         return rc;
 5691 }
 5692 
 5693 #define ECORE_MAX_DEVICE_NAME_LEN       (8)
 5694 
 5695 void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
 5696 {
 5697         u8 n;
 5698 
 5699         n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
 5700         OSAL_SNPRINTF(name, n, "%s %c%d", ECORE_IS_BB(p_dev) ? "BB" : "AH",
 5701                       'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
 5702 }
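
       /* Illustration with hypothetical values: given max_chars >= 8, a BB
        * device with chip_rev 1 and chip_metal 0 is named "BB B0" by the
        * function above (OSAL_SNPRINTF truncates to the buffer size and
        * NUL-terminates).
        */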
 5703 
 5704 static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
 5705                                                struct ecore_ptt *p_ptt)
 5706 {
 5707         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5708         u16 device_id_mask;
 5709         u32 tmp;
 5710 
 5711         /* Read Vendor Id / Device Id */
 5712         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
 5713                                   &p_dev->vendor_id);
 5714         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
 5715                                   &p_dev->device_id);
 5716 
 5717         /* Determine type */
 5718         device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
 5719         switch (device_id_mask) {
 5720         case ECORE_DEV_ID_MASK_BB:
 5721                 p_dev->type = ECORE_DEV_TYPE_BB;
 5722                 break;
 5723         case ECORE_DEV_ID_MASK_AH:
 5724                 p_dev->type = ECORE_DEV_TYPE_AH;
 5725                 break;
 5726         case ECORE_DEV_ID_MASK_E5:
 5727                 p_dev->type = ECORE_DEV_TYPE_E5;
 5728                 break;
 5729         default:
 5730                 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
 5731                           p_dev->device_id);
 5732                 return ECORE_ABORTED;
 5733         }
 5734 
 5735         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
 5736         p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
 5737         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
 5738         p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
 5739 
 5740         /* Learn number of HW-functions */
 5741         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
 5742 
 5743         if (tmp & (1 << p_hwfn->rel_pf_id)) {
 5744                 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
 5745                 p_dev->num_hwfns = 2;
 5746         } else {
 5747                 p_dev->num_hwfns = 1;
 5748         }
 5749 
 5750 #ifndef ASIC_ONLY
 5751         if (CHIP_REV_IS_EMUL(p_dev)) {
 5752                 /* For some reason we have problems with this register
 5753                  * in B0 emulation; Simply assume no CMT
 5754                  */
 5755                 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n");
 5756                 p_dev->num_hwfns = 1;
 5757         }
 5758 #endif
 5759 
 5760         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
 5761         p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
 5762         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
 5763         p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
 5764 
 5765         DP_INFO(p_dev->hwfns,
 5766                 "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
 5767                 ECORE_IS_BB(p_dev) ? "BB" : "AH",
 5768                 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
 5769                 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
 5770                 p_dev->chip_metal);
 5771 
 5772         if (ECORE_IS_BB_A0(p_dev)) {
 5773                 DP_NOTICE(p_dev->hwfns, false,
 5774                           "The chip type/rev (BB A0) is not supported!\n");
 5775                 return ECORE_ABORTED;
 5776         }
 5777 
 5778 #ifndef ASIC_ONLY
 5779         if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
 5780                 ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
 5781 
 5782         if (CHIP_REV_IS_EMUL(p_dev)) {
 5783                 tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
 5784                 if (tmp & (1 << 29)) {
 5785                         DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n");
 5786                         p_dev->b_is_emul_full = true;
 5787                 } else {
 5788                         DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n");
 5789                 }
 5790         }
 5791 #endif
 5792 
 5793         return ECORE_SUCCESS;
 5794 }
 5795 
 5796 #ifndef LINUX_REMOVE
 5797 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev)
 5798 {
 5799         int j;
 5800 
 5801         if (IS_VF(p_dev))
 5802                 return;
 5803 
 5804         for_each_hwfn(p_dev, j) {
 5805                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
 5806 
 5807                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n");
 5808 
 5809                 p_hwfn->hw_init_done = false;
 5810 
 5811                 ecore_ptt_invalidate(p_hwfn);
 5812         }
 5813 }
 5814 
 5815 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev)
 5816 {
 5817         int j = 0;
 5818 
 5819         if (IS_VF(p_dev))
 5820                 return;
 5821 
 5822         for_each_hwfn(p_dev, j) {
 5823                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
 5824                 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 5825 
 5826                 ecore_hw_hwfn_prepare(p_hwfn);
 5827 
 5828                 if (!p_ptt)
 5829                         DP_NOTICE(p_hwfn, false, "ptt acquire failed\n");
 5830                 else {
 5831                         ecore_load_mcp_offsets(p_hwfn, p_ptt);
 5832                         ecore_ptt_release(p_hwfn, p_ptt);
 5833                 }
 5834                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n");
 5835         }
 5836 }
 5837 
 5838 #endif
 5839 
 5840 static enum _ecore_status_t
 5841 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
 5842                         void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
 5843                         struct ecore_hw_prepare_params *p_params)
 5844 {
 5845         struct ecore_mdump_retain_data mdump_retain;
 5846         struct ecore_dev *p_dev = p_hwfn->p_dev;
 5847         struct ecore_mdump_info mdump_info;
 5848         enum _ecore_status_t rc = ECORE_SUCCESS;
 5849 
 5850         /* Split PCI bars evenly between hwfns */
 5851         p_hwfn->regview = p_regview;
 5852         p_hwfn->doorbells = p_doorbells;
 5853         p_hwfn->db_phys_addr = db_phys_addr;
 5854 
 5855 #ifndef LINUX_REMOVE
  5856         p_hwfn->reg_offset = (u8 *)p_hwfn->regview - (u8 *)p_hwfn->p_dev->regview;
  5857         p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - (u8 *)p_hwfn->p_dev->doorbells;
 5858 #endif
 5859 
 5860         if (IS_VF(p_dev))
 5861                 return ecore_vf_hw_prepare(p_hwfn);
 5862 
 5863         /* Validate that chip access is feasible */
 5864         if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
 5865                 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n");
 5866                 if (p_params->b_relaxed_probe)
 5867                         p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
 5868                 return ECORE_INVAL;
 5869         }
 5870 
 5871         get_function_id(p_hwfn);
 5872 
 5873         /* Allocate PTT pool */
 5874         rc = ecore_ptt_pool_alloc(p_hwfn);
 5875         if (rc) {
 5876                 DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
 5877                 if (p_params->b_relaxed_probe)
 5878                         p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
 5879                 goto err0;
 5880         }
 5881 
 5882         /* Allocate the main PTT */
 5883         p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
 5884 
 5885         /* First hwfn learns basic information, e.g., number of hwfns */
 5886         if (!p_hwfn->my_id) {
 5887                 rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
 5888                 if (rc != ECORE_SUCCESS) {
 5889                         if (p_params->b_relaxed_probe)
 5890                                 p_params->p_relaxed_res =
 5891                                         ECORE_HW_PREPARE_FAILED_DEV;
 5892                         goto err1;
 5893                 }
 5894         }
 5895 
 5896         ecore_hw_hwfn_prepare(p_hwfn);
 5897 
 5898         /* Initialize MCP structure */
 5899         rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
 5900         if (rc) {
 5901                 DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
 5902                 if (p_params->b_relaxed_probe)
 5903                         p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
 5904                 goto err1;
 5905         }
 5906 
 5907         /* Read the device configuration information from the HW and SHMEM */
 5908         rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
 5909                                p_params->personality, p_params);
 5910         if (rc) {
 5911                 DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
 5912                 goto err2;
 5913         }
 5914 
 5915         /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
  5916          * called, since among other things it sets the number of ports in an engine.
 5917          */
 5918         if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
 5919             !p_dev->recov_in_prog) {
 5920                 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
 5921                 if (rc != ECORE_SUCCESS)
 5922                         DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
 5923         }
 5924 
 5925         /* Check if mdump logs/data are present and update the epoch value */
 5926         if (IS_LEAD_HWFN(p_hwfn)) {
 5927 #ifndef ASIC_ONLY
 5928                 if (!CHIP_REV_IS_EMUL(p_dev)) {
 5929 #endif
 5930                 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
 5931                                               &mdump_info);
 5932                 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
 5933                         DP_NOTICE(p_hwfn, false,
 5934                                   "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
 5935 
 5936                 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
 5937                                                 &mdump_retain);
 5938                 if (rc == ECORE_SUCCESS && mdump_retain.valid)
 5939                         DP_NOTICE(p_hwfn, false,
 5940                                   "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
 5941                                   mdump_retain.epoch, mdump_retain.pf,
 5942                                   mdump_retain.status);
 5943 
 5944                 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
 5945                                            p_params->epoch);
 5946 #ifndef ASIC_ONLY
 5947                 }
 5948 #endif
 5949         }
 5950 
 5951         /* Allocate the init RT array and initialize the init-ops engine */
 5952         rc = ecore_init_alloc(p_hwfn);
 5953         if (rc) {
 5954                 DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
 5955                 if (p_params->b_relaxed_probe)
 5956                         p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
 5957                 goto err2;
 5958         }
 5959 
 5960 #ifndef ASIC_ONLY
 5961         if (CHIP_REV_IS_FPGA(p_dev)) {
 5962                 DP_NOTICE(p_hwfn, false,
 5963                           "FPGA: workaround; Prevent DMAE parities\n");
 5964                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
 5965                          7);
 5966 
 5967                 DP_NOTICE(p_hwfn, false,
 5968                           "FPGA: workaround: Set VF bar0 size\n");
 5969                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
 5970                          PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
 5971         }
 5972 #endif
 5973 
 5974         return rc;
 5975 err2:
 5976         if (IS_LEAD_HWFN(p_hwfn))
 5977                 ecore_iov_free_hw_info(p_dev);
 5978         ecore_mcp_free(p_hwfn);
 5979 err1:
 5980         ecore_hw_hwfn_free(p_hwfn);
 5981 err0:
 5982         return rc;
 5983 }
 5984 
 5985 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
 5986                                       struct ecore_hw_prepare_params *p_params)
 5987 {
 5988         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 5989         enum _ecore_status_t rc;
 5990 
 5991         p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
 5992         p_dev->allow_mdump = p_params->allow_mdump;
 5993 
 5994         if (p_params->b_relaxed_probe)
 5995                 p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
 5996 
 5997         /* Store the precompiled init data ptrs */
 5998         if (IS_PF(p_dev))
 5999                 ecore_init_iro_array(p_dev);
 6000 
 6001         /* Initialize the first hwfn - will learn number of hwfns */
 6002         rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
 6003                                      p_dev->doorbells, p_dev->db_phys_addr,
 6004                                      p_params);
 6005         if (rc != ECORE_SUCCESS)
 6006                 return rc;
 6007 
 6008         p_params->personality = p_hwfn->hw_info.personality;
 6009 
  6010         /* initialize 2nd hwfn if necessary */
 6011         if (ECORE_IS_CMT(p_dev)) {
 6012                 void OSAL_IOMEM *p_regview, *p_doorbell;
 6013                 u8 OSAL_IOMEM *addr;
 6014                 u64 db_phys_addr;
 6015                 u32 offset;
 6016 
 6017                 /* adjust bar offset for second engine */
 6018                 offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
 6019                                            BAR_ID_0) / 2;
 6020                 addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
 6021                 p_regview = (void OSAL_IOMEM *)addr;
 6022 
 6023                 offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
 6024                                            BAR_ID_1) / 2;
 6025                 addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
 6026                 p_doorbell = (void OSAL_IOMEM *)addr;
 6027                 db_phys_addr = p_dev->db_phys_addr + offset;
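
                       /* Illustration with hypothetical sizes: a 512 KB
                        * BAR_ID_0 and a 128 KB BAR_ID_1 place hwfn[1]'s
                        * regview at p_dev->regview + 256 KB and its doorbells
                        * at p_dev->doorbells + 64 KB - each engine owns half
                        * of each bar.
                        */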
 6028 
 6029                 /* prepare second hw function */
 6030                 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
 6031                                              p_doorbell, db_phys_addr,
 6032                                              p_params);
 6033 
 6034                 /* in case of error, need to free the previously
  6035                  * initialized hwfn 0.
 6036                  */
 6037                 if (rc != ECORE_SUCCESS) {
 6038                         if (p_params->b_relaxed_probe)
 6039                                 p_params->p_relaxed_res =
 6040                                                 ECORE_HW_PREPARE_FAILED_ENG2;
 6041 
 6042                         if (IS_PF(p_dev)) {
 6043                                 ecore_init_free(p_hwfn);
 6044                                 ecore_mcp_free(p_hwfn);
 6045                                 ecore_hw_hwfn_free(p_hwfn);
 6046                         } else {
 6047                                 DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
 6048                         }
 6049                         return rc;
 6050                 }
 6051         }
 6052 
 6053         return rc;
 6054 }
 6055 
 6056 void ecore_hw_remove(struct ecore_dev *p_dev)
 6057 {
 6058         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
 6059         int i;
 6060 
 6061         if (IS_PF(p_dev))
 6062                 ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
 6063                                                  ECORE_OV_DRIVER_STATE_NOT_LOADED);
 6064 
 6065         for_each_hwfn(p_dev, i) {
 6066                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 6067 
 6068                 if (IS_VF(p_dev)) {
 6069                         ecore_vf_pf_release(p_hwfn);
 6070                         continue;
 6071                 }
 6072 
 6073                 ecore_init_free(p_hwfn);
 6074                 ecore_hw_hwfn_free(p_hwfn);
 6075                 ecore_mcp_free(p_hwfn);
 6076 
 6077 #ifdef CONFIG_ECORE_LOCK_ALLOC
 6078                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
 6079 #endif
 6080         }
 6081 
 6082         ecore_iov_free_hw_info(p_dev);
 6083 }
 6084 
 6085 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
 6086                                       struct ecore_chain *p_chain)
 6087 {
 6088         void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
 6089         dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
 6090         struct ecore_chain_next *p_next;
 6091         u32 size, i;
 6092 
 6093         if (!p_virt)
 6094                 return;
 6095 
 6096         size = p_chain->elem_size * p_chain->usable_per_page;
 6097 
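               /* Each page ends with a struct ecore_chain_next at byte offset
                * elem_size * usable_per_page, holding the next page's virtual
                * and DMA addresses; the loop below follows that link while
                * freeing the pages.
                */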
 6098         for (i = 0; i < p_chain->page_cnt; i++) {
 6099                 if (!p_virt)
 6100                         break;
 6101 
 6102                 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
 6103                 p_virt_next = p_next->next_virt;
 6104                 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
 6105 
 6106                 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
 6107                                        ECORE_CHAIN_PAGE_SIZE);
 6108 
 6109                 p_virt = p_virt_next;
 6110                 p_phys = p_phys_next;
 6111         }
 6112 }
 6113 
 6114 static void ecore_chain_free_single(struct ecore_dev *p_dev,
 6115                                     struct ecore_chain *p_chain)
 6116 {
 6117         if (!p_chain->p_virt_addr)
 6118                 return;
 6119 
 6120         OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
 6121                                p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
 6122 }
 6123 
 6124 static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
 6125                                  struct ecore_chain *p_chain)
 6126 {
 6127         void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
 6128         u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
 6129         u32 page_cnt = p_chain->page_cnt, i, pbl_size;
 6130 
 6131         if (!pp_virt_addr_tbl)
 6132                 return;
 6133 
 6134         if (!p_pbl_virt)
 6135                 goto out;
 6136 
 6137         for (i = 0; i < page_cnt; i++) {
 6138                 if (!pp_virt_addr_tbl[i])
 6139                         break;
 6140 
 6141                 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
 6142                                        *(dma_addr_t *)p_pbl_virt,
 6143                                        ECORE_CHAIN_PAGE_SIZE);
 6144 
 6145                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
 6146         }
 6147 
 6148         pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
 6149 
 6150         if (!p_chain->b_external_pbl) {
 6151                 OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
 6152                                        p_chain->pbl_sp.p_phys_table, pbl_size);
 6153         }
 6154 out:
 6155         OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
 6156         p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
 6157 }
 6158 
 6159 void ecore_chain_free(struct ecore_dev *p_dev,
 6160                       struct ecore_chain *p_chain)
 6161 {
 6162         switch (p_chain->mode) {
 6163         case ECORE_CHAIN_MODE_NEXT_PTR:
 6164                 ecore_chain_free_next_ptr(p_dev, p_chain);
 6165                 break;
 6166         case ECORE_CHAIN_MODE_SINGLE:
 6167                 ecore_chain_free_single(p_dev, p_chain);
 6168                 break;
 6169         case ECORE_CHAIN_MODE_PBL:
 6170                 ecore_chain_free_pbl(p_dev, p_chain);
 6171                 break;
 6172         }
 6173 }
 6174 
 6175 static enum _ecore_status_t
 6176 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
 6177                                enum ecore_chain_cnt_type cnt_type,
 6178                                osal_size_t elem_size, u32 page_cnt)
 6179 {
 6180         u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
 6181 
 6182         /* The actual chain size can be larger than the maximal possible value
 6183          * after rounding up the requested elements number to pages, and after
  6184          * taking into account the unusable elements (next-ptr elements).
 6185          * The size of a "u16" chain can be (U16_MAX + 1) since the chain
 6186          * size/capacity fields are of a u32 type.
 6187          */
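               /* Illustration, assuming a 4 KB ECORE_CHAIN_PAGE_SIZE: with
                * 8-byte elements, ELEMS_PER_PAGE() yields 512, so a
                * u16-counted chain may span at most (U16_MAX + 1) / 512 = 128
                * pages before this check fails.
                */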
 6188         if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
 6189              chain_size > ((u32)ECORE_U16_MAX + 1)) ||
 6190             (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
 6191              chain_size > ECORE_U32_MAX)) {
 6192                 DP_NOTICE(p_dev, true,
 6193                           "The actual chain size (0x%llx) is larger than the maximal possible value\n",
 6194                           (unsigned long long)chain_size);
 6195                 return ECORE_INVAL;
 6196         }
 6197 
 6198         return ECORE_SUCCESS;
 6199 }
 6200 
 6201 static enum _ecore_status_t
 6202 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
 6203 {
 6204         void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
 6205         dma_addr_t p_phys = 0;
 6206         u32 i;
 6207 
 6208         for (i = 0; i < p_chain->page_cnt; i++) {
 6209                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
 6210                                                  ECORE_CHAIN_PAGE_SIZE);
 6211                 if (!p_virt) {
 6212                         DP_NOTICE(p_dev, false,
 6213                                   "Failed to allocate chain memory\n");
 6214                         return ECORE_NOMEM;
 6215                 }
 6216 
 6217                 if (i == 0) {
 6218                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
 6219                         ecore_chain_reset(p_chain);
 6220                 } else {
 6221                         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
 6222                                                        p_virt, p_phys);
 6223                 }
 6224 
 6225                 p_virt_prev = p_virt;
 6226         }
 6227         /* Last page's next element should point to the beginning of the
 6228          * chain.
 6229          */
 6230         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
 6231                                        p_chain->p_virt_addr,
 6232                                        p_chain->p_phys_addr);
 6233 
 6234         return ECORE_SUCCESS;
 6235 }
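
      /* Editorial sketch of the resulting layout: the chain is a ring of
       * page-sized buffers in which the last element of each page is a
       * next-ptr element pointing at the following page, and the last
       * page's next-ptr points back at the first:
       *
       *   page[0] -> page[1] -> ... -> page[page_cnt - 1] -+
       *      ^                                             |
       *      +---------------------------------------------+
       */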
 6236 
 6237 static enum _ecore_status_t
 6238 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
 6239 {
 6240         dma_addr_t p_phys = 0;
 6241         void *p_virt = OSAL_NULL;
 6242 
 6243         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
 6244         if (!p_virt) {
 6245                 DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
 6246                 return ECORE_NOMEM;
 6247         }
 6248 
 6249         ecore_chain_init_mem(p_chain, p_virt, p_phys);
 6250         ecore_chain_reset(p_chain);
 6251 
 6252         return ECORE_SUCCESS;
 6253 }
 6254 
 6255 static enum _ecore_status_t
 6256 ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
 6257                       struct ecore_chain *p_chain,
 6258                       struct ecore_chain_ext_pbl *ext_pbl)
 6259 {
 6260         u32 page_cnt = p_chain->page_cnt, size, i;
 6261         dma_addr_t p_phys = 0, p_pbl_phys = 0;
 6262         void **pp_virt_addr_tbl = OSAL_NULL;
 6263         u8 *p_pbl_virt = OSAL_NULL;
 6264         void *p_virt = OSAL_NULL;
 6265 
 6266         size = page_cnt * sizeof(*pp_virt_addr_tbl);
 6267         pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
 6268         if (!pp_virt_addr_tbl) {
 6269                 DP_NOTICE(p_dev, false,
 6270                           "Failed to allocate memory for the chain virtual addresses table\n");
 6271                 return ECORE_NOMEM;
 6272         }
 6273 
 6274         /* The PBL table is allocated at its full size up front, since it is
 6275          * expected to be physically contiguous.
 6276          * ecore_chain_init_pbl_mem() is called even in case of an allocation
 6277          * failure, since pp_virt_addr_tbl was previously allocated and must
 6278          * be saved so that it can be freed during the error flow.
 6279          */
 6280         size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
 6281 
 6282         if (ext_pbl == OSAL_NULL) {
 6283                 p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
 6284         } else {
 6285                 p_pbl_virt = ext_pbl->p_pbl_virt;
 6286                 p_pbl_phys = ext_pbl->p_pbl_phys;
 6287                 p_chain->b_external_pbl = true;
 6288         }
 6289 
 6290         ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
 6291                                  pp_virt_addr_tbl);
 6292         if (!p_pbl_virt) {
 6293                 DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
 6294                 return ECORE_NOMEM;
 6295         }
 6296 
 6297         for (i = 0; i < page_cnt; i++) {
 6298                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
 6299                                                  ECORE_CHAIN_PAGE_SIZE);
 6300                 if (!p_virt) {
 6301                         DP_NOTICE(p_dev, false,
 6302                                   "Failed to allocate chain memory\n");
 6303                         return ECORE_NOMEM;
 6304                 }
 6305 
 6306                 if (i == 0) {
 6307                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
 6308                         ecore_chain_reset(p_chain);
 6309                 }
 6310 
 6311                 /* Fill the PBL table with the physical address of the page */
 6312                 *(dma_addr_t *)p_pbl_virt = p_phys;
 6313                 /* Keep the virtual address of the page */
 6314                 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
 6315 
 6316                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
 6317         }
 6318 
 6319         return ECORE_SUCCESS;
 6320 }
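
      /* Editorial sketch of the PBL layout built above: the PBL itself is
       * one physically contiguous table holding a dma_addr_t entry per
       * data page, while pp_virt_addr_tbl is a parallel, host-only shadow
       * used to reach each page's virtual address when freeing:
       *
       *   p_pbl_virt:        [ phys(page 0) | phys(page 1) | ... ]
       *   pp_virt_addr_tbl:  [ virt(page 0) | virt(page 1) | ... ]
       */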
 6321 
 6322 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
 6323                                        enum ecore_chain_use_mode intended_use,
 6324                                        enum ecore_chain_mode mode,
 6325                                        enum ecore_chain_cnt_type cnt_type,
 6326                                        u32 num_elems, osal_size_t elem_size,
 6327                                        struct ecore_chain *p_chain,
 6328                                        struct ecore_chain_ext_pbl *ext_pbl)
 6329 {
 6330         u32 page_cnt;
 6331         enum _ecore_status_t rc = ECORE_SUCCESS;
 6332 
 6333         if (mode == ECORE_CHAIN_MODE_SINGLE)
 6334                 page_cnt = 1;
 6335         else
 6336                 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
 6337 
 6338         rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
 6339                                             page_cnt);
 6340         if (rc) {
 6341                 DP_NOTICE(p_dev, false,
 6342                           "Cannot allocate a chain with the given arguments:\n"
 6343                           "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
 6344                           intended_use, mode, cnt_type, num_elems, elem_size);
 6345                 return rc;
 6346         }
 6347 
 6348         ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
 6349                                 mode, cnt_type, p_dev->dp_ctx);
 6350 
 6351         switch (mode) {
 6352         case ECORE_CHAIN_MODE_NEXT_PTR:
 6353                 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
 6354                 break;
 6355         case ECORE_CHAIN_MODE_SINGLE:
 6356                 rc = ecore_chain_alloc_single(p_dev, p_chain);
 6357                 break;
 6358         case ECORE_CHAIN_MODE_PBL:
 6359                 rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
 6360                 break;
 6361         }
 6362         if (rc)
 6363                 goto nomem;
 6364 
 6365         return ECORE_SUCCESS;
 6366 
 6367 nomem:
 6368         ecore_chain_free(p_dev, p_chain);
 6369         return rc;
 6370 }
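
      /* Editorial usage sketch (not part of the driver; enum names taken
       * from ecore_chain.h, values illustrative only): a minimal
       * allocation/free round trip for a 1024-element, 8-byte-element
       * PBL-mode chain with a driver-owned (non-external) PBL.
       *
       *   struct ecore_chain chain;
       *   enum _ecore_status_t rc;
       *
       *   rc = ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
       *                          ECORE_CHAIN_MODE_PBL,
       *                          ECORE_CHAIN_CNT_TYPE_U16,
       *                          1024, 8, &chain, OSAL_NULL);
       *   if (rc == ECORE_SUCCESS)
       *           ecore_chain_free(p_dev, &chain);
       */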
 6371 
 6372 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
 6373                                        u16 src_id, u16 *dst_id)
 6374 {
 6375         if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
 6376                 u16 min, max;
 6377 
 6378                 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
 6379                 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
 6380                 DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
 6381                           src_id, min, max);
 6382 
 6383                 return ECORE_INVAL;
 6384         }
 6385 
 6386         *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
 6387 
 6388         return ECORE_SUCCESS;
 6389 }
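
      /* Worked example (editorial): if RESC_START(p_hwfn, ECORE_L2_QUEUE)
       * == 16 and RESC_NUM(p_hwfn, ECORE_L2_QUEUE) == 8, the valid
       * relative ids are 0..7 and src_id == 3 maps to the absolute queue
       * id 16 + 3 == 19.  ecore_fw_vport() and ecore_fw_rss_eng() below
       * apply the same translation to their respective resources.
       */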
 6390 
 6391 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
 6392                                     u8 src_id, u8 *dst_id)
 6393 {
 6394         if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
 6395                 u8 min, max;
 6396 
 6397                 min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
 6398                 max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
 6399                 DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n",
 6400                           src_id, min, max);
 6401 
 6402                 return ECORE_INVAL;
 6403         }
 6404 
 6405         *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
 6406 
 6407         return ECORE_SUCCESS;
 6408 }
 6409 
 6410 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
 6411                                       u8 src_id, u8 *dst_id)
 6412 {
 6413         if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
 6414                 u8 min, max;
 6415 
 6416                 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
 6417                 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
 6418                 DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
 6419                           src_id, min, max);
 6420 
 6421                 return ECORE_INVAL;
 6422         }
 6423 
 6424         *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
 6425 
 6426         return ECORE_SUCCESS;
 6427 }
 6428 
 6429 enum _ecore_status_t
 6430 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
 6431                                   struct ecore_ptt *p_ptt)
 6432 {
 6433         if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
 6434                 ecore_wr(p_hwfn, p_ptt,
 6435                          NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
 6436                          1 << (p_hwfn->abs_pf_id / 2));
 6437                 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
 6438                 return ECORE_SUCCESS;
 6439         } else {
 6440                 DP_NOTICE(p_hwfn, false,
 6441                           "This function can't be set as default\n");
 6442                 return ECORE_INVAL;
 6443         }
 6444 }
 6445 
 6446 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
 6447                                                struct ecore_ptt *p_ptt,
 6448                                                u32 hw_addr, void *p_eth_qzone,
 6449                                                osal_size_t eth_qzone_size,
 6450                                                u8 timeset)
 6451 {
 6452         struct coalescing_timeset *p_coal_timeset;
 6453 
 6454         if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
 6455                 DP_NOTICE(p_hwfn, true,
 6456                           "Coalescing configuration not enabled\n");
 6457                 return ECORE_INVAL;
 6458         }
 6459 
 6460         p_coal_timeset = p_eth_qzone;
 6461         OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
 6462         SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
 6463         SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
 6464         ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
 6465 
 6466         return ECORE_SUCCESS;
 6467 }
 6468 
 6469 enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
 6470                                               u16 rx_coal, u16 tx_coal,
 6471                                               void *p_handle)
 6472 {
 6473         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
 6474         enum _ecore_status_t rc = ECORE_SUCCESS;
 6475         struct ecore_ptt *p_ptt;
 6476 
 6477         /* TODO - This configures a single queue's coalescing, yet claims
 6478          * that all queues abide by the same configuration, for both PF
 6479          * and VF.
 6480          */
 6481 
 6482 #ifdef CONFIG_ECORE_SRIOV
 6483         if (IS_VF(p_hwfn->p_dev))
 6484                 return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
 6485                                                 tx_coal, p_cid);
 6486 #endif /* #ifdef CONFIG_ECORE_SRIOV */
 6487 
 6488         p_ptt = ecore_ptt_acquire(p_hwfn);
 6489         if (!p_ptt)
 6490                 return ECORE_AGAIN;
 6491 
 6492         if (rx_coal) {
 6493                 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 6494                 if (rc)
 6495                         goto out;
 6496                 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
 6497         }
 6498 
 6499         if (tx_coal) {
 6500                 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
 6501                 if (rc)
 6502                         goto out;
 6503                 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
 6504         }
 6505 out:
 6506         ecore_ptt_release(p_hwfn, p_ptt);
 6507 
 6508         return rc;
 6509 }
 6510 
 6511 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
 6512                                             struct ecore_ptt *p_ptt,
 6513                                             u16 coalesce,
 6514                                             struct ecore_queue_cid *p_cid)
 6515 {
 6516         struct ustorm_eth_queue_zone eth_qzone;
 6517         u8 timeset, timer_res;
 6518         u32 address;
 6519         enum _ecore_status_t rc;
 6520 
 6521         /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
 6522         if (coalesce <= 0x7F)
 6523                 timer_res = 0;
 6524         else if (coalesce <= 0xFF)
 6525                 timer_res = 1;
 6526         else if (coalesce <= 0x1FF)
 6527                 timer_res = 2;
 6528         else {
 6529                 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
 6530                 return ECORE_INVAL;
 6531         }
 6532         timeset = (u8)(coalesce >> timer_res);
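
              /* Worked example (editorial): coalesce == 400 (0x190) falls
               * into the <= 0x1FF bucket, so timer_res == 2 and
               * timeset == 400 >> 2 == 100; the hardware reconstructs
               * approximately timeset << timer_res == 400, at the cost of
               * the two low-order bits of precision.
               */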
 6533 
 6534         rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
 6535                                      p_cid->sb_igu_id, false);
 6536         if (rc != ECORE_SUCCESS)
 6537                 goto out;
 6538 
 6539         address = BAR0_MAP_REG_USDM_RAM +
 6540                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
 6541 
 6542         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
 6543                                 sizeof(struct ustorm_eth_queue_zone), timeset);
 6544 
 6545 out:
 6548         return rc;
 6549 }
 6550 
 6551 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
 6552                                             struct ecore_ptt *p_ptt,
 6553                                             u16 coalesce,
 6554                                             struct ecore_queue_cid *p_cid)
 6555 {
 6556         struct xstorm_eth_queue_zone eth_qzone;
 6557         u8 timeset, timer_res;
 6558         u32 address;
 6559         enum _ecore_status_t rc;
 6560 
 6561         /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
 6562         if (coalesce <= 0x7F)
 6563                 timer_res = 0;
 6564         else if (coalesce <= 0xFF)
 6565                 timer_res = 1;
 6566         else if (coalesce <= 0x1FF)
 6567                 timer_res = 2;
 6568         else {
 6569                 DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
 6570                 return ECORE_INVAL;
 6571         }
 6572         timeset = (u8)(coalesce >> timer_res);
 6573 
 6574         rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
 6575                                      p_cid->sb_igu_id, true);
 6576         if (rc != ECORE_SUCCESS)
 6577                 goto out;
 6578 
 6579         address = BAR0_MAP_REG_XSDM_RAM +
 6580                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
 6581 
 6582         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
 6583                                 sizeof(struct xstorm_eth_queue_zone), timeset);
 6584 out:
 6585         return rc;
 6586 }
 6587 
 6588 /* Calculate the final WFQ values for all vports and configure them.
 6589  * After this configuration each vport gets an approximate min rate of
 6590  * vport_wfq * min_pf_rate / ECORE_WFQ_UNIT.
 6591  */
 6592 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 6593                                                struct ecore_ptt *p_ptt,
 6594                                                u32 min_pf_rate)
 6595 {
 6596         struct init_qm_vport_params *vport_params;
 6597         int i;
 6598 
 6599         vport_params = p_hwfn->qm_info.qm_vport_params;
 6600 
 6601         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
 6602                 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
 6603 
 6604                 vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
 6605                                             min_pf_rate;
 6606                 ecore_init_vport_wfq(p_hwfn, p_ptt,
 6607                                      vport_params[i].first_tx_pq_id,
 6608                                      vport_params[i].vport_wfq);
 6609         }
 6610 }
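
      /* Worked example (editorial, taking ECORE_WFQ_UNIT as 100, matching
       * the one-percent granularity used in the validations below): for
       * min_pf_rate == 10000 Mbps and a vport with min_speed == 2500 Mbps,
       * vport_wfq == 2500 * 100 / 10000 == 25; inverting the formula gives
       * 25 * 10000 / 100 == 2500 Mbps, matching the requested minimum.
       */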
 6611 
 6612 static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
 6613 {
 6615         int i;
 6616 
 6617         for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
 6618                 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
 6619 }
 6620 
 6621 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 6622                                              struct ecore_ptt *p_ptt)
 6623 {
 6624         struct init_qm_vport_params *vport_params;
 6625         int i;
 6626 
 6627         vport_params = p_hwfn->qm_info.qm_vport_params;
 6628 
 6629         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
 6630                 ecore_init_wfq_default_param(p_hwfn);
 6631                 ecore_init_vport_wfq(p_hwfn, p_ptt,
 6632                                      vport_params[i].first_tx_pq_id,
 6633                                      vport_params[i].vport_wfq);
 6634         }
 6635 }
 6636 
 6637 /* This function performs several validations of the WFQ configuration
 6638  * and of the required min rate for a given vport:
 6639  * 1. req_rate must be at least one percent of min_pf_rate.
 6640  * 2. req_rate must not cause the rates of other vports [those not
 6641  *    explicitly configured for WFQ] to drop below one percent of min_pf_rate.
 6642  * 3. total_req_min_rate [the sum of all vports' min rates] must not exceed min_pf_rate.
 6643  */
 6644 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
 6645                                                  u16 vport_id, u32 req_rate,
 6646                                                  u32 min_pf_rate)
 6647 {
 6648         u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
 6649         int non_requested_count = 0, req_count = 0, i, num_vports;
 6650 
 6651         num_vports = p_hwfn->qm_info.num_vports;
 6652 
 6653         /* Accounting for the vports which are configured for WFQ explicitly */
 6654         for (i = 0; i < num_vports; i++) {
 6655                 u32 tmp_speed;
 6656 
 6657                 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
 6658                         req_count++;
 6659                         tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
 6660                         total_req_min_rate += tmp_speed;
 6661                 }
 6662         }
 6663 
 6664         /* Include current vport data as well */
 6665         req_count++;
 6666         total_req_min_rate += req_rate;
 6667         non_requested_count = num_vports - req_count;
 6668 
 6669         /* validate possible error cases */
 6670         if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
 6671                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6672                            "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
 6673                            vport_id, req_rate, min_pf_rate);
 6674                 return ECORE_INVAL;
 6675         }
 6676 
 6677         /* TBD - support a number of vports greater than 100 */
 6678         if (num_vports > ECORE_WFQ_UNIT) {
 6679                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6680                            "Number of vports is greater than %d\n",
 6681                            ECORE_WFQ_UNIT);
 6682                 return ECORE_INVAL;
 6683         }
 6684 
 6685         if (total_req_min_rate > min_pf_rate) {
 6686                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6687                            "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
 6688                            total_req_min_rate, min_pf_rate);
 6689                 return ECORE_INVAL;
 6690         }
 6691 
 6692         /* Rate left over for the non-requested vports */
 6693         total_left_rate = min_pf_rate - total_req_min_rate;
 6694         left_rate_per_vp = total_left_rate / non_requested_count;
 6695 
 6696         /* Validate that non-requested vports don't drop below 1% of min bw */
 6697         if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
 6698                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6699                            "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
 6700                            left_rate_per_vp, min_pf_rate);
 6701                 return ECORE_INVAL;
 6702         }
 6703 
 6704         /* Now that req_rate for the given vport passes all checks,
 6705          * assign the final WFQ rates to all vports.
 6706          */
 6707         p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
 6708         p_hwfn->qm_info.wfq_data[vport_id].configured = true;
 6709 
 6710         for (i = 0; i < num_vports; i++) {
 6711                 if (p_hwfn->qm_info.wfq_data[i].configured)
 6712                         continue;
 6713 
 6714                 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
 6715         }
 6716 
 6717         return ECORE_SUCCESS;
 6718 }
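
      /* Worked example (editorial): with min_pf_rate == 10000 Mbps,
       * num_vports == 4, no vport configured yet, and req_rate == 6000
       * Mbps for vport 0: total_req_min_rate == 6000 <= 10000, and the
       * three non-requested vports each get (10000 - 6000) / 3 == 1333
       * Mbps, above the one-percent floor of 100 Mbps, so the request is
       * accepted.
       */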
 6719 
 6720 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
 6721                                        struct ecore_ptt *p_ptt,
 6722                                        u16 vp_id, u32 rate)
 6723 {
 6724         struct ecore_mcp_link_state *p_link;
 6725         int rc = ECORE_SUCCESS;
 6726 
 6727         p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
 6728 
 6729         if (!p_link->min_pf_rate) {
 6730                 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
 6731                 p_hwfn->qm_info.wfq_data[vp_id].configured = true;
 6732                 return rc;
 6733         }
 6734 
 6735         rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
 6736 
 6737         if (rc == ECORE_SUCCESS)
 6738                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
 6739                                                    p_link->min_pf_rate);
 6740         else
 6741                 DP_NOTICE(p_hwfn, false,
 6742                           "Validation failed while configuring min rate\n");
 6743 
 6744         return rc;
 6745 }
 6746 
 6747 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
 6748                                                    struct ecore_ptt *p_ptt,
 6749                                                    u32 min_pf_rate)
 6750 {
 6751         bool use_wfq = false;
 6752         int rc = ECORE_SUCCESS;
 6753         u16 i;
 6754 
 6755         /* Validate all pre configured vports for wfq */
 6756         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
 6757                 u32 rate;
 6758 
 6759                 if (!p_hwfn->qm_info.wfq_data[i].configured)
 6760                         continue;
 6761 
 6762                 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
 6763                 use_wfq = true;
 6764 
 6765                 rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
 6766                 if (rc != ECORE_SUCCESS) {
 6767                         DP_NOTICE(p_hwfn, false,
 6768                                   "WFQ validation failed while configuring min rate\n");
 6769                         break;
 6770                 }
 6771         }
 6772 
 6773         if (rc == ECORE_SUCCESS && use_wfq)
 6774                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
 6775         else
 6776                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
 6777 
 6778         return rc;
 6779 }
 6780 
 6781 /* Main API for ecore clients to configure vport min rate.
 6782  * vp_id - vport id within the PF, in the range [0 - (total_num_vports_per_pf - 1)]
 6783  * rate - speed in Mbps to be assigned to the given vport.
 6784  */
 6785 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
 6786 {
 6787         int i, rc = ECORE_INVAL;
 6788 
 6789         /* TBD - support devices with multiple hardware functions, i.e., 100G */
 6790         if (ECORE_IS_CMT(p_dev)) {
 6791                 DP_NOTICE(p_dev, false,
 6792                           "WFQ configuration is not supported for this device\n");
 6793                 return rc;
 6794         }
 6795 
 6796         for_each_hwfn(p_dev, i) {
 6797                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 6798                 struct ecore_ptt *p_ptt;
 6799 
 6800                 p_ptt = ecore_ptt_acquire(p_hwfn);
 6801                 if (!p_ptt)
 6802                         return ECORE_TIMEOUT;
 6803 
 6804                 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
 6805 
 6806                 if (rc != ECORE_SUCCESS) {
 6807                         ecore_ptt_release(p_hwfn, p_ptt);
 6808                         return rc;
 6809                 }
 6810 
 6811                 ecore_ptt_release(p_hwfn, p_ptt);
 6812         }
 6813 
 6814         return rc;
 6815 }
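
      /* Editorial usage sketch (illustrative values only): request a
       * 2500 Mbps minimum for vport 0 across every hwfn of the device.
       *
       *   if (ecore_configure_vport_wfq(p_dev, 0, 2500) != ECORE_SUCCESS)
       *           DP_NOTICE(p_dev, false, "vport WFQ config failed\n");
       */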
 6816 
 6817 /* API to configure WFQ from mcp link change */
 6818 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
 6819                                            struct ecore_ptt *p_ptt,
 6820                                            u32 min_pf_rate)
 6821 {
 6822         int i;
 6823 
 6824         /* TBD - support devices with multiple hardware functions, i.e., 100G */
 6825         if (ECORE_IS_CMT(p_dev)) {
 6826                 DP_VERBOSE(p_dev, ECORE_MSG_LINK,
 6827                            "WFQ configuration is not supported for this device\n");
 6828                 return;
 6829         }
 6830 
 6831         for_each_hwfn(p_dev, i) {
 6832                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 6833 
 6834                 __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
 6835                                                         min_pf_rate);
 6836         }
 6837 }
 6838 
 6839 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
 6840                                        struct ecore_ptt *p_ptt,
 6841                                        struct ecore_mcp_link_state *p_link,
 6842                                        u8 max_bw)
 6843 {
 6844         int rc = ECORE_SUCCESS;
 6845 
 6846         p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
 6847 
 6848         if (!p_link->line_speed && (max_bw != 100))
 6849                 return rc;
 6850 
 6851         p_link->speed = (p_link->line_speed * max_bw) / 100;
 6852         p_hwfn->qm_info.pf_rl = p_link->speed;
 6853 
 6854         /* Since the limiter also affects Tx-switched traffic, we don't want it
 6855          * to limit such traffic in case there's no actual limit.
 6856          * In that case, set limit to imaginary high boundary.
 6857          */
 6858         if (max_bw == 100)
 6859                 p_hwfn->qm_info.pf_rl = 100000;
 6860 
 6861         rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
 6862                               p_hwfn->qm_info.pf_rl);
 6863 
 6864         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6865                    "Configured MAX bandwidth to be %08x Mb/sec\n",
 6866                    p_link->speed);
 6867 
 6868         return rc;
 6869 }
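
      /* Worked example (editorial): with line_speed == 25000 Mbps and
       * max_bw == 40, p_link->speed == 25000 * 40 / 100 == 10000 Mbps and
       * the rate limiter is set to 10000.  With max_bw == 100 the limiter
       * is instead parked at the 100000 Mbps sentinel, so Tx-switched
       * traffic is effectively unlimited.
       */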
 6870 
 6871 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
 6872 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
 6873 {
 6874         int i, rc = ECORE_INVAL;
 6875 
 6876         if (max_bw < 1 || max_bw > 100) {
 6877                 DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
 6878                 return rc;
 6879         }
 6880 
 6881         for_each_hwfn(p_dev, i) {
 6882                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 6883                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
 6884                 struct ecore_mcp_link_state *p_link;
 6885                 struct ecore_ptt *p_ptt;
 6886 
 6887                 p_link = &p_lead->mcp_info->link_output;
 6888 
 6889                 p_ptt = ecore_ptt_acquire(p_hwfn);
 6890                 if (!p_ptt)
 6891                         return ECORE_TIMEOUT;
 6892 
 6893                 rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
 6894                                                         p_link, max_bw);
 6895 
 6896                 ecore_ptt_release(p_hwfn, p_ptt);
 6897 
 6898                 if (rc != ECORE_SUCCESS)
 6899                         break;
 6900         }
 6901 
 6902         return rc;
 6903 }
 6904 
 6905 int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
 6906                                        struct ecore_ptt *p_ptt,
 6907                                        struct ecore_mcp_link_state *p_link,
 6908                                        u8 min_bw)
 6909 {
 6910         int rc = ECORE_SUCCESS;
 6911 
 6912         p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
 6913         p_hwfn->qm_info.pf_wfq = min_bw;
 6914 
 6915         if (!p_link->line_speed)
 6916                 return rc;
 6917 
 6918         p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
 6919 
 6920         rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
 6921 
 6922         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
 6923                    "Configured MIN bandwidth to be %d Mb/sec\n",
 6924                    p_link->min_pf_rate);
 6925 
 6926         return rc;
 6927 }
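
      /* Worked example (editorial): line_speed == 25000 Mbps and
       * min_bw == 20 yield min_pf_rate == 25000 * 20 / 100 == 5000 Mbps,
       * which then drives the per-vport WFQ recomputation on the next
       * link change.
       */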
 6928 
 6929 /* Main API to configure PF min bandwidth where bw range is [1-100] */
 6930 int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
 6931 {
 6932         int i, rc = ECORE_INVAL;
 6933 
 6934         if (min_bw < 1 || min_bw > 100) {
 6935                 DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
 6936                 return rc;
 6937         }
 6938 
 6939         for_each_hwfn(p_dev, i) {
 6940                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 6941                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
 6942                 struct ecore_mcp_link_state *p_link;
 6943                 struct ecore_ptt *p_ptt;
 6944 
 6945                 p_link = &p_lead->mcp_info->link_output;
 6946 
 6947                 p_ptt = ecore_ptt_acquire(p_hwfn);
 6948                 if (!p_ptt)
 6949                         return ECORE_TIMEOUT;
 6950 
 6951                 rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
 6952                                                         p_link, min_bw);
 6953                 if (rc != ECORE_SUCCESS) {
 6954                         ecore_ptt_release(p_hwfn, p_ptt);
 6955                         return rc;
 6956                 }
 6957 
 6958                 if (p_link->min_pf_rate) {
 6959                         u32 min_rate = p_link->min_pf_rate;
 6960 
 6961                         rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
 6962                                                                      p_ptt,
 6963                                                                      min_rate);
 6964                 }
 6965 
 6966                 ecore_ptt_release(p_hwfn, p_ptt);
 6967         }
 6968 
 6969         return rc;
 6970 }
 6971 
 6972 void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 6973 {
 6974         struct ecore_mcp_link_state *p_link;
 6975 
 6976         p_link = &p_hwfn->mcp_info->link_output;
 6977 
 6978         if (p_link->min_pf_rate)
 6979                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
 6980 
 6981         OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
 6982                     sizeof(*p_hwfn->qm_info.wfq_data) *
 6983                                 p_hwfn->qm_info.num_vports);
 6984 }
 6985 
 6986 int ecore_device_num_engines(struct ecore_dev *p_dev)
 6987 {
 6988         return ECORE_IS_BB(p_dev) ? 2 : 1;
 6989 }
 6990 
 6991 int ecore_device_num_ports(struct ecore_dev *p_dev)
 6992 {
 6993         return p_dev->num_ports;
 6994 }
 6995 
 6996 void ecore_set_fw_mac_addr(__le16 *fw_msb,
 6997                           __le16 *fw_mid,
 6998                           __le16 *fw_lsb,
 6999                           u8 *mac)
 7000 {
 7001         ((u8 *)fw_msb)[0] = mac[1];
 7002         ((u8 *)fw_msb)[1] = mac[0];
 7003         ((u8 *)fw_mid)[0] = mac[3];
 7004         ((u8 *)fw_mid)[1] = mac[2];
 7005         ((u8 *)fw_lsb)[0] = mac[5];
 7006         ((u8 *)fw_lsb)[1] = mac[4];
 7007 }
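
      /* Worked example (editorial): for mac == 00:11:22:33:44:55 the
       * bytes land in memory as fw_msb == {0x11, 0x00}, fw_mid ==
       * {0x33, 0x22}, fw_lsb == {0x55, 0x44}; read as little-endian
       * 16-bit values these are 0x0011, 0x2233 and 0x4455, i.e. each
       * __le16 holds the big-endian byte pair of the MAC address.
       */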
 7008 
 7009 void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable)
 7010 {
 7011         if (p_dev->recov_in_prog != !b_enable) {
 7012                 DP_INFO(p_dev, "%s access to the device\n",
 7013                         b_enable ? "Enable" : "Disable");
 7014                 p_dev->recov_in_prog = !b_enable;
 7015         }
 7016 }
 7017 
 7018 #ifdef _NTDDK_
 7019 #pragma warning(pop)
 7020 #endif
