FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_sp_commands.c


/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_sp_commands.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#ifndef LINUX_REMOVE
#include "ecore_tcp_ip.h"
#endif

/*
 * Acquire a slow-path queue (SPQ) entry and initialize its header and
 * completion mode for the given ramrod command/protocol pair.
 */
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!pp_ent)
                return ECORE_INVAL;

        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the SPQ entry */
        p_ent = *pp_ent;
        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case ECORE_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        return ECORE_INVAL;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case ECORE_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = OSAL_NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %llx comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long long)(osal_uintptr_t)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return ECORE_SUCCESS;
}
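
#if 0
/*
 * Usage sketch (illustrative only, not compiled): every caller in this
 * file follows the same pattern -- zero an ecore_sp_init_data, pick a
 * CID and completion mode, request an entry, then post it.  This mirrors
 * ecore_sp_heartbeat_ramrod() later in this file.
 */
static enum _ecore_status_t
example_empty_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
#endif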

/* Translate an ecore tunnel-classification value to the FW enum;
 * unknown values fall back to the default MAC/VLAN classification.
 */
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case ECORE_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
                              struct ecore_tunnel_info *p_src,
                              bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                                p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                                p_src->ip_geneve.b_mode_enabled;
}

static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
                                    struct ecore_tunnel_info *p_src)
{
        enum tunnel_clss type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
                                 struct ecore_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                                struct ecore_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct ecore_tunn_update_type *tun_type,
                              u8 *p_update_port, __le16 *p_port,
                              struct ecore_tunn_update_udp_port *p_udp_port)
{
        __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
}

static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunnel_info *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}
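
/*
 * Note: only the VXLAN and GENEVE tunnels carry a configurable UDP
 * destination port, which is why they go through
 * ecore_set_ramrod_tunnel_param() above while the GRE and IP-GENEVE
 * classes use the classification-only __ecore_set_ramrod_tunnel_param()
 * variant.
 */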

static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   struct ecore_tunnel_info *p_tun)
{
        ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                             p_tun->ip_gre.b_mode_enabled);
        ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                                p_tun->ip_geneve.b_mode_enabled);
}

static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_tunnel_info *p_tunn)
{
        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel hw config is not supported\n");
                return;
        }

        if (p_tunn->vxlan_port.b_update_port)
                ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
                                          p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                ecore_set_geneve_dest_port(p_hwfn, p_ptt,
                                           p_tunn->geneve_port.port);

        ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunnel_info *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf start config is not supported\n");
                return;
        }

        if (!p_src)
                return;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tun->ip_gre);
}

/*
 * Post the COMMON_RAMROD_PF_START ramrod that brings the physical
 * function up in FW: event-queue and consolidation-queue PBL addresses,
 * multi-function mode, outer-tag configuration, tunnel configuration
 * and protocol personality.
 */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_tunnel_info *p_tunn,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 page_cnt;
        u8 i;

        /* update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

        if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        p_ramrod->outer_tag_config.outer_tag.tci =
                OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
        if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits))
                p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
        else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
                 &p_hwfn->p_dev->mf_bits)) {
                p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
                p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
        }

        p_ramrod->outer_tag_config.pri_map_valid = 1;
        for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
                p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

        /* enable_stag_pri_change should be set if port is in BD mode or,
         * UFP with Host Control mode or, UFP with DCB over base interface.
         */
        if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
                if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
                    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
                else
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
                          &p_hwfn->p_dev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case ECORE_PCI_FCOE:
                p_ramrod->personality = PERSONALITY_FCOE;
                break;
        case ECORE_PCI_ISCSI:
                p_ramrod->personality = PERSONALITY_ISCSI;
                break;
        case ECORE_PCI_ETH_IWARP:
        case ECORE_PCI_ETH_ROCE:
        case ECORE_PCI_ETH_RDMA:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->p_dev->p_iov_info) {
                struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

                p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8)p_iov->total_vfs;
        }
        /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
         * version is available.
         */
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
                   p_ramrod->outer_tag_config.outer_tag.tci);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if (p_tunn)
                ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                            &p_hwfn->p_dev->tunnel);

        return rc;
}
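
#if 0
/*
 * Call sketch (hypothetical wrapper, for illustration): the PF-start
 * ramrod needs a PTT window for the tunnel HW programming, so a caller
 * would typically bracket it with ecore_ptt_acquire()/ecore_ptt_release()
 * (assumed here from ecore_hw.h).  This starts the PF with no tunnel
 * config and NPAR tx-switching disabled.
 */
static enum _ecore_status_t
example_pf_start(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
        enum _ecore_status_t rc;

        if (p_ptt == OSAL_NULL)
                return ECORE_AGAIN;

        rc = ecore_sp_pf_start(p_hwfn, p_ptt, OSAL_NULL, false);
        ecore_ptt_release(p_hwfn, p_ptt);
        return rc;
}
#endif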

enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                        &p_ent->ramrod.pf_update);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_UNKNOWN) {
                DP_INFO(p_hwfn, "Invalid priority type %d\n",
                        p_hwfn->ufp_info.pri_type);
                return ECORE_INVAL;
        }

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
            (p_hwfn->p_dcbx_info->results.dcbx_enabled))
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val)        ((mb_val) * 10 / 16)

/* FW uses 1/64k to express gd */
#define FW_GD_RESOLUTION(gd)            (64 * 1024 / (gd))

static u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
{
        return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
}

static u16 ecore_sp_rl_gd_denom(u32 gd)
{
        return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
}
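
/*
 * Worked example (illustrative): the QM rate limiter counts in units of
 * 1.6 Mbps, so QM_RL_RESOLUTION(1600) = 1600 * 10 / 16 = 1000 units, and
 * ecore_sp_rl_mb_to_qm() clamps the result to fit a u16.  The FW expresses
 * gd as a 1/64K fraction, so FW_GD_RESOLUTION(4) = 64 * 1024 / 4 = 16384;
 * ecore_sp_rl_gd_denom() maps gd == 0 to 0 rather than dividing by zero.
 */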

enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
                                        struct ecore_rl_update_params *params)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct rl_update_ramrod_data *rl_update;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        rl_update = &p_ent->ramrod.rl_update;

        rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
        rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
        rl_update->rl_init_flg = params->rl_init_flg;
        rl_update->rl_start_flg = params->rl_start_flg;
        rl_update->rl_stop_flg = params->rl_stop_flg;
        rl_update->rl_id_first = params->rl_id_first;
        rl_update->rl_id_last = params->rl_id_last;
        rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
        rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
        rl_update->rl_max_rate =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
        rl_update->rl_r_ai =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
        rl_update->rl_r_hai =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
        rl_update->dcqcn_g =
                OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
        rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
        /* "timeuot" (sic) follows the FW HSI field spelling */
        rl_update->dcqcn_timeuot_us =
                OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
        rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "rl_params: qcn_update_param_flg %x, "
                   "dcqcn_update_param_flg %x, rl_init_flg %x, "
                   "rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, "
                   "rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, "
                   "rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, "
                   "dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
                   rl_update->qcn_update_param_flg,
                   rl_update->dcqcn_update_param_flg,
                   rl_update->rl_init_flg, rl_update->rl_start_flg,
                   rl_update->rl_stop_flg, rl_update->rl_id_first,
                   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
                   rl_update->rl_bc_rate, rl_update->rl_max_rate,
                   rl_update->rl_r_ai, rl_update->rl_r_hai,
                   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
                   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf update config is not supported\n");
                return rc;
        }

        if (!p_tunn)
                return ECORE_INVAL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                        &p_ent->ramrod.pf_update.tunnel_config);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);

        return rc;
}
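
#if 0
/*
 * Usage sketch (illustrative only): a blocking tunnel update that turns
 * VXLAN classification on and programs the IANA VXLAN UDP port (4789).
 * The field names come from struct ecore_tunnel_info as used above; the
 * values are examples.
 */
static enum _ecore_status_t
example_enable_vxlan(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct ecore_tunnel_info tunn;

        OSAL_MEMSET(&tunn, 0, sizeof(tunn));
        tunn.vxlan.b_update_mode = true;
        tunn.vxlan.b_mode_enabled = true;
        tunn.vxlan.tun_cls = ECORE_TUNN_CLSS_MAC_VLAN;
        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = 4789;

        return ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
                                           ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}
#endif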

/* Post the COMMON_RAMROD_PF_STOP ramrod and block until it completes. */
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Post an empty ramrod, used as a FW slow-path liveness check. */
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Propagate an updated outer-VLAN (S-tag) value to the FW. */
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan =
                OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.