FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_sriov.c

    1 /*
    2  * Copyright (c) 2018-2019 Cavium, Inc.
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #include "bcm_osal.h"
   32 #include "ecore.h"
   33 #include "reg_addr.h"
   34 #include "ecore_sriov.h"
   35 #include "ecore_status.h"
   36 #include "ecore_hw.h"
   37 #include "ecore_hw_defs.h"
   38 #include "ecore_int.h"
   39 #include "ecore_hsi_eth.h"
   40 #include "ecore_l2.h"
   41 #include "ecore_vfpf_if.h"
   42 #include "ecore_rt_defs.h"
   43 #include "ecore_init_ops.h"
   44 #include "pcics_reg_driver.h"
   45 #include "ecore_gtt_reg_addr.h"
   46 #include "ecore_iro.h"
   47 #include "ecore_mcp.h"
   48 #include "ecore_cxt.h"
   49 #include "ecore_vf.h"
   50 #include "ecore_init_fw_funcs.h"
   51 #include "ecore_sp_commands.h"
   52 
   53 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
   54                                                   u8 opcode,
   55                                                   __le16 echo,
   56                                                   union event_ring_data *data,
   57                                                   u8 fw_return_code);
   58 
   59 const char *ecore_channel_tlvs_string[] = {
   60         "CHANNEL_TLV_NONE", /* ends tlv sequence */
   61         "CHANNEL_TLV_ACQUIRE",
   62         "CHANNEL_TLV_VPORT_START",
   63         "CHANNEL_TLV_VPORT_UPDATE",
   64         "CHANNEL_TLV_VPORT_TEARDOWN",
   65         "CHANNEL_TLV_START_RXQ",
   66         "CHANNEL_TLV_START_TXQ",
   67         "CHANNEL_TLV_STOP_RXQ",
   68         "CHANNEL_TLV_STOP_TXQ",
   69         "CHANNEL_TLV_UPDATE_RXQ",
   70         "CHANNEL_TLV_INT_CLEANUP",
   71         "CHANNEL_TLV_CLOSE",
   72         "CHANNEL_TLV_RELEASE",
   73         "CHANNEL_TLV_LIST_END",
   74         "CHANNEL_TLV_UCAST_FILTER",
   75         "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
   76         "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
   77         "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
   78         "CHANNEL_TLV_VPORT_UPDATE_MCAST",
   79         "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
   80         "CHANNEL_TLV_VPORT_UPDATE_RSS",
   81         "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
   82         "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
   83         "CHANNEL_TLV_UPDATE_TUNN_PARAM",
   84         "CHANNEL_TLV_COALESCE_UPDATE",
   85         "CHANNEL_TLV_QID",
   86         "CHANNEL_TLV_COALESCE_READ",
   87         "CHANNEL_TLV_MAX"
   88 };
   89 
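       /* Derive the legacy-behavior flags for a VF from its ACQUIRE request:
        * an old fastpath HSI minor implies the legacy Rx-producer location,
        * and the absence of the QUEUE_QIDS capability implies legacy CID
        * addressing.
        */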
   90 static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
   91 {
   92         u8 legacy = 0;
   93 
   94         if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
   95             ETH_HSI_VER_NO_PKT_LEN_TUNN)
   96                 legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
   97 
   98         if (!(p_vf->acquire.vfdev_info.capabilities &
   99              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
  100                 legacy |= ECORE_QCID_LEGACY_VF_CID;
  101 
  102         return legacy;
  103 }
  104 
  105 /* IOV ramrods */
  106 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
  107                                               struct ecore_vf_info *p_vf)
  108 {
  109         struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
  110         struct ecore_spq_entry *p_ent = OSAL_NULL;
  111         struct ecore_sp_init_data init_data;
  112         enum _ecore_status_t rc = ECORE_NOTIMPL;
  113         u8 fp_minor;
  114 
  115         /* Get SPQ entry */
  116         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  117         init_data.cid = ecore_spq_get_cid(p_hwfn);
  118         init_data.opaque_fid = p_vf->opaque_fid;
  119         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  120 
  121         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  122                                    COMMON_RAMROD_VF_START,
  123                                    PROTOCOLID_COMMON, &init_data);
  124         if (rc != ECORE_SUCCESS)
  125                 return rc;
  126 
  127         p_ramrod = &p_ent->ramrod.vf_start;
  128 
  129         p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
  130         p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
  131 
  132         switch (p_hwfn->hw_info.personality) {
  133         case ECORE_PCI_ETH:
  134                 p_ramrod->personality = PERSONALITY_ETH;
  135                 break;
  136         case ECORE_PCI_ETH_ROCE:
  137         case ECORE_PCI_ETH_IWARP:
  138                 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
  139                 break;
  140         default:
  141                 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
  142                           p_hwfn->hw_info.personality);
  143                 return ECORE_INVAL;
  144         }
  145 
  146         fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
  147         if (fp_minor > ETH_HSI_VER_MINOR &&
  148             fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
  149                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
   150                            "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
  151                            p_vf->abs_vf_id,
  152                            ETH_HSI_VER_MAJOR, fp_minor,
  153                            ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
  154                 fp_minor = ETH_HSI_VER_MINOR;
  155         }
  156 
  157         p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
  158         p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
  159 
  160         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  161                    "VF[%d] - Starting using HSI %02x.%02x\n",
  162                    p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
  163 
  164         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  165 }
  166 
  167 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
  168                                              u32 concrete_vfid,
  169                                              u16 opaque_vfid)
  170 {
  171         struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
  172         struct ecore_spq_entry *p_ent = OSAL_NULL;
  173         struct ecore_sp_init_data init_data;
  174         enum _ecore_status_t rc = ECORE_NOTIMPL;
  175 
  176         /* Get SPQ entry */
  177         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
  178         init_data.cid = ecore_spq_get_cid(p_hwfn);
  179         init_data.opaque_fid = opaque_vfid;
  180         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
  181 
  182         rc = ecore_sp_init_request(p_hwfn, &p_ent,
  183                                    COMMON_RAMROD_VF_STOP,
  184                                    PROTOCOLID_COMMON, &init_data);
  185         if (rc != ECORE_SUCCESS)
  186                 return rc;
  187 
  188         p_ramrod = &p_ent->ramrod.vf_stop;
  189 
  190         p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
  191 
  192         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
  193 }
  194 
  195 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
  196                              bool b_enabled_only, bool b_non_malicious)
  197 {
  198         if (!p_hwfn->pf_iov_info) {
  199                 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
  200                 return false;
  201         }
  202 
  203         if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
  204             (rel_vf_id < 0))
  205                 return false;
  206 
  207         if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
  208             b_enabled_only)
  209                 return false;
  210 
  211         if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
  212             b_non_malicious)
  213                 return false;
  214 
  215         return true;
  216 }
  217 
  218 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
  219                                             u16 relative_vf_id,
  220                                             bool b_enabled_only)
  221 {
  222         struct ecore_vf_info *vf = OSAL_NULL;
  223 
  224         if (!p_hwfn->pf_iov_info) {
  225                 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
  226                 return OSAL_NULL;
  227         }
  228 
  229         if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
  230                                     b_enabled_only, false))
  231                 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
  232         else
  233                 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
  234                        relative_vf_id);
  235 
  236         return vf;
  237 }
  238 
  239 static struct ecore_queue_cid *
  240 ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
  241 {
  242         int i;
  243 
  244         for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
  245                 if (p_queue->cids[i].p_cid &&
  246                     !p_queue->cids[i].b_is_tx)
  247                         return p_queue->cids[i].p_cid;
  248         }
  249 
  250         return OSAL_NULL;
  251 }
  252 
  253 enum ecore_iov_validate_q_mode {
  254         ECORE_IOV_VALIDATE_Q_NA,
  255         ECORE_IOV_VALIDATE_Q_ENABLE,
  256         ECORE_IOV_VALIDATE_Q_DISABLE,
  257 };
  258 
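       /* Q_NA skips the queue-state check entirely; Q_ENABLE requires that a
        * matching queue-cid exists, while Q_DISABLE requires that none does.
        */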
  259 static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
  260                                           u16 qid,
  261                                           enum ecore_iov_validate_q_mode mode,
  262                                           bool b_is_tx)
  263 {
  264         int i;
  265 
  266         if (mode == ECORE_IOV_VALIDATE_Q_NA)
  267                 return true;
  268 
  269         for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
  270                 struct ecore_vf_queue_cid *p_qcid;
  271 
  272                 p_qcid = &p_vf->vf_queues[qid].cids[i];
  273 
  274                 if (p_qcid->p_cid == OSAL_NULL)
  275                         continue;
  276 
  277                 if (p_qcid->b_is_tx != b_is_tx)
  278                         continue;
  279 
  280                 /* Found. It's enabled. */
  281                 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
  282         }
  283 
   284         /* In case we haven't found any valid cid, then it's disabled */
  285         return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
  286 }
  287 
  288 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
  289                                    struct ecore_vf_info *p_vf,
  290                                    u16 rx_qid,
  291                                    enum ecore_iov_validate_q_mode mode)
  292 {
  293         if (rx_qid >= p_vf->num_rxqs) {
  294                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  295                            "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
  296                            p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
  297                 return false;
  298         }
  299 
  300         return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
  301 }
  302 
  303 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
  304                                    struct ecore_vf_info *p_vf,
  305                                    u16 tx_qid,
  306                                    enum ecore_iov_validate_q_mode mode)
  307 {
  308         if (tx_qid >= p_vf->num_txqs) {
  309                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  310                            "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
  311                            p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
  312                 return false;
  313         }
  314 
  315         return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
  316 }
  317 
  318 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
  319                                   struct ecore_vf_info *p_vf,
  320                                   u16 sb_idx)
  321 {
  322         int i;
  323 
  324         for (i = 0; i < p_vf->num_sbs; i++)
  325                 if (p_vf->igu_sbs[i] == sb_idx)
  326                         return true;
  327 
  328         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
   329                    "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
  330                    p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
  331 
  332         return false;
  333 }
  334 
  335 /* Is there at least 1 queue open? */
  336 static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
  337 {
  338         u8 i;
  339 
  340         for (i = 0; i < p_vf->num_rxqs; i++)
  341                 if (ecore_iov_validate_queue_mode(p_vf, i,
  342                                                   ECORE_IOV_VALIDATE_Q_ENABLE,
  343                                                   false))
  344                         return true;
  345 
  346         return false;
  347 }
  348 
  349 static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
  350 {
  351         u8 i;
  352 
  353         for (i = 0; i < p_vf->num_txqs; i++)
  354                 if (ecore_iov_validate_queue_mode(p_vf, i,
  355                                                   ECORE_IOV_VALIDATE_Q_ENABLE,
  356                                                   true))
  357                         return true;
  358 
  359         return false;
  360 }
  361 
  362 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
  363                                                 int vfid,
  364                                                 struct ecore_ptt *p_ptt)
  365 {
  366         struct ecore_bulletin_content *p_bulletin;
  367         int crc_size = sizeof(p_bulletin->crc);
  368         struct ecore_dmae_params params;
  369         struct ecore_vf_info *p_vf;
  370 
  371         p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
  372         if (!p_vf)
  373                 return ECORE_INVAL;
  374 
  375         /* TODO - check VF is in a state where it can accept message */
  376         if (!p_vf->vf_bulletin)
  377                 return ECORE_INVAL;
  378 
  379         p_bulletin = p_vf->bulletin.p_virt;
  380 
  381         /* Increment bulletin board version and compute crc */
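               /* Note that the CRC is computed over everything past the crc
                * field itself, so the checksum never covers its own storage.
                */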
  382         p_bulletin->version++;
  383         p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
  384                                      p_vf->bulletin.size - crc_size);
  385 
  386         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  387                    "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
  388                    p_bulletin->version, p_vf->relative_vf_id,
  389                    p_bulletin->crc);
  390 
   391         /* Propagate the bulletin board via DMAE to VM memory */
  392         OSAL_MEMSET(&params, 0, sizeof(params));
  393         params.flags = ECORE_DMAE_FLAG_VF_DST;
  394         params.dst_vfid = p_vf->abs_vf_id;
  395         return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
  396                                     p_vf->vf_bulletin, p_vf->bulletin.size / 4,
  397                                     &params);
  398 }
  399 
  400 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
  401 {
  402         struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
  403         int pos = iov->pos;
  404 
  405         DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
  406         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  407                                   pos + PCI_SRIOV_CTRL,
  408                                   &iov->ctrl);
  409 
  410         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  411                                   pos + PCI_SRIOV_TOTAL_VF,
  412                                   &iov->total_vfs);
  413         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  414                                   pos + PCI_SRIOV_INITIAL_VF,
  415                                   &iov->initial_vfs);
  416 
  417         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  418                                   pos + PCI_SRIOV_NUM_VF,
  419                                   &iov->num_vfs);
  420         if (iov->num_vfs) {
  421                 /* @@@TODO - in future we might want to add an OSAL here to
  422                  * allow each OS to decide on its own how to act.
  423                  */
  424                 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
   425                            "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
  426                 iov->num_vfs = 0;
  427         }
  428 
  429         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  430                                   pos + PCI_SRIOV_VF_OFFSET,
  431                                   &iov->offset);
  432 
  433         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  434                                   pos + PCI_SRIOV_VF_STRIDE,
  435                                   &iov->stride);
  436 
  437         OSAL_PCI_READ_CONFIG_WORD(p_dev,
  438                                   pos + PCI_SRIOV_VF_DID,
  439                                   &iov->vf_device_id);
  440 
  441         OSAL_PCI_READ_CONFIG_DWORD(p_dev,
  442                                    pos + PCI_SRIOV_SUP_PGSIZE,
  443                                    &iov->pgsz);
  444 
  445         OSAL_PCI_READ_CONFIG_DWORD(p_dev,
  446                                    pos + PCI_SRIOV_CAP,
  447                                    &iov->cap);
  448 
  449         OSAL_PCI_READ_CONFIG_BYTE(p_dev,
  450                                   pos + PCI_SRIOV_FUNC_LINK,
  451                                   &iov->link);
  452 
  453         DP_VERBOSE(p_dev, ECORE_MSG_IOV,
  454                    "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
  455                    iov->nres, iov->cap, iov->ctrl,
  456                    iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
  457                    iov->offset, iov->stride, iov->pgsz);
  458 
  459         /* Some sanity checks */
  460         if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
  461             iov->total_vfs > NUM_OF_VFS(p_dev)) {
   462                 /* This can happen only due to a bug. In this case we set
   463                  * num_vfs to zero to avoid memory corruption in code that
   464                  * assumes the max number of VFs.
   465                  */
   466                 DP_NOTICE(p_dev, false, "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
   467                           iov->num_vfs);
  468 
  469                 iov->num_vfs = 0;
  470                 iov->total_vfs = 0;
  471         }
  472 
  473         return ECORE_SUCCESS;
  474 }
  475 
  476 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
  477 {
  478         struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
  479         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  480         struct ecore_bulletin_content *p_bulletin_virt;
  481         dma_addr_t req_p, rply_p, bulletin_p;
  482         union pfvf_tlvs *p_reply_virt_addr;
  483         union vfpf_tlvs *p_req_virt_addr;
  484         u8 idx = 0;
  485 
  486         OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
  487 
  488         p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
  489         req_p = p_iov_info->mbx_msg_phys_addr;
  490         p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
  491         rply_p = p_iov_info->mbx_reply_phys_addr;
  492         p_bulletin_virt = p_iov_info->p_bulletins;
  493         bulletin_p = p_iov_info->bulletins_phys;
  494         if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
  495                 DP_ERR(p_hwfn, "ecore_iov_setup_vfdb called without allocating mem first\n");
  496                 return;
  497         }
  498 
  499         for (idx = 0; idx < p_iov->total_vfs; idx++) {
  500                 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
  501                 u32 concrete;
  502 
  503                 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
  504                 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
  505                 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
  506                 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
  507 
  508 #ifdef CONFIG_ECORE_SW_CHANNEL
  509                 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
  510                 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
  511 #endif
  512                 vf->state = VF_STOPPED;
  513                 vf->b_init = false;
  514 
  515                 vf->bulletin.phys = idx *
  516                                     sizeof(struct ecore_bulletin_content) +
  517                                     bulletin_p;
  518                 vf->bulletin.p_virt = p_bulletin_virt + idx;
  519                 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
  520 
  521                 vf->relative_vf_id = idx;
  522                 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
  523                 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
  524                 vf->concrete_fid = concrete;
  525                 /* TODO - need to devise a better way of getting opaque */
  526                 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
  527                                  (vf->abs_vf_id << 8);
  528 
  529                 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
  530                 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
  531         }
  532 }
  533 
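       /* Allocate the PF side of the VF<->PF channel: a request mailbox, a
        * reply mailbox and a bulletin board, each as a single coherent DMA
        * buffer holding one slot per VF (carved up per-VF in
        * ecore_iov_setup_vfdb() above).
        */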
  534 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
  535 {
  536         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  537         void **p_v_addr;
  538         u16 num_vfs = 0;
  539 
  540         num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
  541 
  542         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  543                    "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
  544 
  545         /* Allocate PF Mailbox buffer (per-VF) */
  546         p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
  547         p_v_addr = &p_iov_info->mbx_msg_virt_addr;
  548         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  549                                             &p_iov_info->mbx_msg_phys_addr,
  550                                             p_iov_info->mbx_msg_size);
  551         if (!*p_v_addr)
  552                 return ECORE_NOMEM;
  553 
  554         /* Allocate PF Mailbox Reply buffer (per-VF) */
  555         p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
  556         p_v_addr = &p_iov_info->mbx_reply_virt_addr;
  557         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  558                                             &p_iov_info->mbx_reply_phys_addr,
  559                                             p_iov_info->mbx_reply_size);
  560         if (!*p_v_addr)
  561                 return ECORE_NOMEM;
  562 
  563         p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
  564                                      num_vfs;
  565         p_v_addr = &p_iov_info->p_bulletins;
  566         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
  567                                             &p_iov_info->bulletins_phys,
  568                                             p_iov_info->bulletins_size);
  569         if (!*p_v_addr)
  570                 return ECORE_NOMEM;
  571 
  572         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  573                    "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
  574                    p_iov_info->mbx_msg_virt_addr,
  575                    (unsigned long long)p_iov_info->mbx_msg_phys_addr,
  576                    p_iov_info->mbx_reply_virt_addr,
  577                    (unsigned long long)p_iov_info->mbx_reply_phys_addr,
  578                    p_iov_info->p_bulletins,
  579                    (unsigned long long)p_iov_info->bulletins_phys);
  580 
  581         return ECORE_SUCCESS;
  582 }
  583 
  584 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
  585 {
  586         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
  587 
  588         if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
  589                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  590                                        p_iov_info->mbx_msg_virt_addr,
  591                                        p_iov_info->mbx_msg_phys_addr,
  592                                        p_iov_info->mbx_msg_size);
  593 
  594         if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
  595                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  596                                        p_iov_info->mbx_reply_virt_addr,
  597                                        p_iov_info->mbx_reply_phys_addr,
  598                                        p_iov_info->mbx_reply_size);
  599 
  600         if (p_iov_info->p_bulletins)
  601                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
  602                                        p_iov_info->p_bulletins,
  603                                        p_iov_info->bulletins_phys,
  604                                        p_iov_info->bulletins_size);
  605 }
  606 
  607 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
  608 {
  609         struct ecore_pf_iov *p_sriov;
  610 
  611         if (!IS_PF_SRIOV(p_hwfn)) {
  612                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  613                            "No SR-IOV - no need for IOV db\n");
  614                 return ECORE_SUCCESS;
  615         }
  616 
  617         p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
  618         if (!p_sriov) {
  619                 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
  620                 return ECORE_NOMEM;
  621         }
  622 
  623         p_hwfn->pf_iov_info = p_sriov;
  624 
  625         ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
  626                                     ecore_sriov_eqe_event);
  627 
  628         return ecore_iov_allocate_vfdb(p_hwfn);
  629 }
  630 
  631 void ecore_iov_setup(struct ecore_hwfn  *p_hwfn)
  632 {
  633         if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
  634                 return;
  635 
  636         ecore_iov_setup_vfdb(p_hwfn);
  637 }
  638 
  639 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
  640 {
  641         ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
  642 
  643         if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
  644                 ecore_iov_free_vfdb(p_hwfn);
  645                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
  646                 p_hwfn->pf_iov_info = OSAL_NULL;
  647         }
  648 }
  649 
  650 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
  651 {
  652         OSAL_FREE(p_dev, p_dev->p_iov_info);
  653         p_dev->p_iov_info = OSAL_NULL;
  654 }
  655 
  656 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
  657 {
  658         struct ecore_dev *p_dev = p_hwfn->p_dev;
  659         int pos;
  660         enum _ecore_status_t rc;
  661 
  662         if (IS_VF(p_hwfn->p_dev))
  663                 return ECORE_SUCCESS;
  664 
  665         /* Learn the PCI configuration */
  666         pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
  667                                            PCI_EXT_CAP_ID_SRIOV);
  668         if (!pos) {
  669                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
  670                 return ECORE_SUCCESS;
  671         }
  672 
  673         /* Allocate a new struct for IOV information */
  674         /* TODO - can change to VALLOC when its available */
  675         p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
  676                                         sizeof(*p_dev->p_iov_info));
  677         if (!p_dev->p_iov_info) {
  678                 DP_NOTICE(p_hwfn, false,
  679                           "Can't support IOV due to lack of memory\n");
  680                 return ECORE_NOMEM;
  681         }
  682         p_dev->p_iov_info->pos = pos;
  683 
  684         rc = ecore_iov_pci_cfg_info(p_dev);
  685         if (rc)
  686                 return rc;
  687 
   688         /* We want PF IOV to be synonymous with the existence of p_iov_info;
  689          * In case the capability is published but there are no VFs, simply
  690          * de-allocate the struct.
  691          */
  692         if (!p_dev->p_iov_info->total_vfs) {
  693                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  694                            "IOV capabilities, but no VFs are published\n");
  695                 OSAL_FREE(p_dev, p_dev->p_iov_info);
  696                 p_dev->p_iov_info = OSAL_NULL;
  697                 return ECORE_SUCCESS;
  698         }
  699 
   700         /* First VF index based on offset is tricky:
   701          *  - If ARI is supported [likely], offset - (16 - pf_id) would
   702          *    provide the number for eng0; the second engine's VFs begin
   703          *    after the first engine's VFs.
   704          *  - If !ARI, VFs would start on the next device,
   705          *    so offset - (256 - pf_id) would provide the number.
   706          * Utilize the fact that (256 - pf_id) is reached only by the
   707          * latter to differentiate between the two.
   708          */
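               /* Illustrative (hypothetical) numbers: with ARI and abs_pf_id 0,
                * an SR-IOV offset of 16 gives first_vf_in_pf = 16 + 0 - 16 = 0;
                * without ARI the offset would be at least 256, and e.g. 256
                * yields 256 + 0 - 256 = 0 through the second branch.
                */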
  709 
  710         if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
  711                 u32 first = p_hwfn->p_dev->p_iov_info->offset +
  712                             p_hwfn->abs_pf_id - 16;
  713 
  714                 p_dev->p_iov_info->first_vf_in_pf = first;
  715 
  716                 if (ECORE_PATH_ID(p_hwfn))
  717                         p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
  718         } else {
  719                 u32 first = p_hwfn->p_dev->p_iov_info->offset +
  720                             p_hwfn->abs_pf_id - 256;
  721 
  722                 p_dev->p_iov_info->first_vf_in_pf = first;
  723         }
  724 
  725         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
  726                    "First VF in hwfn 0x%08x\n",
  727                    p_dev->p_iov_info->first_vf_in_pf);
  728 
  729         return ECORE_SUCCESS;
  730 }
  731 
  732 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
  733                                        bool b_fail_malicious)
  734 {
  735         /* Check PF supports sriov */
  736         if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
  737             !IS_PF_SRIOV_ALLOC(p_hwfn))
  738                 return false;
  739 
  740         /* Check VF validity */
  741         if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
  742                 return false;
  743 
  744         return true;
  745 }
  746 
  747 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
  748 {
  749         return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
  750 }
  751 
  752 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
  753                                  u16 rel_vf_id,
  754                                  u8 to_disable)
  755 {
  756         struct ecore_vf_info *vf;
  757         int i;
  758 
  759         for_each_hwfn(p_dev, i) {
  760                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
  761 
  762                 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
  763                 if (!vf)
  764                         continue;
  765 
  766                 vf->to_disable = to_disable;
  767         }
  768 }
  769 
  770 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
  771                                   u8 to_disable)
  772 {
  773         u16 i;
  774 
  775         if (!IS_ECORE_SRIOV(p_dev))
  776                 return;
  777 
  778         for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
  779                 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
  780 }
  781 
  782 #ifndef LINUX_REMOVE
  783 /* @@@TBD Consider taking outside of ecore... */
  784 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
  785                                           u16               vf_id,
  786                                           void              *ctx)
  787 {
  788         enum _ecore_status_t rc = ECORE_SUCCESS;
  789         struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
  790 
  791         if (vf != OSAL_NULL) {
  792                 vf->ctx = ctx;
  793 #ifdef CONFIG_ECORE_SW_CHANNEL
  794                 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
  795 #endif
  796         } else {
  797                 rc = ECORE_UNKNOWN_ERROR;
  798         }
  799         return rc;
  800 }
  801 #endif
  802 
  803 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn      *p_hwfn,
  804                                          struct ecore_ptt       *p_ptt,
  805                                          u8                     abs_vfid)
  806 {
  807         ecore_wr(p_hwfn, p_ptt,
  808                  PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
  809                  1 << (abs_vfid & 0x1f));
  810 }
  811 
  812 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
  813                                    struct ecore_ptt *p_ptt,
  814                                    struct ecore_vf_info *vf)
  815 {
  816         int i;
  817 
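               /* ecore_fid_pretend() makes subsequent register accesses go out
                * under the given concrete FID, so the IGU write below lands in
                * the VF's register context; pretending back to the PF's own
                * concrete FID afterwards restores normal addressing.
                */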
  818         /* Set VF masks and configuration - pretend */
  819         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
  820 
  821         ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
  822 
  823         /* unpretend */
  824         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
  825 
  826         /* iterate over all queues, clear sb consumer */
  827         for (i = 0; i < vf->num_sbs; i++)
  828                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
  829                                                   vf->igu_sbs[i],
  830                                                   vf->opaque_fid, true);
  831 }
  832 
  833 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn          *p_hwfn,
  834                                      struct ecore_ptt           *p_ptt,
  835                                      struct ecore_vf_info       *vf,
  836                                      bool                       enable)
  837 {
  838         u32 igu_vf_conf;
  839 
  840         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
  841 
  842         igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
  843 
  844         if (enable) {
  845                 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
  846         } else {
  847                 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
  848         }
  849 
  850         ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
  851 
  852         /* unpretend */
  853         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
  854 }
  855 
  856 static enum _ecore_status_t
  857 ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
  858                                 struct ecore_ptt *p_ptt,
  859                                 u8 abs_vf_id,
  860                                 u8 num_sbs)
  861 {
  862         u8 current_max = 0;
  863         int i;
  864 
  865         /* If client overrides this, don't do anything */
  866         if (p_hwfn->p_dev->b_dont_override_vf_msix)
  867                 return ECORE_SUCCESS;
  868 
   869         /* For AH onward, configuration is per-PF. Find the maximum across
   870          * all currently enabled child VFs, and set the number to that.
   871          */
  872         if (!ECORE_IS_BB(p_hwfn->p_dev)) {
  873                 ecore_for_each_vf(p_hwfn, i) {
  874                         struct ecore_vf_info *p_vf;
  875 
  876                         p_vf  = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
  877                         if (!p_vf)
  878                                 continue;
  879 
  880                         current_max = OSAL_MAX_T(u8, current_max,
  881                                                  p_vf->num_sbs);
  882                 }
  883         }
  884 
  885         if (num_sbs > current_max)
  886                 return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
  887                                                 abs_vf_id, num_sbs);
  888 
  889         return ECORE_SUCCESS;
  890 }
  891 
  892 static enum _ecore_status_t ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
  893                                                        struct ecore_ptt *p_ptt,
  894                                                        struct ecore_vf_info *vf)
  895 {
  896         u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
  897         enum _ecore_status_t rc = ECORE_SUCCESS;
  898 
  899         /* It's possible VF was previously considered malicious -
  900          * clear the indication even if we're only going to disable VF.
  901          */
  902         vf->b_malicious = false;
  903 
  904         if (vf->to_disable)
  905                 return ECORE_SUCCESS;
  906 
  907         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Enable internal access for vf %x [abs %x]\n",
  908                    vf->abs_vf_id, ECORE_VF_ABS_ID(p_hwfn, vf));
  909 
  910         ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
  911                                      ECORE_VF_ABS_ID(p_hwfn, vf));
  912 
  913         ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
  914 
  915         rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
  916                                              vf->abs_vf_id, vf->num_sbs);
  917         if (rc != ECORE_SUCCESS)
  918                 return rc;
  919 
  920         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
  921 
  922         SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
  923         STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
  924 
  925         ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
  926                        p_hwfn->hw_info.hw_mode);
  927 
  928         /* unpretend */
  929         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
  930 
  931         vf->state = VF_FREE;
  932 
  933         return rc;
  934 }
  935 
  936 /**
  937  * @brief ecore_iov_config_perm_table - configure the permission
  938  *      zone table.
   939  *      In E4, the queue zone permission table size is 320x9. There
   940  *      are 320 VF queues for a single-engine device (256 for a
   941  *      dual-engine device), and each entry has the following format:
  942  *      {Valid, VF[7:0]}
  943  * @param p_hwfn
  944  * @param p_ptt
  945  * @param vf
  946  * @param enable
  947  */
  948 static void ecore_iov_config_perm_table(struct ecore_hwfn       *p_hwfn,
  949                                         struct ecore_ptt        *p_ptt,
  950                                         struct ecore_vf_info    *vf,
  951                                         u8                      enable)
  952 {
  953         u32 reg_addr, val;
  954         u16 qzone_id = 0;
  955         int qid;
  956 
  957         for (qid = 0; qid < vf->num_rxqs; qid++) {
  958                 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
  959                                   &qzone_id);
  960 
  961                 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
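                       /* Entry format {Valid, VF[7:0]}: bit 8 marks the zone
                        * as owned and bits 7:0 carry the absolute VF id;
                        * writing 0 revokes the permission.
                        */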
  962                 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
  963                 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
  964         }
  965 }
  966 
  967 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
  968                                         struct ecore_ptt *p_ptt,
  969                                         struct ecore_vf_info *vf)
  970 {
  971         /* Reset vf in IGU - interrupts are still disabled */
  972         ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
  973 
  974         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
  975 
  976         /* Permission Table */
  977         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
  978 }
  979 
  980 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
  981                                      struct ecore_ptt *p_ptt,
  982                                      struct ecore_vf_info *vf,
  983                                      u16 num_rx_queues)
  984 {
  985         struct ecore_igu_block *p_block;
  986         struct cau_sb_entry sb_entry;
  987         int qid = 0;
  988         u32 val = 0;
  989 
  990         if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
  991                 num_rx_queues =
  992                 (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
  993         p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
  994 
  995         SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
  996         SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
  997         SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
  998 
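               /* For each Rx queue, claim a free IGU status block: point its
                * mapping-memory line at the VF (function number = abs VF id,
                * VALID set, PF_VALID clear), then mirror the matching CAU
                * entry into hardware via DMAE.
                */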
  999         for (qid = 0; qid < num_rx_queues; qid++) {
 1000                 p_block = ecore_get_igu_free_sb(p_hwfn, false);
 1001                 vf->igu_sbs[qid] = p_block->igu_sb_id;
 1002                 p_block->status &= ~ECORE_IGU_STATUS_FREE;
 1003                 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
 1004 
 1005                 ecore_wr(p_hwfn, p_ptt,
 1006                          IGU_REG_MAPPING_MEMORY +
 1007                          sizeof(u32) * p_block->igu_sb_id, val);
 1008 
  1009                 /* Configure the IGU SBs that were marked valid in the CAU */
 1010                 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
 1011                                         p_hwfn->rel_pf_id,
 1012                                         vf->abs_vf_id, 1);
 1013 
 1014                 ecore_dmae_host2grc(p_hwfn, p_ptt,
 1015                                     (u64)(osal_uintptr_t)&sb_entry,
 1016                                     CAU_REG_SB_VAR_MEMORY +
 1017                                     p_block->igu_sb_id * sizeof(u64), 2,
 1018                                     OSAL_NULL /* default parameters */);
 1019         }
 1020 
 1021         vf->num_sbs = (u8)num_rx_queues;
 1022 
 1023         return vf->num_sbs;
 1024 }
 1025 
 1026 /**
 1027  *
  1028  * @brief The function invalidates all the VF entries;
  1029  *        technically this isn't required, but it is added for
  1030  *        cleanliness and ease of debugging in case a VF attempts to
  1031  *        produce an interrupt after it has been taken down.
 1032  *
 1033  * @param p_hwfn
 1034  * @param p_ptt
 1035  * @param vf
 1036  */
 1037 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
 1038                                       struct ecore_ptt *p_ptt,
 1039                                       struct ecore_vf_info *vf)
 1040 
 1041 {
 1042         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
 1043         int idx, igu_id;
 1044         u32 addr, val;
 1045 
 1046         /* Invalidate igu CAM lines and mark them as free */
 1047         for (idx = 0; idx < vf->num_sbs; idx++) {
 1048                 igu_id = vf->igu_sbs[idx];
 1049                 addr = IGU_REG_MAPPING_MEMORY +
 1050                        sizeof(u32) * igu_id;
 1051 
 1052                 val = ecore_rd(p_hwfn, p_ptt, addr);
 1053                 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
 1054                 ecore_wr(p_hwfn, p_ptt, addr, val);
 1055 
 1056                 p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
 1057                 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
 1058         }
 1059 
 1060         vf->num_sbs = 0;
 1061 }
 1062 
 1063 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
 1064                         u16 vfid,
 1065                         struct ecore_mcp_link_params *params,
 1066                         struct ecore_mcp_link_state *link,
 1067                         struct ecore_mcp_link_capabilities *p_caps)
 1068 {
 1069         struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
 1070         struct ecore_bulletin_content *p_bulletin;
 1071 
 1072         if (!p_vf)
 1073                 return;
 1074 
 1075         p_bulletin = p_vf->bulletin.p_virt;
 1076         p_bulletin->req_autoneg = params->speed.autoneg;
 1077         p_bulletin->req_adv_speed = params->speed.advertised_speeds;
 1078         p_bulletin->req_forced_speed = params->speed.forced_speed;
 1079         p_bulletin->req_autoneg_pause = params->pause.autoneg;
 1080         p_bulletin->req_forced_rx = params->pause.forced_rx;
 1081         p_bulletin->req_forced_tx = params->pause.forced_tx;
 1082         p_bulletin->req_loopback = params->loopback_mode;
 1083 
 1084         p_bulletin->link_up = link->link_up;
 1085         p_bulletin->speed = link->speed;
 1086         p_bulletin->full_duplex = link->full_duplex;
 1087         p_bulletin->autoneg = link->an;
 1088         p_bulletin->autoneg_complete = link->an_complete;
 1089         p_bulletin->parallel_detection = link->parallel_detection;
 1090         p_bulletin->pfc_enabled = link->pfc_enabled;
 1091         p_bulletin->partner_adv_speed = link->partner_adv_speed;
 1092         p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
 1093         p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
 1094         p_bulletin->partner_adv_pause = link->partner_adv_pause;
 1095         p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
 1096 
 1097         p_bulletin->capability_speed = p_caps->speed_capabilities;
 1098 }
 1099 
 1100 enum _ecore_status_t
 1101 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 1102                          struct ecore_ptt *p_ptt,
 1103                          struct ecore_iov_vf_init_params *p_params)
 1104 {
 1105         struct ecore_mcp_link_capabilities link_caps;
 1106         struct ecore_mcp_link_params link_params;
 1107         struct ecore_mcp_link_state link_state;
 1108         u8 num_of_vf_avaiable_chains  = 0;
 1109         struct ecore_vf_info *vf = OSAL_NULL;
 1110         u16 qid, num_irqs;
 1111         enum _ecore_status_t rc = ECORE_SUCCESS;
 1112         u32 cids;
 1113         u8 i;
 1114 
 1115         vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
 1116         if (!vf) {
 1117                 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
 1118                 return ECORE_UNKNOWN_ERROR;
 1119         }
 1120 
 1121         if (vf->b_init) {
 1122                 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
 1123                           p_params->rel_vf_id);
 1124                 return ECORE_INVAL;
 1125         }
 1126 
 1127         /* Perform sanity checking on the requested vport/rss */
 1128         if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
 1129                 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
 1130                           p_params->rel_vf_id, p_params->vport_id);
 1131                 return ECORE_INVAL;
 1132         }
 1133 
 1134         if ((p_params->num_queues > 1) &&
 1135             (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
 1136                 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
 1137                           p_params->rel_vf_id, p_params->rss_eng_id);
 1138                 return ECORE_INVAL;
 1139         }
 1140 
  1141         /* TODO - remove this once we gain confidence in the change */
 1142         if (!p_params->vport_id) {
 1143                 DP_NOTICE(p_hwfn, false,
 1144                           "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
 1145                           p_params->rel_vf_id);
 1146         }
 1147         if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
 1148                 DP_NOTICE(p_hwfn, false,
 1149                           "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
 1150                           p_params->rel_vf_id);
 1151         }
 1152         vf->vport_id = p_params->vport_id;
 1153         vf->rss_eng_id = p_params->rss_eng_id;
 1154 
 1155         /* Since it's possible to relocate SBs, it's a bit difficult to check
 1156          * things here. Simply check whether the index falls in the range
 1157          * belonging to the PF.
 1158          */
 1159         for (i = 0; i < p_params->num_queues; i++) {
 1160                 qid = p_params->req_rx_queue[i];
 1161                 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
 1162                         DP_NOTICE(p_hwfn, true,
  1163                                   "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
 1164                                   qid, p_params->rel_vf_id,
 1165                                   (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
 1166                         return ECORE_INVAL;
 1167                 }
 1168 
 1169                 qid = p_params->req_tx_queue[i];
 1170                 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
 1171                         DP_NOTICE(p_hwfn, true,
  1172                                   "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
 1173                                   qid, p_params->rel_vf_id,
 1174                                   (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
 1175                         return ECORE_INVAL;
 1176                 }
 1177         }
 1178 
 1179         /* Limit number of queues according to number of CIDs */
 1180         ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
 1181         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1182                    "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
 1183                    vf->relative_vf_id, p_params->num_queues, (u16)cids);
 1184         num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
 1185 
 1186         num_of_vf_avaiable_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
 1187                                                                p_ptt,
 1188                                                                vf,
 1189                                                                num_irqs);
 1190         if (num_of_vf_avaiable_chains == 0) {
 1191                 DP_ERR(p_hwfn, "no available igu sbs\n");
 1192                 return ECORE_NOMEM;
 1193         }
 1194 
 1195         /* Choose queue number and index ranges */
 1196         vf->num_rxqs = num_of_vf_avaiable_chains;
 1197         vf->num_txqs = num_of_vf_avaiable_chains;
 1198 
 1199         for (i = 0; i < vf->num_rxqs; i++) {
 1200                 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
 1201 
 1202                 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
 1203                 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 1204 
 1205                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1206                            "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
 1207                            vf->relative_vf_id, i, vf->igu_sbs[i],
 1208                            p_queue->fw_rx_qid, p_queue->fw_tx_qid);
 1209         }
 1210 
 1211         /* Update the link configuration in bulletin.
 1212          */
 1213         OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
 1214                     sizeof(link_params));
 1215         OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
 1216                     sizeof(link_state));
 1217         OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
 1218                     sizeof(link_caps));
 1219         ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
 1220                            &link_params, &link_state, &link_caps);
 1221 
 1222         rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
 1223 
 1224         if (rc == ECORE_SUCCESS) {
 1225                 vf->b_init = true;
 1226 #ifndef REMOVE_DBG
 1227                 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
 1228                         (1ULL << (vf->relative_vf_id % 64));
 1229 #endif
 1230 
 1231                 if (IS_LEAD_HWFN(p_hwfn))
 1232                         p_hwfn->p_dev->p_iov_info->num_vfs++;
 1233         }
 1234 
 1235         return rc;
 1236 }
 1237 
 1238 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
 1239                                                  struct ecore_ptt  *p_ptt,
 1240                                                  u16               rel_vf_id)
 1241 {
 1242         struct ecore_mcp_link_capabilities caps;
 1243         struct ecore_mcp_link_params params;
 1244         struct ecore_mcp_link_state link;
 1245         struct ecore_vf_info *vf = OSAL_NULL;
 1246 
 1247         vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 1248         if (!vf) {
 1249                 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
 1250                 return ECORE_UNKNOWN_ERROR;
 1251         }
 1252 
 1253         if (vf->bulletin.p_virt)
 1254                 OSAL_MEMSET(vf->bulletin.p_virt, 0,
 1255                             sizeof(*vf->bulletin.p_virt));
 1256 
 1257         OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
 1258 
 1259         /* Get the link configuration back in bulletin so
 1260          * that when VFs are re-enabled they get the actual
 1261          * link configuration.
 1262          */
 1263         OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
 1264         OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
 1265         OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
 1266                     sizeof(caps));
 1267         ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
 1268 
 1269         /* Forget the VF's acquisition message */
 1270         OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
 1271 
 1272         /* Disabling interrupts and resetting the permission table were done
 1273          * during vf-close; however, we could get here without going through vf_close.
 1274          */
 1275         /* Disable Interrupts for VF */
 1276         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
 1277 
 1278         /* Reset Permission table */
 1279         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
 1280 
 1281         vf->num_rxqs = 0;
 1282         vf->num_txqs = 0;
 1283         ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
 1284 
 1285         if (vf->b_init) {
 1286                 vf->b_init = false;
 1287 #ifndef REMOVE_DBG
 1288                 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
 1289                                         ~(1ULL << (vf->relative_vf_id % 64));
 1290 #endif
 1291 
 1292                 if (IS_LEAD_HWFN(p_hwfn))
 1293                         p_hwfn->p_dev->p_iov_info->num_vfs--;
 1294         }
 1295 
 1296         return ECORE_SUCCESS;
 1297 }
 1298 
 1299 static bool ecore_iov_tlv_supported(u16 tlvtype)
 1300 {
 1301         return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
 1302 }
 1303 
 1304 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
 1305                                          struct ecore_vf_info *vf,
 1306                                          u16 tlv)
 1307 {
 1308         /* lock the channel */
 1309         /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
 1310 
 1311         /* record the locking op */
 1312         /* vf->op_current = tlv; @@@TBD MichalK */
 1313 
 1314         /* log the lock */
 1315         if (ecore_iov_tlv_supported(tlv))
 1316                 DP_VERBOSE(p_hwfn,
 1317                            ECORE_MSG_IOV,
 1318                            "VF[%d]: vf pf channel locked by %s\n",
 1319                            vf->abs_vf_id,
 1320                            ecore_channel_tlvs_string[tlv]);
 1321         else
 1322                 DP_VERBOSE(p_hwfn,
 1323                            ECORE_MSG_IOV,
 1324                            "VF[%d]: vf pf channel locked by %04x\n",
 1325                            vf->abs_vf_id, tlv);
 1326 }
 1327 
 1328 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
 1329                                            struct ecore_vf_info *vf,
 1330                                            u16 expected_tlv)
 1331 {
 1332         /*WARN(expected_tlv != vf->op_current,
 1333              "lock mismatch: expected %s found %s",
 1334              channel_tlvs_string[expected_tlv],
 1335              channel_tlvs_string[vf->op_current]);
 1336              @@@TBD MichalK
 1337         */
 1338 
 1339         /* lock the channel */
 1340         /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
 1341 
 1342         /* log the unlock */
 1343         if (ecore_iov_tlv_supported(expected_tlv))
 1344                 DP_VERBOSE(p_hwfn,
 1345                            ECORE_MSG_IOV,
 1346                            "VF[%d]: vf pf channel unlocked by %s\n",
 1347                            vf->abs_vf_id,
 1348                            ecore_channel_tlvs_string[expected_tlv]);
 1349         else
 1350                 DP_VERBOSE(p_hwfn,
 1351                            ECORE_MSG_IOV,
 1352                            "VF[%d]: vf pf channel unlocked by %04x\n",
 1353                            vf->abs_vf_id, expected_tlv);
 1354 
 1355         /* record the locking op */
 1356         /* vf->op_current = CHANNEL_TLV_NONE;*/
 1357 }
 1358 
 1359 /* place a given tlv on the tlv buffer, continuing current tlv list */
 1360 void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
 1361 {
 1362         struct channel_tlv *tl = (struct channel_tlv *)*offset;
 1363 
 1364         tl->type = type;
 1365         tl->length = length;
 1366 
 1367         /* Offset should keep pointing to the next TLV (the end of the last one) */
 1368         *offset += length;
 1369 
 1370         /* Return a pointer to the start of the added tlv */
 1371         return *offset - length;
 1372 }
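
/* Usage sketch (illustrative): a channel message is a chain of TLVs built
 * with ecore_add_tlv() and terminated by CHANNEL_TLV_LIST_END - the same
 * pattern ecore_iov_prepare_resp() below follows. Assuming a populated
 * mailbox 'mbx':
 *
 *      u8 *offset = (u8 *)mbx->reply_virt;
 *      struct pfvf_def_resp_tlv *resp;
 *
 *      resp = ecore_add_tlv(&offset, CHANNEL_TLV_ACQUIRE, sizeof(*resp));
 *      resp->hdr.status = PFVF_STATUS_SUCCESS;
 *      ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
 *                    sizeof(struct channel_list_end_tlv));
 */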
 1373 
 1374 /* list the types and lengths of the tlvs on the buffer */
 1375 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
 1376 {
 1377         u16 i = 1, total_length = 0;
 1378         struct channel_tlv *tlv;
 1379 
 1380         do {
 1381                 /* cast current tlv list entry to channel tlv header*/
 1382                 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
 1383 
 1384                 /* output tlv */
 1385                 if (ecore_iov_tlv_supported(tlv->type))
 1386                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1387                                    "TLV number %d: type %s, length %d\n",
 1388                                    i, ecore_channel_tlvs_string[tlv->type],
 1389                                    tlv->length);
 1390                 else
 1391                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1392                                    "TLV number %d: type %d, length %d\n",
 1393                                    i, tlv->type, tlv->length);
 1394 
 1395                 if (tlv->type == CHANNEL_TLV_LIST_END)
 1396                         return;
 1397 
 1398                 /* Validate entry - protect against malicious VFs */
 1399                 if (!tlv->length) {
 1400                         DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
 1401                         return;
 1402                 }
 1403 
 1404                 total_length += tlv->length;
 1405 
 1406                 if (total_length >= sizeof(struct tlv_buffer_size)) {
 1407                         DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
 1408                         return;
 1409                 }
 1410 
 1411                 i++;
 1412         } while (1);
 1413 }
 1414 
 1415 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 1416                                     struct ecore_ptt *p_ptt,
 1417                                     struct ecore_vf_info *p_vf,
 1418 #ifdef CONFIG_ECORE_SW_CHANNEL
 1419                                     u16 length,
 1420 #else
 1421                                     u16 OSAL_UNUSED length,
 1422 #endif
 1423                                     u8 status)
 1424 {
 1425         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 1426         struct ecore_dmae_params params;
 1427         u8 eng_vf_id;
 1428 
 1429         mbx->reply_virt->default_resp.hdr.status = status;
 1430 
 1431         ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
 1432 
 1433 #ifdef CONFIG_ECORE_SW_CHANNEL
 1434         mbx->sw_mbx.response_size =
 1435                 length + sizeof(struct channel_list_end_tlv);
 1436 
 1437         if (!p_vf->b_hw_channel)
 1438                 return;
 1439 #endif
 1440 
 1441         eng_vf_id = p_vf->abs_vf_id;
 1442 
 1443         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
 1444         params.flags = ECORE_DMAE_FLAG_VF_DST;
 1445         params.dst_vfid = eng_vf_id;
 1446 
 1447         ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
 1448                              mbx->req_virt->first_tlv.reply_address +
 1449                              sizeof(u64),
 1450                              (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
 1451                              &params);
 1452 
 1453         /* Once the PF copies the rc to the VF, the latter can continue
 1454          * and send an additional message, so we have to make sure the
 1455          * channel is reset to ready prior to that.
 1456          */
 1457         REG_WR(p_hwfn,
 1458                GTT_BAR0_MAP_REG_USDM_RAM +
 1459                USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id),
 1460                1);
 1461 
 1462         ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
 1463                              mbx->req_virt->first_tlv.reply_address,
 1464                              sizeof(u64) / 4, &params);
 1465 
 1466         OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
 1467 }
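
/* Ordering note (illustrative): the reply body is DMAed first while skipping
 * its leading u64, the channel is then marked ready, and only afterwards is
 * that leading u64 - which holds hdr.status - copied. A VF polling the status
 * word therefore never observes a completed status before the rest of the
 * reply is visible. A hypothetical VF-side wait (not driver code, assuming a
 * zero-valued "waiting" status) would be:
 *
 *      while (resp->hdr.status == PFVF_STATUS_WAITING)
 *              continue;       // spin until the PF writes the status last
 *      // the full reply payload is valid at this point
 */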
 1468 
 1469 static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
 1470 {
 1471         switch (flag) {
 1472         case ECORE_IOV_VP_UPDATE_ACTIVATE:
 1473                 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 1474         case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
 1475                 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
 1476         case ECORE_IOV_VP_UPDATE_TX_SWITCH:
 1477                 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
 1478         case ECORE_IOV_VP_UPDATE_MCAST:
 1479                 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
 1480         case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
 1481                 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
 1482         case ECORE_IOV_VP_UPDATE_RSS:
 1483                 return CHANNEL_TLV_VPORT_UPDATE_RSS;
 1484         case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
 1485                 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
 1486         case ECORE_IOV_VP_UPDATE_SGE_TPA:
 1487                 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
 1488         default:
 1489                 return 0;
 1490         }
 1491 }
 1492 
 1493 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
 1494                                               struct ecore_vf_info *p_vf,
 1495                                               struct ecore_iov_vf_mbx *p_mbx,
 1496                                               u8 status, u16 tlvs_mask,
 1497                                               u16 tlvs_accepted)
 1498 {
 1499         struct pfvf_def_resp_tlv *resp;
 1500         u16 size, total_len, i;
 1501 
 1502         OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
 1503         p_mbx->offset = (u8 *)p_mbx->reply_virt;
 1504         size = sizeof(struct pfvf_def_resp_tlv);
 1505         total_len = size;
 1506 
 1507         ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
 1508 
 1509         /* Prepare response for all extended tlvs if they are found by PF */
 1510         for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
 1511                 if (!(tlvs_mask & (1 << i)))
 1512                         continue;
 1513 
 1514                 resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
 1515                                      size);
 1516 
 1517                 if (tlvs_accepted & (1 << i))
 1518                         resp->hdr.status = status;
 1519                 else
 1520                         resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
 1521 
 1522                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1523                            "VF[%d] - vport_update response: TLV %d, status %02x\n",
 1524                            p_vf->relative_vf_id,
 1525                            ecore_iov_vport_to_tlv(i),
 1526                            resp->hdr.status);
 1527 
 1528                 total_len += size;
 1529         }
 1530 
 1531         ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
 1532                       sizeof(struct channel_list_end_tlv));
 1533 
 1534         return total_len;
 1535 }
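
/* Worked example (illustrative): if a VF's vport-update request carried the
 * ACTIVATE and RSS extended TLVs, tlvs_mask has the
 * ECORE_IOV_VP_UPDATE_ACTIVATE and ECORE_IOV_VP_UPDATE_RSS bits set. Should
 * the PF honor only ACTIVATE, tlvs_accepted holds just that bit, so the reply
 * carries two extended TLVs: ACTIVATE echoing 'status' and RSS set to
 * PFVF_STATUS_NOT_SUPPORTED.
 */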
 1536 
 1537 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
 1538                                    struct ecore_ptt *p_ptt,
 1539                                    struct ecore_vf_info *vf_info,
 1540                                    u16 type, u16 length, u8 status)
 1541 {
 1542         struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
 1543 
 1544         mbx->offset = (u8 *)mbx->reply_virt;
 1545 
 1546         ecore_add_tlv(&mbx->offset, type, length);
 1547         ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 1548                       sizeof(struct channel_list_end_tlv));
 1549 
 1550         ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
 1551 }
 1552 
 1553 struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
 1554                                                            u16 relative_vf_id,
 1555                                                            bool b_enabled_only)
 1556 {
 1557         struct ecore_vf_info *vf = OSAL_NULL;
 1558 
 1559         vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
 1560         if (!vf)
 1561                 return OSAL_NULL;
 1562 
 1563         return &vf->p_vf_info;
 1564 }
 1565 
 1566 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 1567                                  struct ecore_vf_info *p_vf)
 1568 {
 1569         u32 i, j;
 1570 
 1571         p_vf->vf_bulletin = 0;
 1572         p_vf->vport_instance = 0;
 1573         p_vf->configured_features = 0;
 1574 
 1575         /* If VF previously requested less resources, go back to default */
 1576         p_vf->num_rxqs = p_vf->num_sbs;
 1577         p_vf->num_txqs = p_vf->num_sbs;
 1578 
 1579         p_vf->num_active_rxqs = 0;
 1580 
 1581         for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
 1582                 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
 1583 
 1584                 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
 1585                         if (!p_queue->cids[j].p_cid)
 1586                                 continue;
 1587 
 1588                         ecore_eth_queue_cid_release(p_hwfn,
 1589                                                     p_queue->cids[j].p_cid);
 1590                         p_queue->cids[j].p_cid = OSAL_NULL;
 1591                 }
 1592         }
 1593 
 1594         OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
 1595         OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
 1596         OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 1597 }
 1598 
 1599 /* Returns either 0, or log(size) */
 1600 static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
 1601                                     struct ecore_ptt *p_ptt)
 1602 {
 1603         u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
 1604 
 1605         if (val)
 1606                 return val + 11;
 1607         return 0;
 1608 }
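
/* Example (illustrative): the register encodes the doorbell bar size so that
 * log2(bar size in bytes) = val + 11; a register value of 5 thus makes
 * ecore_iov_vf_db_bar_size() return 16, i.e. a 1 << 16 = 64KB bar. The
 * acquire-resource path below expands the log back into bytes before dividing
 * by the per-CID doorbell stride to bound the number of CIDs.
 */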
 1609 
 1610 static void
 1611 ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
 1612                                    struct ecore_ptt *p_ptt,
 1613                                    struct ecore_vf_info *p_vf,
 1614                                    struct vf_pf_resc_request *p_req,
 1615                                    struct pf_vf_resc *p_resp)
 1616 {
 1617         u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
 1618         u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
 1619                      DB_ADDR_VF(0, DQ_DEMS_LEGACY);
 1620         u32 bar_size;
 1621 
 1622         p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
 1623 
 1624         /* If the VF didn't bother asking for QIDs then don't bother limiting
 1625          * number of CIDs. The VF doesn't care about the number, and this
 1626          * has the likely result of causing an additional acquisition.
 1627          */
 1628         if (!(p_vf->acquire.vfdev_info.capabilities &
 1629               VFPF_ACQUIRE_CAP_QUEUE_QIDS))
 1630                 return;
 1631 
 1632         /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
 1633          * that would make sure doorbells for all CIDs fall within the bar.
 1634          * If it doesn't, make sure regview window is sufficient.
 1635          */
 1636         if (p_vf->acquire.vfdev_info.capabilities &
 1637             VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
 1638                 bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
 1639                 if (bar_size)
 1640                         bar_size = 1 << bar_size;
 1641 
 1642                 if (ECORE_IS_CMT(p_hwfn->p_dev))
 1643                         bar_size /= 2;
 1644         } else {
 1645                 bar_size = PXP_VF_BAR0_DQ_LENGTH;
 1646         }
 1647 
 1648         if (bar_size / db_size < 256)
 1649                 p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
 1650                                               (u8)(bar_size / db_size));
 1651 }
 1652 
 1653 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 1654                                         struct ecore_ptt *p_ptt,
 1655                                         struct ecore_vf_info *p_vf,
 1656                                         struct vf_pf_resc_request *p_req,
 1657                                         struct pf_vf_resc *p_resp)
 1658 {
 1659         u8 i;
 1660 
 1661         /* Queue related information */
 1662         p_resp->num_rxqs = p_vf->num_rxqs;
 1663         p_resp->num_txqs = p_vf->num_txqs;
 1664         p_resp->num_sbs = p_vf->num_sbs;
 1665 
 1666         for (i = 0; i < p_resp->num_sbs; i++) {
 1667                 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
 1668                 /* TODO - what's this sb_qid field? Is it deprecated?
 1669                  * or is there an ecore_client that looks at this?
 1670                  */
 1671                 p_resp->hw_sbs[i].sb_qid = 0;
 1672         }
 1673 
 1674         /* These fields are filled for backward compatibility.
 1675          * Unused by modern vfs.
 1676          */
 1677         for (i = 0; i < p_resp->num_rxqs; i++) {
 1678                 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
 1679                                   (u16 *)&p_resp->hw_qid[i]);
 1680                 p_resp->cid[i] = i;
 1681         }
 1682 
 1683         /* Filter related information */
 1684         p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
 1685                                              p_req->num_mac_filters);
 1686         p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
 1687                                               p_req->num_vlan_filters);
 1688 
 1689         ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
 1690 
 1691         /* This isn't really needed/enforced, but some legacy VFs might depend
 1692          * on the correct filling of this field.
 1693          */
 1694         p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
 1695 
 1696         /* Validate sufficient resources for VF */
 1697         if (p_resp->num_rxqs < p_req->num_rxqs ||
 1698             p_resp->num_txqs < p_req->num_txqs ||
 1699             p_resp->num_sbs < p_req->num_sbs ||
 1700             p_resp->num_mac_filters < p_req->num_mac_filters ||
 1701             p_resp->num_vlan_filters < p_req->num_vlan_filters ||
 1702             p_resp->num_mc_filters < p_req->num_mc_filters ||
 1703             p_resp->num_cids < p_req->num_cids) {
 1704                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1705                            "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
 1706                            p_vf->abs_vf_id,
 1707                            p_req->num_rxqs, p_resp->num_rxqs,
 1708                            p_req->num_txqs, p_resp->num_txqs,
 1709                            p_req->num_sbs, p_resp->num_sbs,
 1710                            p_req->num_mac_filters, p_resp->num_mac_filters,
 1711                            p_req->num_vlan_filters, p_resp->num_vlan_filters,
 1712                            p_req->num_mc_filters, p_resp->num_mc_filters,
 1713                            p_req->num_cids, p_resp->num_cids);
 1714 
 1715                 /* Some legacy OSes are incapable of correctly handling this
 1716                  * failure.
 1717                  */
 1718                 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
 1719                      ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
 1720                     (p_vf->acquire.vfdev_info.os_type ==
 1721                      VFPF_ACQUIRE_OS_WINDOWS))
 1722                         return PFVF_STATUS_SUCCESS;
 1723 
 1724                 return PFVF_STATUS_NO_RESOURCE;
 1725         }
 1726 
 1727         return PFVF_STATUS_SUCCESS;
 1728 }
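
/* Worked example (illustrative): a VF requesting 16 Rx queues from a PF that
 * provisioned only 8 receives p_resp->num_rxqs == 8; since that is less than
 * requested, the PF answers PFVF_STATUS_NO_RESOURCE and the VF is expected to
 * retry its ACQUIRE with the advertised amounts. The one carve-out above is a
 * legacy Windows VF on the NO_PKT_LEN_TUNN HSI, which is told SUCCESS because
 * it cannot handle the failure correctly.
 */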
 1729 
 1730 static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
 1731 {
 1732         p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
 1733                                   OFFSETOF(struct mstorm_vf_zone,
 1734                                            non_trigger.eth_queue_stat);
 1735         p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
 1736         p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
 1737                                   OFFSETOF(struct ustorm_vf_zone,
 1738                                            non_trigger.eth_queue_stat);
 1739         p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
 1740         p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
 1741                                   OFFSETOF(struct pstorm_vf_zone,
 1742                                            non_trigger.eth_queue_stat);
 1743         p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
 1744         p_stats->tstats.address = 0;
 1745         p_stats->tstats.len = 0;
 1746 }
 1747 
 1748 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 1749                                      struct ecore_ptt        *p_ptt,
 1750                                      struct ecore_vf_info    *vf)
 1751 {
 1752         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 1753         struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
 1754         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 1755         struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
 1756         u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
 1757         struct pf_vf_resc *resc = &resp->resc;
 1758         enum _ecore_status_t rc;
 1759 
 1760         OSAL_MEMSET(resp, 0, sizeof(*resp));
 1761 
 1762         /* Write the PF version so that the VF knows which version
 1763          * is supported - it might be overridden later. This guarantees that
 1764          * the VF can recognize a legacy PF by the lack of versions in the reply.
 1765          */
 1766         pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
 1767         pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
 1768 
 1769         /* TODO - not doing anything is bad since we'll assert, but this isn't
 1770          * necessarily the right behavior - perhaps we should have allowed some
 1771          * versatility here.
 1772          */
 1773         if (vf->state != VF_FREE &&
 1774             vf->state != VF_STOPPED) {
 1775                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1776                            "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
 1777                            vf->abs_vf_id, vf->state);
 1778                 goto out;
 1779         }
 1780 
 1781         /* Validate FW compatibility */
 1782         if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
 1783                 if (req->vfdev_info.capabilities &
 1784                     VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
 1785                         struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
 1786 
 1787                         /* This legacy support would need to be removed once
 1788                          * the major has changed.
 1789                          */
 1790                         OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
 1791 
 1792                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1793                                    "VF[%d] is pre-fastpath HSI\n",
 1794                                    vf->abs_vf_id);
 1795                         p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
 1796                         p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
 1797                 } else {
 1798                         DP_INFO(p_hwfn,
 1799                                 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
 1800                                 vf->abs_vf_id,
 1801                                 req->vfdev_info.eth_fp_hsi_major,
 1802                                 req->vfdev_info.eth_fp_hsi_minor,
 1803                                 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
 1804 
 1805                         goto out;
 1806                 }
 1807         }
 1808 
 1809         /* On 100g PFs, prevent old VFs from loading */
 1810         if (ECORE_IS_CMT(p_hwfn->p_dev) &&
 1811             !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
 1812                 DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n",
 1813                         vf->abs_vf_id);
 1814                 goto out;
 1815         }
 1816 
 1817 #ifndef __EXTRACT__LINUX__
 1818         if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
 1819                 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
 1820                 goto out;
 1821         }
 1822 #endif
 1823 
 1824         /* Store the acquire message */
 1825         OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
 1826 
 1827         vf->opaque_fid = req->vfdev_info.opaque_fid;
 1828 
 1829         vf->vf_bulletin = req->bulletin_addr;
 1830         vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
 1831                             vf->bulletin.size : req->bulletin_size;
 1832 
 1833         /* fill in pfdev info */
 1834         pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
 1835         pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
 1836         pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 1837 
 1838         pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 1839                                    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
 1840         if (ECORE_IS_CMT(p_hwfn->p_dev))
 1841                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 1842 
 1843         /* Share our ability to use multiple queue-ids only with VFs
 1844          * that request it.
 1845          */
 1846         if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
 1847                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
 1848 
 1849         /* Share the sizes of the bars with VF */
 1850         resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
 1851                                                                  p_ptt);
 1852 
 1853         ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 1854 
 1855         OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
 1856                     ETH_ALEN);
 1857 
 1858         pfdev_info->fw_major = FW_MAJOR_VERSION;
 1859         pfdev_info->fw_minor = FW_MINOR_VERSION;
 1860         pfdev_info->fw_rev = FW_REVISION_VERSION;
 1861         pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
 1862 
 1863         /* Incorrect when legacy, but doesn't matter as legacy isn't reading
 1864          * this field.
 1865          */
 1866         pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
 1867                                               req->vfdev_info.eth_fp_hsi_minor);
 1868         pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
 1869         ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
 1870                               OSAL_NULL);
 1871 
 1872         pfdev_info->dev_type = p_hwfn->p_dev->type;
 1873         pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
 1874 
 1875         /* Fill resources available to VF; Make sure there are enough to
 1876          * satisfy the VF's request.
 1877          */
 1878         vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
 1879                                                     &req->resc_request, resc);
 1880         if (vfpf_status != PFVF_STATUS_SUCCESS)
 1881                 goto out;
 1882 
 1883         /* Start the VF in FW */
 1884         rc = ecore_sp_vf_start(p_hwfn, vf);
 1885         if (rc != ECORE_SUCCESS) {
 1886                 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
 1887                           vf->abs_vf_id);
 1888                 vfpf_status = PFVF_STATUS_FAILURE;
 1889                 goto out;
 1890         }
 1891 
 1892         /* Fill agreed size of bulletin board in response, and post
 1893          * an initial image to the bulletin board.
 1894          */
 1895         resp->bulletin_size = vf->bulletin.size;
 1896         ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
 1897 
 1898         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1899                    "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
 1900                    "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
 1901                    vf->abs_vf_id, resp->pfdev_info.chip_num,
 1902                    resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
 1903                    (unsigned long long)resp->pfdev_info.capabilities, resc->num_rxqs,
 1904                    resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
 1905                    resc->num_vlan_filters);
 1906 
 1907         vf->state = VF_ACQUIRED;
 1908 
 1909 out:
 1910         /* Prepare Response */
 1911         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
 1912                                sizeof(struct pfvf_acquire_resp_tlv),
 1913                                vfpf_status);
 1914 }
 1915 
 1916 static enum _ecore_status_t __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
 1917                                                      struct ecore_vf_info *p_vf, bool val)
 1918 {
 1919         struct ecore_sp_vport_update_params params;
 1920         enum _ecore_status_t rc;
 1921 
 1922         if (val == p_vf->spoof_chk) {
 1923                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1924                            "Spoofchk value[%d] is already configured\n",
 1925                            val);
 1926                 return ECORE_SUCCESS;
 1927         }
 1928 
 1929         OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
 1930         params.opaque_fid = p_vf->opaque_fid;
 1931         params.vport_id = p_vf->vport_id;
 1932         params.update_anti_spoofing_en_flg = 1;
 1933         params.anti_spoofing_en = val;
 1934 
 1935         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
 1936                                    OSAL_NULL);
 1937         if (rc == ECORE_SUCCESS) {
 1938                 p_vf->spoof_chk = val;
 1939                 p_vf->req_spoofchk_val = p_vf->spoof_chk;
 1940                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1941                            "Spoofchk val[%d] configured\n", val);
 1942         } else {
 1943                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1944                            "Spoofchk configuration[val:%d] failed for VF[%d]\n",
 1945                            val, p_vf->relative_vf_id);
 1946         }
 1947 
 1948         return rc;
 1949 }
 1950 
 1951 static enum _ecore_status_t ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
 1952                                                                struct ecore_vf_info *p_vf)
 1953 {
 1954         struct ecore_filter_ucast filter;
 1955         enum _ecore_status_t rc = ECORE_SUCCESS;
 1956         int i;
 1957 
 1958         OSAL_MEMSET(&filter, 0, sizeof(filter));
 1959         filter.is_rx_filter = 1;
 1960         filter.is_tx_filter = 1;
 1961         filter.vport_to_add_to = p_vf->vport_id;
 1962         filter.opcode = ECORE_FILTER_ADD;
 1963 
 1964         /* Reconfigure vlans */
 1965         for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
 1966                 if (!p_vf->shadow_config.vlans[i].used)
 1967                         continue;
 1968 
 1969                 filter.type = ECORE_FILTER_VLAN;
 1970                 filter.vlan = p_vf->shadow_config.vlans[i].vid;
 1971                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 1972                            "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
 1973                            filter.vlan, p_vf->relative_vf_id);
 1974                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
 1975                                                &filter, ECORE_SPQ_MODE_CB, OSAL_NULL);
 1976                 if (rc) {
 1977                         DP_NOTICE(p_hwfn, true, "Failed to configure VLAN [%04x] to VF [%04x]\n",
 1978                                   filter.vlan,
 1979                                   p_vf->relative_vf_id);
 1980                         break;
 1981                 }
 1982         }
 1983 
 1984         return rc;
 1985 }
 1986 
 1987 static enum _ecore_status_t
 1988 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
 1989                                      struct ecore_vf_info *p_vf,
 1990                                      u64 events)
 1991 {
 1992         enum _ecore_status_t rc = ECORE_SUCCESS;
 1993 
 1994         /*TODO - what about MACs? */
 1995 
 1996         if ((events & (1 << VLAN_ADDR_FORCED)) &&
 1997             !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
 1998                 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
 1999 
 2000         return rc;
 2001 }
 2002 
 2003 static  enum _ecore_status_t
 2004 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 2005                                  struct ecore_vf_info *p_vf,
 2006                                  u64 events)
 2007 {
 2008         enum _ecore_status_t rc = ECORE_SUCCESS;
 2009         struct ecore_filter_ucast filter;
 2010 
 2011         if (!p_vf->vport_instance)
 2012                 return ECORE_INVAL;
 2013 
 2014         if (events & (1 << MAC_ADDR_FORCED)) {
 2015                 /* Since there's no way [currently] of removing the MAC,
 2016                  * we can always assume this means we need to force it.
 2017                  */
 2018                 OSAL_MEMSET(&filter, 0, sizeof(filter));
 2019                 filter.type = ECORE_FILTER_MAC;
 2020                 filter.opcode = ECORE_FILTER_REPLACE;
 2021                 filter.is_rx_filter = 1;
 2022                 filter.is_tx_filter = 1;
 2023                 filter.vport_to_add_to = p_vf->vport_id;
 2024                 OSAL_MEMCPY(filter.mac,
 2025                             p_vf->bulletin.p_virt->mac,
 2026                             ETH_ALEN);
 2027 
 2028                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
 2029                                                &filter,
 2030                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
 2031                 if (rc) {
 2032                         DP_NOTICE(p_hwfn, true,
 2033                                   "PF failed to configure MAC for VF\n");
 2034                         return rc;
 2035                 }
 2036 
 2037                 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
 2038         }
 2039 
 2040         if (events & (1 << VLAN_ADDR_FORCED)) {
 2041                 struct ecore_sp_vport_update_params vport_update;
 2042                 u8 removal;
 2043                 int i;
 2044 
 2045                 OSAL_MEMSET(&filter, 0, sizeof(filter));
 2046                 filter.type = ECORE_FILTER_VLAN;
 2047                 filter.is_rx_filter = 1;
 2048                 filter.is_tx_filter = 1;
 2049                 filter.vport_to_add_to = p_vf->vport_id;
 2050                 filter.vlan = p_vf->bulletin.p_virt->pvid;
 2051                 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
 2052                                               ECORE_FILTER_FLUSH;
 2053 
 2054                 /* Send the ramrod */
 2055                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
 2056                                                &filter,
 2057                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
 2058                 if (rc) {
 2059                         DP_NOTICE(p_hwfn, true,
 2060                                   "PF failed to configure VLAN for VF\n");
 2061                         return rc;
 2062                 }
 2063 
 2064                 /* Update the default-vlan & silent vlan stripping */
 2065                 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
 2066                 vport_update.opaque_fid = p_vf->opaque_fid;
 2067                 vport_update.vport_id = p_vf->vport_id;
 2068                 vport_update.update_default_vlan_enable_flg = 1;
 2069                 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
 2070                 vport_update.update_default_vlan_flg = 1;
 2071                 vport_update.default_vlan = filter.vlan;
 2072 
 2073                 vport_update.update_inner_vlan_removal_flg = 1;
 2074                 removal = filter.vlan ?
 2075                           1 : p_vf->shadow_config.inner_vlan_removal;
 2076                 vport_update.inner_vlan_removal_flg = removal;
 2077                 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
 2078                 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
 2079                                            ECORE_SPQ_MODE_EBLOCK,
 2080                                            OSAL_NULL);
 2081                 if (rc) {
 2082                         DP_NOTICE(p_hwfn, true,
 2083                                   "PF failed to configure VF vport for vlan\n");
 2084                         return rc;
 2085                 }
 2086 
 2087                 /* Update all the Rx queues */
 2088                 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
 2089                         struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
 2090                         struct ecore_queue_cid *p_cid = OSAL_NULL;
 2091 
 2092                         /* There can be at most one Rx queue per qzone. Find it */
 2093                         p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 2094                         if (p_cid == OSAL_NULL)
 2095                                 continue;
 2096 
 2097                         rc = ecore_sp_eth_rx_queues_update(p_hwfn,
 2098                                                            (void **)&p_cid,
 2099                                                            1, 0, 1,
 2100                                                            ECORE_SPQ_MODE_EBLOCK,
 2101                                                            OSAL_NULL);
 2102                         if (rc) {
 2103                                 DP_NOTICE(p_hwfn, true,
 2104                                           "Failed to send Rx update for queue[0x%04x]\n",
 2105                                           p_cid->rel.queue_id);
 2106                                 return rc;
 2107                         }
 2108                 }
 2109 
 2110                 if (filter.vlan)
 2111                         p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
 2112                 else
 2113                         p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
 2114         }
 2115 
 2116         /* If forced features are terminated, we need to reapply the
 2117          * shadow configuration.
 2118          */
 2119         if (events)
 2120                 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
 2121 
 2122         return rc;
 2123 }
 2124 
 2125 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
 2126                                          struct ecore_ptt *p_ptt,
 2127                                          struct ecore_vf_info *vf)
 2128 {
 2129         struct ecore_sp_vport_start_params params = {0};
 2130         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2131         struct vfpf_vport_start_tlv *start;
 2132         u8 status = PFVF_STATUS_SUCCESS;
 2133         struct ecore_vf_info *vf_info;
 2134         u64 *p_bitmap;
 2135         int sb_id;
 2136         enum _ecore_status_t rc;
 2137 
 2138         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
 2139         if (!vf_info) {
 2140                 DP_NOTICE(p_hwfn->p_dev, true,
 2141                           "Failed to get VF info, invalid vfid [%d]\n",
 2142                           vf->relative_vf_id);
 2143                 return;
 2144         }
 2145 
 2146         vf->state = VF_ENABLED;
 2147         start = &mbx->req_virt->start_vport;
 2148 
 2149         ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 2150 
 2151         /* Initialize Status block in CAU */
 2152         for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
 2153                 if (!start->sb_addr[sb_id]) {
 2154                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2155                                    "VF[%d] did not fill the address of SB %d\n",
 2156                                    vf->relative_vf_id, sb_id);
 2157                         break;
 2158                 }
 2159 
 2160                 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
 2161                                       start->sb_addr[sb_id],
 2162                                       vf->igu_sbs[sb_id],
 2163                                       vf->abs_vf_id, 1);
 2164         }
 2165 
 2166         vf->mtu = start->mtu;
 2167         vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
 2168 
 2169         /* Take into consideration the configuration forced by the hypervisor;
 2170          * if none is configured, use the supplied VF values [for old
 2171          * vfs that would still be fine, since they passed '0' as padding].
 2172          */
 2173         p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
 2174         if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
 2175                 u8 vf_req = start->only_untagged;
 2176 
 2177                 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
 2178                 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
 2179         }
 2180 
 2181         params.tpa_mode =  start->tpa_mode;
 2182         params.remove_inner_vlan = start->inner_vlan_removal;
 2183         params.tx_switching = true;
 2184         params.zero_placement_offset = start->zero_placement_offset;
 2185 
 2186 #ifndef ASIC_ONLY
 2187         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
 2188                 DP_NOTICE(p_hwfn, false, "FPGA: Don't configure VF for Tx-switching [no pVFC]\n");
 2189                 params.tx_switching = false;
 2190         }
 2191 #endif
 2192 
 2193         params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
 2194         params.drop_ttl0 = false;
 2195         params.concrete_fid = vf->concrete_fid;
 2196         params.opaque_fid = vf->opaque_fid;
 2197         params.vport_id = vf->vport_id;
 2198         params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 2199         params.mtu = vf->mtu;
 2200         params.check_mac = true;
 2201 
 2202 #ifndef ECORE_UPSTREAM
 2203         rc = OSAL_IOV_PRE_START_VPORT(p_hwfn, vf->relative_vf_id, &params);
 2204         if (rc != ECORE_SUCCESS) {
 2205                 DP_ERR(p_hwfn, "OSAL_IOV_PRE_START_VPORT returned error %d\n", rc);
 2206                 status = PFVF_STATUS_FAILURE;
 2207                 goto exit;
 2208         }
 2209 #endif
 2210 
 2211         rc = ecore_sp_eth_vport_start(p_hwfn, &params);
 2212         if (rc != ECORE_SUCCESS) {
 2213                 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
 2214                 status = PFVF_STATUS_FAILURE;
 2215         } else {
 2216                 vf->vport_instance++;
 2217 
 2218                 /* Force configuration if needed on the newly opened vport */
 2219                 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
 2220                 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
 2221                                           vf->vport_id, vf->opaque_fid);
 2222                 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
 2223         }
 2224 #ifndef ECORE_UPSTREAM
 2225 exit:
 2226 #endif
 2227         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
 2228                                sizeof(struct pfvf_def_resp_tlv), status);
 2229 }
 2230 
 2231 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
 2232                                         struct ecore_ptt *p_ptt,
 2233                                         struct ecore_vf_info *vf)
 2234 {
 2235         u8 status = PFVF_STATUS_SUCCESS;
 2236         enum _ecore_status_t rc;
 2237 
 2238         OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
 2239         vf->vport_instance--;
 2240         vf->spoof_chk = false;
 2241 
 2242         if ((ecore_iov_validate_active_rxq(vf)) ||
 2243             (ecore_iov_validate_active_txq(vf))) {
 2244                 vf->b_malicious = true;
 2245                 DP_NOTICE(p_hwfn,
 2246                           false, "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
 2247                           vf->abs_vf_id);
 2248                 status = PFVF_STATUS_MALICIOUS;
 2249                 goto out;
 2250         }
 2251 
 2252         rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
 2253         if (rc != ECORE_SUCCESS) {
 2254                 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_stop_vport returned error %d\n",
 2255                        rc);
 2256                 status = PFVF_STATUS_FAILURE;
 2257         }
 2258 
 2259         /* Forget the configuration on the vport */
 2260         vf->configured_features = 0;
 2261         OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 2262 
 2263 out:
 2264         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
 2265                                sizeof(struct pfvf_def_resp_tlv), status);
 2266 }
 2267 
 2268 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
 2269                                             struct ecore_ptt *p_ptt,
 2270                                             struct ecore_vf_info *vf,
 2271                                             u8 status, bool b_legacy)
 2272 {
 2273         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2274         struct pfvf_start_queue_resp_tlv *p_tlv;
 2275         struct vfpf_start_rxq_tlv *req;
 2276         u16 length;
 2277 
 2278         mbx->offset = (u8 *)mbx->reply_virt;
 2279 
 2280         /* Taking a bigger struct instead of adding a TLV to the list was a
 2281          * mistake, but one which we're now stuck with, as some older
 2282          * clients assume the size of the previous response.
 2283          */
 2284         if (!b_legacy)
 2285                 length = sizeof(*p_tlv);
 2286         else
 2287                 length = sizeof(struct pfvf_def_resp_tlv);
 2288 
 2289         p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
 2290         ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 2291                       sizeof(struct channel_list_end_tlv));
 2292 
 2293         /* Update the TLV with the response */
 2294         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
 2295                 req = &mbx->req_virt->start_rxq;
 2296                 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
 2297                                 OFFSETOF(struct mstorm_vf_zone,
 2298                                          non_trigger.eth_rx_queue_producers) +
 2299                                 sizeof(struct eth_rx_prod_data) * req->rx_qid;
 2300         }
 2301 
 2302         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 2303 }
 2304 
 2305 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
 2306                                struct ecore_vf_info *p_vf, bool b_is_tx)
 2307 {
 2308         struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
 2309         struct vfpf_qid_tlv *p_qid_tlv;
 2310 
 2311         /* Search for the qid if the VF published that it's going to provide it */
 2312         if (!(p_vf->acquire.vfdev_info.capabilities &
 2313               VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
 2314                 if (b_is_tx)
 2315                         return ECORE_IOV_LEGACY_QID_TX;
 2316                 else
 2317                         return ECORE_IOV_LEGACY_QID_RX;
 2318         }
 2319 
 2320         p_qid_tlv = (struct vfpf_qid_tlv *)
 2321                     ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 2322                                                CHANNEL_TLV_QID);
 2323         if (p_qid_tlv == OSAL_NULL) {
 2324                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2325                            "VF[%2x]: Failed to provide qid\n",
 2326                            p_vf->relative_vf_id);
 2327 
 2328                 return ECORE_IOV_QID_INVALID;
 2329         }
 2330 
 2331         if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
 2332                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2333                            "VF[%02x]: Provided qid out-of-bounds %02x\n",
 2334                            p_vf->relative_vf_id, p_qid_tlv->qid);
 2335                 return ECORE_IOV_QID_INVALID;
 2336         }
 2337 
 2338         return p_qid_tlv->qid;
 2339 }
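
/* Usage sketch (illustrative): legacy VFs get fixed per-qzone slots
 * (ECORE_IOV_LEGACY_QID_RX / ECORE_IOV_LEGACY_QID_TX), while VFs that
 * advertised VFPF_ACQUIRE_CAP_QUEUE_QIDS pick a slot explicitly via a
 * CHANNEL_TLV_QID TLV, bounded by MAX_QUEUES_PER_QZONE. Callers index the
 * per-queue cid array with the result, as ecore_iov_vf_mbx_start_rxq()
 * below does:
 *
 *      qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
 *      if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 *              goto out;
 *      p_queue = &vf->vf_queues[req->rx_qid];
 *      if (p_queue->cids[qid_usage_idx].p_cid)
 *              goto out;       // slot already in use
 */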
 2340 
 2341 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 2342                                        struct ecore_ptt *p_ptt,
 2343                                        struct ecore_vf_info *vf)
 2344 {
 2345         struct ecore_queue_start_common_params params;
 2346         struct ecore_queue_cid_vf_params vf_params;
 2347         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2348         u8 status = PFVF_STATUS_NO_RESOURCE;
 2349         u8 qid_usage_idx, vf_legacy = 0;
 2350         struct ecore_vf_queue *p_queue;
 2351         struct vfpf_start_rxq_tlv *req;
 2352         struct ecore_queue_cid *p_cid;
 2353         struct ecore_sb_info sb_dummy;
 2354         enum _ecore_status_t rc;
 2355 
 2356         req = &mbx->req_virt->start_rxq;
 2357 
 2358         if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
 2359                                     ECORE_IOV_VALIDATE_Q_DISABLE) ||
 2360             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 2361                 goto out;
 2362 
 2363         qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
 2364         if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 2365                 goto out;
 2366 
 2367         p_queue = &vf->vf_queues[req->rx_qid];
 2368         if (p_queue->cids[qid_usage_idx].p_cid)
 2369                 goto out;
 2370 
 2371         vf_legacy = ecore_vf_calculate_legacy(vf);
 2372 
 2373         /* Acquire a new queue-cid */
 2374         OSAL_MEMSET(&params, 0, sizeof(params));
 2375         params.queue_id = (u8)p_queue->fw_rx_qid;
 2376         params.vport_id = vf->vport_id;
 2377         params.stats_id = vf->abs_vf_id + 0x10;
 2378 
 2379         /* Since IGU index is passed via sb_info, construct a dummy one */
 2380         OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
 2381         sb_dummy.igu_sb_id = req->hw_sb;
 2382         params.p_sb = &sb_dummy;
 2383         params.sb_idx = req->sb_index;
 2384 
 2385         OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
 2386         vf_params.vfid = vf->relative_vf_id;
 2387         vf_params.vf_qid = (u8)req->rx_qid;
 2388         vf_params.vf_legacy = vf_legacy;
 2389         vf_params.qid_usage_idx = qid_usage_idx;
 2390 
 2391         p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
 2392                                        &params, true, &vf_params);
 2393         if (p_cid == OSAL_NULL)
 2394                 goto out;
 2395 
 2396         /* Legacy VFs have their Producers in a different location, which they
 2397          * calculate on their own and clean the producer prior to this.
 2398          */
 2399         if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
 2400                 REG_WR(p_hwfn,
 2401                        GTT_BAR0_MAP_REG_MSDM_RAM +
 2402                        MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
 2403                        0);
 2404 
 2405         rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
 2406                                         req->bd_max_bytes,
 2407                                         req->rxq_addr,
 2408                                         req->cqe_pbl_addr,
 2409                                         req->cqe_pbl_size);
 2410         if (rc != ECORE_SUCCESS) {
 2411                 status = PFVF_STATUS_FAILURE;
 2412                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
 2413         } else {
 2414                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
 2415                 p_queue->cids[qid_usage_idx].b_is_tx = false;
 2416                 status = PFVF_STATUS_SUCCESS;
 2417                 vf->num_active_rxqs++;
 2418         }
 2419 
 2420 out:
 2421         ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
 2422                                         !!(vf_legacy &
 2423                                            ECORE_QCID_LEGACY_VF_RX_PROD));
 2424 }
 2425 
 2426 static void
 2427 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
 2428                                  struct ecore_tunnel_info *p_tun,
 2429                                  u16 tunn_feature_mask)
 2430 {
 2431         p_resp->tunn_feature_mask = tunn_feature_mask;
 2432         p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
 2433         p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
 2434         p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
 2435         p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
 2436         p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
 2437         p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
 2438         p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
 2439         p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
 2440         p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
 2441         p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
 2442         p_resp->geneve_udp_port = p_tun->geneve_port.port;
 2443         p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
 2444 }
 2445 
 2446 static void
 2447 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
 2448                                 struct ecore_tunn_update_type *p_tun,
 2449                                 enum ecore_tunn_mode mask, u8 tun_cls)
 2450 {
 2451         if (p_req->tun_mode_update_mask & (1 << mask)) {
 2452                 p_tun->b_update_mode = true;
 2453 
 2454                 if (p_req->tunn_mode & (1 << mask))
 2455                         p_tun->b_mode_enabled = true;
 2456         }
 2457 
 2458         p_tun->tun_cls = tun_cls;
 2459 }
 2460 
 2461 static void
 2462 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
 2463                               struct ecore_tunn_update_type *p_tun,
 2464                               struct ecore_tunn_update_udp_port *p_port,
 2465                               enum ecore_tunn_mode mask,
 2466                               u8 tun_cls, u8 update_port, u16 port)
 2467 {
 2468         if (update_port) {
 2469                 p_port->b_update_port = true;
 2470                 p_port->port = port;
 2471         }
 2472 
 2473         __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
 2474 }
 2475 
 2476 static bool
 2477 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
 2478 {
 2479         bool b_update_requested = false;
 2480 
 2481         if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
 2482             p_req->update_geneve_port || p_req->update_vxlan_port)
 2483                 b_update_requested = true;
 2484 
 2485         return b_update_requested;
 2486 }
 2487 
 2488 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
 2489                                                struct ecore_ptt *p_ptt,
 2490                                                struct ecore_vf_info *p_vf)
 2491 {
 2492         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 2493         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 2494         struct pfvf_update_tunn_param_tlv *p_resp;
 2495         struct vfpf_update_tunn_param_tlv *p_req;
 2496         enum _ecore_status_t rc = ECORE_SUCCESS;
 2497         u8 status = PFVF_STATUS_SUCCESS;
 2498         bool b_update_required = false;
 2499         struct ecore_tunnel_info tunn;
 2500         u16 tunn_feature_mask = 0;
 2501         int i;
 2502 
 2503         mbx->offset = (u8 *)mbx->reply_virt;
 2504 
 2505         OSAL_MEM_ZERO(&tunn, sizeof(tunn));
 2506         p_req = &mbx->req_virt->tunn_param_update;
 2507 
 2508         if (!ecore_iov_pf_validate_tunn_param(p_req)) {
 2509                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2510                            "No tunnel update requested by VF\n");
 2511                 status = PFVF_STATUS_FAILURE;
 2512                 goto send_resp;
 2513         }
 2514 
 2515         tunn.b_update_rx_cls = p_req->update_tun_cls;
 2516         tunn.b_update_tx_cls = p_req->update_tun_cls;
 2517 
 2518         ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
 2519                                       ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
 2520                                       p_req->update_vxlan_port,
 2521                                       p_req->vxlan_port);
 2522         ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
 2523                                       ECORE_MODE_L2GENEVE_TUNN,
 2524                                       p_req->l2geneve_clss,
 2525                                       p_req->update_geneve_port,
 2526                                       p_req->geneve_port);
 2527         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
 2528                                         ECORE_MODE_IPGENEVE_TUNN,
 2529                                         p_req->ipgeneve_clss);
 2530         __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
 2531                                         ECORE_MODE_L2GRE_TUNN,
 2532                                         p_req->l2gre_clss);
 2533         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
 2534                                         ECORE_MODE_IPGRE_TUNN,
 2535                                         p_req->ipgre_clss);
 2536 
 2537         /* Even if the PF modifies the VF's request, it should
 2538          * still return an error whenever the resulting configuration is
 2539          * partial or differs from the one the VF requested.
 2540          */
 2541         rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
 2542                                                  &b_update_required, &tunn);
 2543 
 2544         if (rc != ECORE_SUCCESS)
 2545                 status = PFVF_STATUS_FAILURE;
 2546 
 2547         /* Is the ECORE client willing to update anything? */
 2548         if (b_update_required) {
 2549                 u16 geneve_port;
 2550 
 2551                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 2552                                                  ECORE_SPQ_MODE_EBLOCK,
 2553                                                  OSAL_NULL);
 2554                 if (rc != ECORE_SUCCESS)
 2555                         status = PFVF_STATUS_FAILURE;
 2556 
 2557                 geneve_port = p_tun->geneve_port.port;
 2558                 ecore_for_each_vf(p_hwfn, i) {
 2559                         ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
 2560                                                          p_tun->vxlan_port.port,
 2561                                                          geneve_port);
 2562                 }
 2563         }
 2564 
 2565 send_resp:
 2566         p_resp = ecore_add_tlv(&mbx->offset,
 2567                                CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
 2568 
 2569         ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
 2570         ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 2571                       sizeof(struct channel_list_end_tlv));
 2572 
 2573         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
 2574 }
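
/*
 * The reply path above is the same skeleton every mailbox handler in this
 * file uses: rewind mbx->offset to the start of the reply buffer, append
 * the response TLV, terminate the chain with CHANNEL_TLV_LIST_END, and
 * send. A minimal sketch of that skeleton (the response members depend on
 * the particular TLV):
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	p_resp = ecore_add_tlv(&mbx->offset, <response TLV type>,
 *			       sizeof(*p_resp));
 *	... fill *p_resp ...
 *	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
 */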
 2575 
 2576 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
 2577                                             struct ecore_ptt *p_ptt,
 2578                                             struct ecore_vf_info *p_vf,
 2579                                             u32 cid,
 2580                                             u8 status)
 2581 {
 2582         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 2583         struct pfvf_start_queue_resp_tlv *p_tlv;
 2584         bool b_legacy = false;
 2585         u16 length;
 2586 
 2587         mbx->offset = (u8 *)mbx->reply_virt;
 2588 
 2589         /* Taking a bigger struct instead of adding a TLV to the list was
 2590          * a mistake, but one we're now stuck with, as some older
 2591          * clients assume the size of the previous response.
 2592          */
 2593         if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
 2594             ETH_HSI_VER_NO_PKT_LEN_TUNN)
 2595                 b_legacy = true;
 2596 
 2597         if (!b_legacy)
 2598                 length = sizeof(*p_tlv);
 2599         else
 2600                 length = sizeof(struct pfvf_def_resp_tlv);
 2601 
 2602         p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
 2603         ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 2604                       sizeof(struct channel_list_end_tlv));
 2605 
 2606         /* Update the TLV with the response */
 2607         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
 2608                 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
 2609 
 2610         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
 2611 }
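
/*
 * On a successful non-legacy start, the one piece of payload the VF
 * actually consumes is p_tlv->offset: the location of its Tx doorbell
 * within the VF doorbell BAR, computed by DB_ADDR_VF() from the queue's
 * cid. Legacy clients (ETH_HSI_VER_NO_PKT_LEN_TUNN) only receive a
 * pfvf_def_resp_tlv-sized reply and must obtain the doorbell address by
 * other means.
 */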
 2612 
 2613 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 2614                                        struct ecore_ptt *p_ptt,
 2615                                        struct ecore_vf_info *vf)
 2616 {
 2617         struct ecore_queue_start_common_params params;
 2618         struct ecore_queue_cid_vf_params vf_params;
 2619         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2620         u8 status = PFVF_STATUS_NO_RESOURCE;
 2621         struct ecore_vf_queue *p_queue;
 2622         struct vfpf_start_txq_tlv *req;
 2623         struct ecore_queue_cid *p_cid;
 2624         struct ecore_sb_info sb_dummy;
 2625         u8 qid_usage_idx, vf_legacy;
 2626         u32 cid = 0;
 2627         enum _ecore_status_t rc;
 2628         u16 pq;
 2629 
 2630         OSAL_MEMSET(&params, 0, sizeof(params));
 2631         req = &mbx->req_virt->start_txq;
 2632 
 2633         if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
 2634                                     ECORE_IOV_VALIDATE_Q_NA) ||
 2635             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 2636                 goto out;
 2637 
 2638         qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
 2639         if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 2640                 goto out;
 2641 
 2642         p_queue = &vf->vf_queues[req->tx_qid];
 2643         if (p_queue->cids[qid_usage_idx].p_cid)
 2644                 goto out;
 2645 
 2646         vf_legacy = ecore_vf_calculate_legacy(vf);
 2647 
 2648         /* Acquire a new queue-cid */
 2649         params.queue_id = p_queue->fw_tx_qid;
 2650         params.vport_id = vf->vport_id;
 2651         params.stats_id = vf->abs_vf_id + 0x10;
 2652 
 2653         /* Since IGU index is passed via sb_info, construct a dummy one */
 2654         OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
 2655         sb_dummy.igu_sb_id = req->hw_sb;
 2656         params.p_sb = &sb_dummy;
 2657         params.sb_idx = req->sb_index;
 2658 
 2659         OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
 2660         vf_params.vfid = vf->relative_vf_id;
 2661         vf_params.vf_qid = (u8)req->tx_qid;
 2662         vf_params.vf_legacy = vf_legacy;
 2663         vf_params.qid_usage_idx = qid_usage_idx;
 2664 
 2665         p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
 2666                                        &params, false, &vf_params);
 2667         if (p_cid == OSAL_NULL)
 2668                 goto out;
 2669 
 2670         pq = ecore_get_cm_pq_idx_vf(p_hwfn,
 2671                                     vf->relative_vf_id);
 2672         rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
 2673                                         req->pbl_addr, req->pbl_size, pq);
 2674         if (rc != ECORE_SUCCESS) {
 2675                 status = PFVF_STATUS_FAILURE;
 2676                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
 2677         } else {
 2678                 status = PFVF_STATUS_SUCCESS;
 2679                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
 2680                 p_queue->cids[qid_usage_idx].b_is_tx = true;
 2681                 cid = p_cid->cid;
 2682         }
 2683 
 2684 out:
 2685         ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
 2686                                         cid, status);
 2687 }
 2688 
 2689 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 2690                                                    struct ecore_vf_info *vf,
 2691                                                    u16 rxq_id,
 2692                                                    u8 qid_usage_idx,
 2693                                                    bool cqe_completion)
 2694 {
 2695         struct ecore_vf_queue *p_queue;
 2696         enum _ecore_status_t rc = ECORE_SUCCESS;
 2697 
 2698         if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
 2699                                     ECORE_IOV_VALIDATE_Q_NA)) {
 2700                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2701                            "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
 2702                            vf->relative_vf_id, rxq_id, qid_usage_idx);
 2703                 return ECORE_INVAL;
 2704         }
 2705 
 2706         p_queue = &vf->vf_queues[rxq_id];
 2707 
 2708         /* We've validated the index and the existence of the active RXQ -
 2709          * now we need to make sure that it's using the correct qid.
 2710          */
 2711         if (!p_queue->cids[qid_usage_idx].p_cid ||
 2712             p_queue->cids[qid_usage_idx].b_is_tx) {
 2713                 struct ecore_queue_cid *p_cid;
 2714 
 2715                 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 2716                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2717                            "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
 2718                             vf->relative_vf_id, rxq_id, qid_usage_idx,
 2719                             rxq_id, p_cid->qid_usage_idx);
 2720                 return ECORE_INVAL;
 2721         }
 2722 
 2723         /* Now that we know we have a valid Rx-queue - close it */
 2724         rc = ecore_eth_rx_queue_stop(p_hwfn,
 2725                                      p_queue->cids[qid_usage_idx].p_cid,
 2726                                      false, cqe_completion);
 2727         if (rc != ECORE_SUCCESS)
 2728                 return rc;
 2729 
 2730         p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
 2731         vf->num_active_rxqs--;
 2732 
 2733         return ECORE_SUCCESS;
 2734 }
 2735 
 2736 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
 2737                                                    struct ecore_vf_info *vf,
 2738                                                    u16 txq_id,
 2739                                                    u8 qid_usage_idx)
 2740 {
 2741         struct ecore_vf_queue *p_queue;
 2742         enum _ecore_status_t rc = ECORE_SUCCESS;
 2743 
 2744         if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
 2745                                     ECORE_IOV_VALIDATE_Q_NA))
 2746                 return ECORE_INVAL;
 2747 
 2748         p_queue = &vf->vf_queues[txq_id];
 2749         if (!p_queue->cids[qid_usage_idx].p_cid ||
 2750             !p_queue->cids[qid_usage_idx].b_is_tx)
 2751                 return ECORE_INVAL;
 2752 
 2753         rc = ecore_eth_tx_queue_stop(p_hwfn,
 2754                                      p_queue->cids[qid_usage_idx].p_cid);
 2755         if (rc != ECORE_SUCCESS)
 2756                 return rc;
 2757 
 2758         p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
 2759         return ECORE_SUCCESS;
 2760 }
 2761 
 2762 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
 2763                                        struct ecore_ptt *p_ptt,
 2764                                        struct ecore_vf_info *vf)
 2765 {
 2766         u16 length = sizeof(struct pfvf_def_resp_tlv);
 2767         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2768         u8 status = PFVF_STATUS_FAILURE;
 2769         struct vfpf_stop_rxqs_tlv *req;
 2770         u8 qid_usage_idx;
 2771         enum _ecore_status_t rc;
 2772 
 2773         /* Starting with CHANNEL_TLV_QID, 'num_rxqs' is assumed to
 2774          * be one. Since no older ecore ever passed multiple queues
 2775          * through this API, sanitize the value.
 2776          */
 2777         req = &mbx->req_virt->stop_rxqs;
 2778         if (req->num_rxqs != 1) {
 2779                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2780                            "Odd; VF[%d] tried stopping multiple Rx queues\n",
 2781                            vf->relative_vf_id);
 2782                 status = PFVF_STATUS_NOT_SUPPORTED;
 2783                 goto out;
 2784         }
 2785 
 2786         /* Find which qid-index is associated with the queue */
 2787         qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
 2788         if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 2789                 goto out;
 2790 
 2791         rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
 2792                                     qid_usage_idx, req->cqe_completion);
 2793         if (rc == ECORE_SUCCESS)
 2794                 status = PFVF_STATUS_SUCCESS;
 2795 out:
 2796         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
 2797                                length, status);
 2798 }
 2799 
 2800 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
 2801                                        struct ecore_ptt *p_ptt,
 2802                                        struct ecore_vf_info *vf)
 2803 {
 2804         u16 length = sizeof(struct pfvf_def_resp_tlv);
 2805         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2806         u8 status = PFVF_STATUS_FAILURE;
 2807         struct vfpf_stop_txqs_tlv *req;
 2808         u8 qid_usage_idx;
 2809         enum _ecore_status_t rc;
 2810 
 2811         /* Starting with CHANNEL_TLV_QID, 'num_txqs' is assumed to
 2812          * be one. Since no older ecore ever passed multiple queues
 2813          * through this API, sanitize the value.
 2814          */
 2815         req = &mbx->req_virt->stop_txqs;
 2816         if (req->num_txqs != 1) {
 2817                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2818                            "Odd; VF[%d] tried stopping multiple Tx queues\n",
 2819                            vf->relative_vf_id);
 2820                 status = PFVF_STATUS_NOT_SUPPORTED;
 2821                 goto out;
 2822         }
 2823 
 2824         /* Find which qid-index is associated with the queue */
 2825         qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
 2826         if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 2827                 goto out;
 2828 
 2829         rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
 2830                                     qid_usage_idx);
 2831         if (rc == ECORE_SUCCESS)
 2832                 status = PFVF_STATUS_SUCCESS;
 2833 
 2834 out:
 2835         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
 2836                                length, status);
 2837 }
 2838 
 2839 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 2840                                          struct ecore_ptt *p_ptt,
 2841                                          struct ecore_vf_info *vf)
 2842 {
 2843         struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
 2844         u16 length = sizeof(struct pfvf_def_resp_tlv);
 2845         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 2846         struct vfpf_update_rxq_tlv *req;
 2847         u8 status = PFVF_STATUS_FAILURE;
 2848         u8 complete_event_flg;
 2849         u8 complete_cqe_flg;
 2850         u8 qid_usage_idx;
 2851         enum _ecore_status_t rc;
 2852         u16 i;
 2853 
 2854         req = &mbx->req_virt->update_rxq;
 2855         complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
 2856         complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 2857 
 2858         qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
 2859         if (qid_usage_idx == ECORE_IOV_QID_INVALID)
 2860                 goto out;
 2861 
 2862         /* Starting with the addition of CHANNEL_TLV_QID, this API started
 2863          * expecting a single queue at a time. Validate this.
 2864          */
 2865         if ((vf->acquire.vfdev_info.capabilities &
 2866              VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
 2867              req->num_rxqs != 1) {
 2868                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2869                            "VF[%d] supports QIDs but sends multiple queues\n",
 2870                            vf->relative_vf_id);
 2871                 goto out;
 2872         }
 2873 
 2874         /* Validate inputs - for the legacy case this is still true since
 2875          * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
 2876          */
 2877         for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
 2878                 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
 2879                                             ECORE_IOV_VALIDATE_Q_NA) ||
 2880                     !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
 2881                     vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
 2882                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2883                                    "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
 2884                                    vf->relative_vf_id, req->rx_qid,
 2885                                    req->num_rxqs);
 2886                         goto out;
 2887                 }
 2888         }
 2889 
 2890         for (i = 0; i < req->num_rxqs; i++) {
 2891                 u16 qid = req->rx_qid + i;
 2892 
 2893                 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
 2894         }
 2895 
 2896         rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
 2897                                            req->num_rxqs,
 2898                                            complete_cqe_flg,
 2899                                            complete_event_flg,
 2900                                            ECORE_SPQ_MODE_EBLOCK,
 2901                                            OSAL_NULL);
 2902         if (rc != ECORE_SUCCESS)
 2903                 goto out;
 2904 
 2905         status = PFVF_STATUS_SUCCESS;
 2906 out:
 2907         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
 2908                                length, status);
 2909 }
 2910 
 2911 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
 2912                                         void *p_tlvs_list, u16 req_type)
 2913 {
 2914         struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
 2915         int len = 0;
 2916 
 2917         do {
 2918                 if (!p_tlv->length) {
 2919                         DP_NOTICE(p_hwfn, true,
 2920                                   "Zero length TLV found\n");
 2921                         return OSAL_NULL;
 2922                 }
 2923 
 2924                 if (p_tlv->type == req_type) {
 2925                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 2926                                    "Extended tlv type %s, length %d found\n",
 2927                                    ecore_channel_tlvs_string[p_tlv->type],
 2928                                    p_tlv->length);
 2929                         return p_tlv;
 2930                 }
 2931 
 2932                 len += p_tlv->length;
 2933                 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
 2934 
 2935                 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
 2936                         DP_NOTICE(p_hwfn, true,
 2937                                   "TLVs have overrun the buffer size\n");
 2938                         return OSAL_NULL;
 2939                 }
 2940         } while (p_tlv->type != CHANNEL_TLV_LIST_END);
 2941 
 2942         return OSAL_NULL;
 2943 }
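
/*
 * The buffer walked above is a flat chain of TLVs, each starting with a
 * struct channel_tlv header whose 'length' covers the entire entry, and
 * terminated by a CHANNEL_TLV_LIST_END entry. Typical usage, as in the
 * vport-update helpers below:
 *
 *	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
 *		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 *					       CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
 *	if (!p_act_tlv)
 *		return;
 *
 * An OSAL_NULL return means either that the requested type is absent or
 * that the chain was malformed (zero-length TLV or buffer overrun).
 */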
 2944 
 2945 static void
 2946 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
 2947                               struct ecore_sp_vport_update_params *p_data,
 2948                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
 2949 {
 2950         struct vfpf_vport_update_activate_tlv *p_act_tlv;
 2951         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 2952 
 2953         p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
 2954                     ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 2955                                                tlv);
 2956         if (!p_act_tlv)
 2957                 return;
 2958 
 2959         p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
 2960         p_data->vport_active_rx_flg = p_act_tlv->active_rx;
 2961         p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
 2962         p_data->vport_active_tx_flg = p_act_tlv->active_tx;
 2963         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
 2964 }
 2965 
 2966 static void
 2967 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
 2968                                struct ecore_sp_vport_update_params *p_data,
 2969                                struct ecore_vf_info *p_vf,
 2970                                struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
 2971 {
 2972         struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
 2973         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
 2974 
 2975         p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
 2976                      ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 2977                                                 tlv);
 2978         if (!p_vlan_tlv)
 2979                 return;
 2980 
 2981         p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
 2982 
 2983         /* Ignore the VF request if we're forcing a vlan */
 2984         if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
 2985                 p_data->update_inner_vlan_removal_flg = 1;
 2986                 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
 2987         }
 2988 
 2989         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
 2990 }
 2991 
 2992 static void
 2993 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
 2994                               struct ecore_sp_vport_update_params *p_data,
 2995                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
 2996 {
 2997         struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
 2998         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
 2999 
 3000         p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
 3001                           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 3002                                                      tlv);
 3003         if (!p_tx_switch_tlv)
 3004                 return;
 3005 
 3006 #ifndef ASIC_ONLY
 3007         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
 3008                 DP_NOTICE(p_hwfn, false, "FPGA: Ignore tx-switching configuration originating from VFs\n");
 3009                 return;
 3010         }
 3011 #endif
 3012 
 3013         p_data->update_tx_switching_flg = 1;
 3014         p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
 3015         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
 3016 }
 3017 
 3018 static void
 3019 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
 3020                                     struct ecore_sp_vport_update_params *p_data,
 3021                                     struct ecore_iov_vf_mbx *p_mbx,
 3022                                     u16 *tlvs_mask)
 3023 {
 3024         struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
 3025         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
 3026 
 3027         p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
 3028                       ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 3029                                                  tlv);
 3030         if (!p_mcast_tlv)
 3031                 return;
 3032 
 3033         p_data->update_approx_mcast_flg = 1;
 3034         OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
 3035                     sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 3036         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
 3037 }
 3038 
 3039 static void
 3040 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
 3041                                 struct ecore_sp_vport_update_params *p_data,
 3042                                 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
 3043 {
 3044         struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
 3045         struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
 3046         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
 3047 
 3048         p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
 3049                        ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 3050                                                   tlv);
 3051         if (!p_accept_tlv)
 3052                 return;
 3053 
 3054         p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
 3055         p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
 3056         p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
 3057         p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
 3058         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
 3059 }
 3060 
 3061 static void
 3062 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
 3063                                     struct ecore_sp_vport_update_params *p_data,
 3064                                     struct ecore_iov_vf_mbx *p_mbx,
 3065                                     u16 *tlvs_mask)
 3066 {
 3067         struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
 3068         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
 3069 
 3070         p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
 3071                             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 3072                                                        tlv);
 3073         if (!p_accept_any_vlan)
 3074                 return;
 3075 
 3076         p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
 3077         p_data->update_accept_any_vlan_flg =
 3078                         p_accept_any_vlan->update_accept_any_vlan_flg;
 3079         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
 3080 }
 3081 
 3082 static void
 3083 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
 3084                               struct ecore_vf_info *vf,
 3085                               struct ecore_sp_vport_update_params *p_data,
 3086                               struct ecore_rss_params *p_rss,
 3087                               struct ecore_iov_vf_mbx *p_mbx,
 3088                               u16 *tlvs_mask, u16 *tlvs_accepted)
 3089 {
 3090         struct vfpf_vport_update_rss_tlv *p_rss_tlv;
 3091         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
 3092         bool b_reject = false;
 3093         u16 table_size;
 3094         u16 i, q_idx;
 3095 
 3096         p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
 3097                     ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 3098                                                tlv);
 3099         if (!p_rss_tlv) {
 3100                 p_data->rss_params = OSAL_NULL;
 3101                 return;
 3102         }
 3103 
 3104         OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
 3105 
 3106         p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
 3107                                       VFPF_UPDATE_RSS_CONFIG_FLAG);
 3108         p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
 3109                                             VFPF_UPDATE_RSS_CAPS_FLAG);
 3110         p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
 3111                                          VFPF_UPDATE_RSS_IND_TABLE_FLAG);
 3112         p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
 3113                                    VFPF_UPDATE_RSS_KEY_FLAG);
 3114 
 3115         p_rss->rss_enable = p_rss_tlv->rss_enable;
 3116         p_rss->rss_eng_id = vf->rss_eng_id;
 3117         p_rss->rss_caps = p_rss_tlv->rss_caps;
 3118         p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
 3119         OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
 3120                     sizeof(p_rss->rss_key));
 3121 
 3122         table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
 3123                                 (1 << p_rss_tlv->rss_table_size_log));
 3124 
 3125         for (i = 0; i < table_size; i++) {
 3126                 struct ecore_queue_cid *p_cid;
 3127 
 3128                 q_idx = p_rss_tlv->rss_ind_table[i];
 3129                 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
 3130                                             ECORE_IOV_VALIDATE_Q_ENABLE)) {
 3131                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3132                                    "VF[%d]: Omitting RSS due to wrong queue %04x\n",
 3133                                    vf->relative_vf_id, q_idx);
 3134                         b_reject = true;
 3135                         goto out;
 3136                 }
 3137 
 3138                 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
 3139                 p_rss->rss_ind_table[i] = p_cid;
 3140         }
 3141 
 3142         p_data->rss_params = p_rss;
 3143 out:
 3144         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
 3145         if (!b_reject)
 3146                 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
 3147 }
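
/*
 * A quick worked example of the clamping above: a VF advertising
 * rss_table_size_log = 7 nominally describes 2^7 = 128 indirection
 * entries, but table_size is clamped to the PF-side
 * OSAL_ARRAY_SIZE(p_rss->rss_ind_table), so only that many entries are
 * translated into queue-cid pointers. A single invalid queue index
 * rejects the whole RSS TLV: the mask bit is still set so the VF learns
 * the TLV was seen, but the accepted bit stays clear.
 */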
 3148 
 3149 static void
 3150 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
 3151                                   struct ecore_sp_vport_update_params *p_data,
 3152                                   struct ecore_sge_tpa_params *p_sge_tpa,
 3153                                   struct ecore_iov_vf_mbx *p_mbx,
 3154                                   u16 *tlvs_mask)
 3155 {
 3156         struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
 3157         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
 3158 
 3159         p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
 3160                         ecore_iov_search_list_tlvs(p_hwfn,
 3161                                                    p_mbx->req_virt, tlv);
 3162 
 3163         if (!p_sge_tpa_tlv) {
 3164                 p_data->sge_tpa_params = OSAL_NULL;
 3165                 return;
 3166         }
 3167 
 3168         OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
 3169 
 3170         p_sge_tpa->update_tpa_en_flg =
 3171                 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
 3172                    VFPF_UPDATE_TPA_EN_FLAG);
 3173         p_sge_tpa->update_tpa_param_flg =
 3174                 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
 3175                    VFPF_UPDATE_TPA_PARAM_FLAG);
 3176 
 3177         p_sge_tpa->tpa_ipv4_en_flg =
 3178                 !!(p_sge_tpa_tlv->sge_tpa_flags &
 3179                    VFPF_TPA_IPV4_EN_FLAG);
 3180         p_sge_tpa->tpa_ipv6_en_flg =
 3181                 !!(p_sge_tpa_tlv->sge_tpa_flags &
 3182                    VFPF_TPA_IPV6_EN_FLAG);
 3183         p_sge_tpa->tpa_pkt_split_flg =
 3184                 !!(p_sge_tpa_tlv->sge_tpa_flags &
 3185                    VFPF_TPA_PKT_SPLIT_FLAG);
 3186         p_sge_tpa->tpa_hdr_data_split_flg =
 3187                 !!(p_sge_tpa_tlv->sge_tpa_flags &
 3188                    VFPF_TPA_HDR_DATA_SPLIT_FLAG);
 3189         p_sge_tpa->tpa_gro_consistent_flg =
 3190                 !!(p_sge_tpa_tlv->sge_tpa_flags &
 3191                    VFPF_TPA_GRO_CONSIST_FLAG);
 3192 
 3193         p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
 3194         p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
 3195         p_sge_tpa->tpa_min_size_to_start =
 3196                 p_sge_tpa_tlv->tpa_min_size_to_start;
 3197         p_sge_tpa->tpa_min_size_to_cont =
 3198                 p_sge_tpa_tlv->tpa_min_size_to_cont;
 3199         p_sge_tpa->max_buffers_per_cqe =
 3200                 p_sge_tpa_tlv->max_buffers_per_cqe;
 3201 
 3202         p_data->sge_tpa_params = p_sge_tpa;
 3203 
 3204         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
 3205 }
 3206 
 3207 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
 3208                                           struct ecore_ptt *p_ptt,
 3209                                           struct ecore_vf_info *vf)
 3210 {
 3211         struct ecore_rss_params *p_rss_params = OSAL_NULL;
 3212         struct ecore_sp_vport_update_params params;
 3213         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 3214         struct ecore_sge_tpa_params sge_tpa_params;
 3215         u16 tlvs_mask = 0, tlvs_accepted = 0;
 3216         u8 status = PFVF_STATUS_SUCCESS;
 3217         u16 length;
 3218         enum _ecore_status_t rc;
 3219 
 3220         /* Validate that the VF has an active vport to update */
 3221         if (!vf->vport_instance) {
 3222                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3223                            "No VPORT instance available for VF[%d], failing vport update\n",
 3224                            vf->abs_vf_id);
 3225                 status = PFVF_STATUS_FAILURE;
 3226                 goto out;
 3227         }
 3228 
 3229         p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
 3230         if (p_rss_params == OSAL_NULL) {
 3231                 status = PFVF_STATUS_FAILURE;
 3232                 goto out;
 3233         }
 3234 
 3235         OSAL_MEMSET(&params, 0, sizeof(params));
 3236         params.opaque_fid = vf->opaque_fid;
 3237         params.vport_id = vf->vport_id;
 3238         params.rss_params = OSAL_NULL;
 3239 
 3240         /* Search the extended TLV list and fold the values supplied
 3241          * by the VF into struct ecore_sp_vport_update_params.
 3242          */
 3243         ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
 3244         ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
 3245         ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
 3246         ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
 3247         ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
 3248         ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
 3249         ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
 3250                                           &sge_tpa_params, mbx, &tlvs_mask);
 3251 
 3252         tlvs_accepted = tlvs_mask;
 3253 
 3254         /* Some of the extended TLVs need to be validated first; in that
 3255          * case they may update the mask without updating the accepted one,
 3256          * so the PF can communicate to the VF that it rejected the request.
 3257          */
 3258         ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
 3259                                       mbx, &tlvs_mask, &tlvs_accepted);
 3260 
 3261         /* For now, just log a message if the buffer contains no extended
 3262          * TLV at all. Once every feature of the vport-update ramrod can be
 3263          * requested by the VF as an extended TLV, an error could instead be
 3264          * returned whenever none is present.
 3265          */
 3266         if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
 3267                                      &params, &tlvs_accepted) !=
 3268             ECORE_SUCCESS) {
 3269                 tlvs_accepted = 0;
 3270                 status = PFVF_STATUS_NOT_SUPPORTED;
 3271                 goto out;
 3272         }
 3273 
 3274         if (!tlvs_accepted) {
 3275                 if (tlvs_mask)
 3276                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3277                                    "Upper-layer prevents said VF configuration\n");
 3278                 else
 3279                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3280                                    "No feature tlvs found for vport update\n");
 3281                 status = PFVF_STATUS_NOT_SUPPORTED;
 3282                 goto out;
 3283         }
 3284 
 3285         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
 3286                                    OSAL_NULL);
 3287 
 3288         if (rc)
 3289                 status = PFVF_STATUS_FAILURE;
 3290 
 3291 out:
 3292         OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
 3293         length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
 3294                                                     tlvs_mask, tlvs_accepted);
 3295         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 3296 }
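
/*
 * The two bitmaps travel back to the VF in the response: 'tlvs_mask'
 * reports which extended TLVs the PF recognized in the request, while
 * 'tlvs_accepted' reports which of those it actually applied. E.g., a
 * request carrying an ACTIVATE TLV plus an RSS TLV that references an
 * invalid queue would be answered with both bits set in tlvs_mask but
 * only ECORE_IOV_VP_UPDATE_ACTIVATE set in tlvs_accepted.
 */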
 3297 
 3298 static enum _ecore_status_t ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
 3299                                                             struct ecore_vf_info *p_vf,
 3300                                                             struct ecore_filter_ucast *p_params)
 3301 {
 3302         int i;
 3303 
 3304         /* First remove entries and then add new ones */
 3305         if (p_params->opcode == ECORE_FILTER_REMOVE) {
 3306                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
 3307                         if (p_vf->shadow_config.vlans[i].used &&
 3308                             p_vf->shadow_config.vlans[i].vid ==
 3309                             p_params->vlan) {
 3310                                 p_vf->shadow_config.vlans[i].used = false;
 3311                                 break;
 3312                         }
 3313                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
 3314                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3315                                    "VF [%d] - Tries to remove a non-existent vlan\n",
 3316                                     p_vf->relative_vf_id);
 3317                         return ECORE_INVAL;
 3318                 }
 3319         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
 3320                    p_params->opcode == ECORE_FILTER_FLUSH) {
 3321                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
 3322                         p_vf->shadow_config.vlans[i].used = false;
 3323         }
 3324 
 3325         /* In forced mode, we're willing to remove entries - but we don't add
 3326          * new ones.
 3327          */
 3328         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
 3329                 return ECORE_SUCCESS;
 3330 
 3331         if (p_params->opcode == ECORE_FILTER_ADD ||
 3332             p_params->opcode == ECORE_FILTER_REPLACE) {
 3333                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
 3334                         if (p_vf->shadow_config.vlans[i].used)
 3335                                 continue;
 3336 
 3337                         p_vf->shadow_config.vlans[i].used = true;
 3338                         p_vf->shadow_config.vlans[i].vid = p_params->vlan;
 3339                         break;
 3340                 }
 3341 
 3342                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
 3343                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3344                                    "VF [%d] - Tries to configure more than %d vlan filters\n",
 3345                                    p_vf->relative_vf_id,
 3346                                    ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
 3347                         return ECORE_INVAL;
 3348                 }
 3349         }
 3350 
 3351         return ECORE_SUCCESS;
 3352 }
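
/*
 * A rough sketch of the shadow bookkeeping above for an ADD of vid 100
 * into an empty table: slot 0 is free, so it becomes
 * { .used = true, .vid = 100 }; a later REMOVE of vid 100 clears .used
 * again, while REPLACE/FLUSH first clear every slot (REPLACE then writes
 * the new entry). This shadow is what lets the PF later re-trace the
 * VF's own filter configuration, e.g. once a forced VLAN is lifted.
 */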
 3353 
 3354 static enum _ecore_status_t ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
 3355                                                            struct ecore_vf_info *p_vf,
 3356                                                            struct ecore_filter_ucast *p_params)
 3357 {
 3358         char empty_mac[ETH_ALEN];
 3359         int i;
 3360 
 3361         OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
 3362 
 3363         /* If we're in forced-mode, we don't allow any change */
 3364         /* TODO - this would change if we were ever to implement logic for
 3365          * removing a forced MAC altogether [in which case, like for vlans,
 3366          * we should be able to re-trace the previous configuration].
 3367          */
 3368         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
 3369                 return ECORE_SUCCESS;
 3370 
 3371         /* First remove entries and then add new ones */
 3372         if (p_params->opcode == ECORE_FILTER_REMOVE) {
 3373                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
 3374                         if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
 3375                                          p_params->mac, ETH_ALEN)) {
 3376                                 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
 3377                                               ETH_ALEN);
 3378                                 break;
 3379                         }
 3380                 }
 3381 
 3382                 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
 3383                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3384                                    "MAC isn't configured\n");
 3385                         return ECORE_INVAL;
 3386                 }
 3387         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
 3388                    p_params->opcode == ECORE_FILTER_FLUSH) {
 3389                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
 3390                         OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
 3391         }
 3392 
 3393         /* Record the new MAC address in the shadow list */
 3394         if (p_params->opcode != ECORE_FILTER_ADD &&
 3395             p_params->opcode != ECORE_FILTER_REPLACE)
 3396                 return ECORE_SUCCESS;
 3397 
 3398         for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
 3399                 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
 3400                                  empty_mac, ETH_ALEN)) {
 3401                         OSAL_MEMCPY(p_vf->shadow_config.macs[i],
 3402                                     p_params->mac, ETH_ALEN);
 3403                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3404                                    "Added MAC at %d entry in shadow\n", i);
 3405                         break;
 3406                 }
 3407         }
 3408 
 3409         if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
 3410                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3411                            "No available place for MAC\n");
 3412                 return ECORE_INVAL;
 3413         }
 3414 
 3415         return ECORE_SUCCESS;
 3416 }
 3417 
 3418 static enum _ecore_status_t
 3419 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
 3420                                    struct ecore_vf_info *p_vf,
 3421                                    struct ecore_filter_ucast *p_params)
 3422 {
 3423         enum _ecore_status_t rc = ECORE_SUCCESS;
 3424 
 3425         if (p_params->type == ECORE_FILTER_MAC) {
 3426                 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
 3427                 if (rc != ECORE_SUCCESS)
 3428                         return rc;
 3429         }
 3430 
 3431         if (p_params->type == ECORE_FILTER_VLAN)
 3432                 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
 3433 
 3434         return rc;
 3435 }
 3436 
 3437 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
 3438                                           struct ecore_ptt *p_ptt,
 3439                                           struct ecore_vf_info *vf)
 3440 {
 3441         struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
 3442         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 3443         struct vfpf_ucast_filter_tlv *req;
 3444         u8 status = PFVF_STATUS_SUCCESS;
 3445         struct ecore_filter_ucast params;
 3446         enum _ecore_status_t rc;
 3447 
 3448         /* Prepare the unicast filter params */
 3449         OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
 3450         req = &mbx->req_virt->ucast_filter;
 3451         params.opcode = (enum ecore_filter_opcode)req->opcode;
 3452         params.type = (enum ecore_filter_ucast_type)req->type;
 3453 
 3454         /* @@@TBD - We might need logic on the HV side to determine this */
 3455         params.is_rx_filter = 1;
 3456         params.is_tx_filter = 1;
 3457         params.vport_to_remove_from = vf->vport_id;
 3458         params.vport_to_add_to = vf->vport_id;
 3459         OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
 3460         params.vlan = req->vlan;
 3461 
 3462         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3463                    "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
 3464                    vf->abs_vf_id, params.opcode, params.type,
 3465                    params.is_rx_filter ? "RX" : "",
 3466                    params.is_tx_filter ? "TX" : "",
 3467                    params.vport_to_add_to,
 3468                    params.mac[0], params.mac[1], params.mac[2],
 3469                    params.mac[3], params.mac[4], params.mac[5], params.vlan);
 3470 
 3471         if (!vf->vport_instance) {
 3472                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3473                            "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
 3474                            vf->abs_vf_id);
 3475                 status = PFVF_STATUS_FAILURE;
 3476                 goto out;
 3477         }
 3478 
 3479         /* Update the shadow copy of the VF configuration. If the shadow
 3480          * indicates the action should be blocked, return success to the VF
 3481          * to imitate the firmware behaviour in that case.
 3482          */
 3483         if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
 3484             ECORE_SUCCESS)
 3485                 goto out;
 3486 
 3487         /* Determine whether the unicast filtering is acceptable to the PF */
 3488         if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
 3489             (params.type == ECORE_FILTER_VLAN ||
 3490              params.type == ECORE_FILTER_MAC_VLAN)) {
 3491                 /* Once VLAN is forced or PVID is set, do not allow
 3492                  * to add/replace any further VLANs.
 3493                  */
 3494                 if (params.opcode == ECORE_FILTER_ADD ||
 3495                     params.opcode == ECORE_FILTER_REPLACE)
 3496                         status = PFVF_STATUS_FORCED;
 3497                 goto out;
 3498         }
 3499 
 3500         if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
 3501             (params.type == ECORE_FILTER_MAC ||
 3502              params.type == ECORE_FILTER_MAC_VLAN)) {
 3503                 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
 3504                     (params.opcode != ECORE_FILTER_ADD &&
 3505                      params.opcode != ECORE_FILTER_REPLACE))
 3506                         status = PFVF_STATUS_FORCED;
 3507                 goto out;
 3508         }
 3509 
 3510         rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
 3511         if (rc == ECORE_EXISTS) {
 3512                 goto out;
 3513         } else if (rc == ECORE_INVAL) {
 3514                 status = PFVF_STATUS_FAILURE;
 3515                 goto out;
 3516         }
 3517 
 3518         rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
 3519                                        ECORE_SPQ_MODE_CB, OSAL_NULL);
 3520         if (rc)
 3521                 status = PFVF_STATUS_FAILURE;
 3522 
 3523 out:
 3524         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
 3525                                sizeof(struct pfvf_def_resp_tlv), status);
 3526 }
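
/*
 * Precedence summary for the filtering checks above: once the bulletin
 * advertises VLAN_ADDR_FORCED, a VF attempt to ADD/REPLACE a VLAN (or
 * MAC_VLAN) filter is answered with PFVF_STATUS_FORCED and never reaches
 * the ramrod. Under MAC_ADDR_FORCED, anything other than an ADD/REPLACE
 * of exactly the forced address is likewise answered FORCED, and even a
 * matching request short-circuits to the response. Shadow updates that
 * are blocked deliberately report PFVF_STATUS_SUCCESS, mimicking the
 * firmware's behaviour.
 */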
 3527 
 3528 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
 3529                                          struct ecore_ptt *p_ptt,
 3530                                          struct ecore_vf_info *vf)
 3531 {
 3532         int i;
 3533 
 3534         /* Reset the SBs */
 3535         for (i = 0; i < vf->num_sbs; i++)
 3536                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
 3537                                                   vf->igu_sbs[i],
 3538                                                   vf->opaque_fid, false);
 3539 
 3540         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
 3541                                sizeof(struct pfvf_def_resp_tlv),
 3542                                PFVF_STATUS_SUCCESS);
 3543 }
 3544 
 3545 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
 3546                                    struct ecore_ptt *p_ptt,
 3547                                    struct ecore_vf_info *vf)
 3548 {
 3549         u16                      length = sizeof(struct pfvf_def_resp_tlv);
 3550         u8                       status = PFVF_STATUS_SUCCESS;
 3551 
 3552         /* Disable Interrupts for VF */
 3553         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
 3554 
 3555         /* Reset Permission table */
 3556         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
 3557 
 3558         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
 3559                                length, status);
 3560 }
 3561 
 3562 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
 3563                                      struct ecore_ptt *p_ptt,
 3564                                      struct ecore_vf_info *p_vf)
 3565 {
 3566         u16 length = sizeof(struct pfvf_def_resp_tlv);
 3567         u8 status = PFVF_STATUS_SUCCESS;
 3568         enum _ecore_status_t rc = ECORE_SUCCESS;
 3569 
 3570         ecore_iov_vf_cleanup(p_hwfn, p_vf);
 3571 
 3572         if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
 3573                 /* Stopping the VF */
 3574                 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
 3575                                       p_vf->opaque_fid);
 3576 
 3577                 if (rc != ECORE_SUCCESS) {
 3578                         DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
 3579                                rc);
 3580                         status = PFVF_STATUS_FAILURE;
 3581                 }
 3582 
 3583                 p_vf->state = VF_STOPPED;
 3584         }
 3585 
 3586         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
 3587                                length, status);
 3588 }
 3589 
 3590 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
 3591                                          struct ecore_ptt *p_ptt,
 3592                                          struct ecore_vf_info *p_vf)
 3593 {
 3594         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 3595         struct pfvf_read_coal_resp_tlv *p_resp;
 3596         struct vfpf_read_coal_req_tlv *req;
 3597         u8 status = PFVF_STATUS_FAILURE;
 3598         struct ecore_vf_queue *p_queue;
 3599         struct ecore_queue_cid *p_cid;
 3600         enum _ecore_status_t rc = ECORE_SUCCESS;
 3601         u16 coal = 0, qid, i;
 3602         bool b_is_rx;
 3603 
 3604         mbx->offset = (u8 *)mbx->reply_virt;
 3605         req = &mbx->req_virt->read_coal_req;
 3606 
 3607         qid = req->qid;
 3608         b_is_rx = req->is_rx ? true : false;
 3609 
 3610         if (b_is_rx) {
 3611                 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
 3612                                             ECORE_IOV_VALIDATE_Q_ENABLE)) {
 3613                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3614                                    "VF[%d]: Invalid Rx queue_id = %d\n",
 3615                                    p_vf->abs_vf_id, qid);
 3616                         goto send_resp;
 3617                 }
 3618 
 3619                 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
 3620                 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
 3621                 if (rc != ECORE_SUCCESS)
 3622                         goto send_resp;
 3623         } else {
 3624                 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
 3625                                             ECORE_IOV_VALIDATE_Q_ENABLE)) {
 3626                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3627                                    "VF[%d]: Invalid Tx queue_id = %d\n",
 3628                                    p_vf->abs_vf_id, qid);
 3629                         goto send_resp;
 3630                 }
 3631                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
 3632                         p_queue = &p_vf->vf_queues[qid];
 3633                         if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
 3634                             (!p_queue->cids[i].b_is_tx))
 3635                                 continue;
 3636 
 3637                         p_cid = p_queue->cids[i].p_cid;
 3638 
 3639                         rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
 3640                                                     p_cid, &coal);
 3641                         if (rc != ECORE_SUCCESS)
 3642                                 goto send_resp;
 3643                         break;
 3644                 }
 3645         }
 3646 
 3647         status = PFVF_STATUS_SUCCESS;
 3648 
 3649 send_resp:
 3650         p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
 3651                                sizeof(*p_resp));
 3652         p_resp->coal = coal;
 3653 
 3654         ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 3655                       sizeof(struct channel_list_end_tlv));
 3656 
 3657         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
 3658 }
 3659 
 3660 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 3661                                          struct ecore_ptt *p_ptt,
 3662                                          struct ecore_vf_info *vf)
 3663 {
 3664         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 3665         enum _ecore_status_t rc = ECORE_SUCCESS;
 3666         struct vfpf_update_coalesce *req;
 3667         u8 status = PFVF_STATUS_FAILURE;
 3668         struct ecore_queue_cid *p_cid;
 3669         u16 rx_coal, tx_coal;
 3670         u16 qid;
 3671         int i;
 3672 
 3673         req = &mbx->req_virt->update_coalesce;
 3674 
 3675         rx_coal = req->rx_coal;
 3676         tx_coal = req->tx_coal;
 3677         qid = req->qid;
 3678 
 3679         if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
 3680                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
 3681             rx_coal) {
 3682                 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
 3683                        vf->abs_vf_id, qid);
 3684                 goto out;
 3685         }
 3686 
 3687         if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
 3688                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
 3689             tx_coal) {
 3690                 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
 3691                        vf->abs_vf_id, qid);
 3692                 goto out;
 3693         }
 3694 
 3695         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3696                    "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
 3697                    vf->abs_vf_id, rx_coal, tx_coal, qid);
 3698 
 3699         if (rx_coal) {
 3700                 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
 3701 
 3702                 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 3703                 if (rc != ECORE_SUCCESS) {
 3704                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3705                                    "VF[%d]: Unable to set rx queue = %d coalesce\n",
 3706                                    vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
 3707                         goto out;
 3708                 }
 3709                 vf->rx_coal = rx_coal;
 3710         }
 3711 
 3712         /* TODO - in the future it might be possible to pass this
 3713          * at a per-cid granularity. For now, do it for all Tx queues.
 3714          */
 3715         if (tx_coal) {
 3716                 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
 3717 
 3718                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
 3719                         if (p_queue->cids[i].p_cid == OSAL_NULL)
 3720                                 continue;
 3721 
 3722                         if (!p_queue->cids[i].b_is_tx)
 3723                                 continue;
 3724 
 3725                         rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
 3726                                                     p_queue->cids[i].p_cid);
 3727                         if (rc != ECORE_SUCCESS) {
 3728                                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3729                                            "VF[%d]: Unable to set tx queue coalesce\n",
 3730                                            vf->abs_vf_id);
 3731                                 goto out;
 3732                         }
 3733                 }
 3734                 vf->tx_coal = tx_coal;
 3735         }
 3736 
 3737         status = PFVF_STATUS_SUCCESS;
 3738 out:
 3739         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
 3740                                sizeof(struct pfvf_def_resp_tlv), status);
 3741 }
 3742 
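      /* PF-initiated variant of the coalesce configuration above - same
       * validation and programming flow as the VF mailbox path, but
       * driven by the PF itself, so it acquires its own PTT.
       */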
 3743 enum _ecore_status_t
 3744 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
 3745                                          u16 rx_coal, u16 tx_coal,
 3746                                          u16 vf_id, u16 qid)
 3747 {
 3748         struct ecore_queue_cid *p_cid;
 3749         struct ecore_vf_info *vf;
 3750         struct ecore_ptt *p_ptt;
 3751         int i, rc = 0;
 3752 
 3753         if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
 3754                 DP_NOTICE(p_hwfn, true,
 3755                           "VF[%d] - Can not set coalescing: VF is not active\n",
 3756                           vf_id);
 3757                 return ECORE_INVAL;
 3758         }
 3759 
 3760         vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
 3761         p_ptt = ecore_ptt_acquire(p_hwfn);
 3762         if (!p_ptt)
 3763                 return ECORE_AGAIN;
 3764 
 3765         if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
 3766                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
 3767             rx_coal) {
 3768                 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
 3769                        vf->abs_vf_id, qid);
 3770                 goto out;
 3771         }
 3772 
 3773         if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
 3774                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
 3775             tx_coal) {
 3776                 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
 3777                        vf->abs_vf_id, qid);
 3778                 goto out;
 3779         }
 3780 
 3781         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3782                    "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
 3783                    vf->abs_vf_id, rx_coal, tx_coal, qid);
 3784 
 3785         if (rx_coal) {
 3786                 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
 3787 
 3788                 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 3789                 if (rc != ECORE_SUCCESS) {
 3790                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3791                                    "VF[%d]: Unable to set rx queue = %d coalesce\n",
 3792                                    vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
 3793                         goto out;
 3794                 }
 3795                 vf->rx_coal = rx_coal;
 3796         }
 3797 
 3798         /* TODO - in the future it might be possible to pass this
 3799          * at a per-cid granularity. For now, do it for all Tx queues.
 3800          */
 3801         if (tx_coal) {
 3802                 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
 3803 
 3804                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
 3805                         if (p_queue->cids[i].p_cid == OSAL_NULL)
 3806                                 continue;
 3807 
 3808                         if (!p_queue->cids[i].b_is_tx)
 3809                                 continue;
 3810 
 3811                         rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
 3812                                                     p_queue->cids[i].p_cid);
 3813                         if (rc != ECORE_SUCCESS) {
 3814                                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3815                                            "VF[%d]: Unable to set tx queue coalesce\n",
 3816                                            vf->abs_vf_id);
 3817                                 goto out;
 3818                         }
 3819                 }
 3820                 vf->tx_coal = tx_coal;
 3821         }
 3822 
 3823 out:
 3824         ecore_ptt_release(p_hwfn, p_ptt);
 3825 
 3826         return rc;
 3827 }
 3828 
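      /* Poll the doorbell-queue usage counter while pretending to be
       * the VF, waiting for the VF's outstanding doorbells to drain.
       * Gives up after 50 iterations of 20 msec each.
       */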
 3829 static enum _ecore_status_t
 3830 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
 3831                            struct ecore_vf_info *p_vf,
 3832                            struct ecore_ptt *p_ptt)
 3833 {
 3834         int cnt;
 3835         u32 val;
 3836 
 3837         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
 3838 
 3839         for (cnt = 0; cnt < 50; cnt++) {
 3840                 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
 3841                 if (!val)
 3842                         break;
 3843                 OSAL_MSLEEP(20);
 3844         }
 3845         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 3846 
 3847         if (cnt == 50) {
 3848                 DP_ERR(p_hwfn, "VF[%d] - dorq failed to clean up [usage 0x%08x]\n",
 3849                        p_vf->abs_vf_id, val);
 3850                 return ECORE_TIMEOUT;
 3851         }
 3852 
 3853         return ECORE_SUCCESS;
 3854 }
 3855 
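      /* Snapshot the per-VOQ producer/consumer distance in the PBF and
       * wait until every consumer has advanced past its initial
       * producer, i.e. until traffic that was in flight at FLR time
       * has drained. Uses the same 50 x 20 msec scheme as the DORQ
       * poll above.
       */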
 3856 static enum _ecore_status_t
 3857 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
 3858                           struct ecore_vf_info *p_vf,
 3859                           struct ecore_ptt *p_ptt)
 3860 {
 3861         u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
 3862         int i, cnt;
 3863 
 3864         /* Read initial consumers & producers */
 3865         for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
 3866                 u32 prod;
 3867 
 3868                 cons[i] = ecore_rd(p_hwfn, p_ptt,
 3869                                    PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
 3870                                    i * 0x40);
 3871                 prod = ecore_rd(p_hwfn, p_ptt,
 3872                                 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
 3873                                 i * 0x40);
 3874                 distance[i] = prod - cons[i];
 3875         }
 3876 
 3877         /* Wait for consumers to pass the producers */
 3878         i = 0;
 3879         for (cnt = 0; cnt < 50; cnt++) {
 3880                 for (; i < MAX_NUM_VOQS_E4; i++) {
 3881                         u32 tmp;
 3882 
 3883                         tmp = ecore_rd(p_hwfn, p_ptt,
 3884                                        PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
 3885                                        i * 0x40);
 3886                         if (distance[i] > tmp - cons[i])
 3887                                 break;
 3888                 }
 3889 
 3890                 if (i == MAX_NUM_VOQS_E4)
 3891                         break;
 3892 
 3893                 OSAL_MSLEEP(20);
 3894         }
 3895 
 3896         if (cnt == 50) {
 3897                 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
 3898                        p_vf->abs_vf_id, i);
 3899                 return ECORE_TIMEOUT;
 3900         }
 3901 
 3902         return ECORE_SUCCESS;
 3903 }
 3904 
 3905 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
 3906                                                   struct ecore_vf_info *p_vf,
 3907                                                   struct ecore_ptt *p_ptt)
 3908 {
 3909         enum _ecore_status_t rc;
 3910 
 3911         /* TODO - add SRC and TM polling once we add storage IOV */
 3912 
 3913         rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
 3914         if (rc)
 3915                 return rc;
 3916 
 3917         rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
 3918         if (rc)
 3919                 return rc;
 3920 
 3921         return ECORE_SUCCESS;
 3922 }
 3923 
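      /* Handle a single VF's pending FLR: clean up the SW state, poll
       * HW until the VF's resources drain, run the final-cleanup
       * ramrod, re-open the VF-PF channel, re-enable the VF, and
       * finally mark the VF for an FLR ack towards the MFW while
       * clearing its pending-FLR bit.
       */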
 3924 static enum _ecore_status_t
 3925 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 3926                                  struct ecore_ptt  *p_ptt,
 3927                                  u16               rel_vf_id,
 3928                                  u32               *ack_vfs)
 3929 {
 3930         struct ecore_vf_info *p_vf;
 3931         enum _ecore_status_t rc = ECORE_SUCCESS;
 3932 
 3933         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
 3934         if (!p_vf)
 3935                 return ECORE_SUCCESS;
 3936 
 3937         if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
 3938             (1ULL << (rel_vf_id % 64))) {
 3939                 u16 vfid = p_vf->abs_vf_id;
 3940 
 3941                 /* TODO - should we lock channel? */
 3942 
 3943                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 3944                            "VF[%d] - Handling FLR\n", vfid);
 3945 
 3946                 ecore_iov_vf_cleanup(p_hwfn, p_vf);
 3947 
 3948                 /* If VF isn't active, no need for anything but SW */
 3949                 if (!p_vf->b_init)
 3950                         goto cleanup;
 3951 
 3952                 /* TODO - what to do in case of failure? */
 3953                 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
 3954                 if (rc != ECORE_SUCCESS)
 3955                         goto cleanup;
 3956 
 3957                 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
 3958                 if (rc) {
 3959                         /* TODO - what now? What a mess... */
 3960                         DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
 3961                                vfid);
 3962                         return rc;
 3963                 }
 3964 
 3965                 /* Workaround to make VF-PF channel ready, as FW
 3966                  * doesn't do that as a part of FLR.
 3967                  */
 3968                 REG_WR(p_hwfn,
 3969                        GTT_BAR0_MAP_REG_USDM_RAM +
 3970                        USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
 3971 
 3972                 /* VF_STOPPED has to be set only after final cleanup
 3973                  * but prior to re-enabling the VF.
 3974                  */
 3975                 p_vf->state = VF_STOPPED;
 3976 
 3977                 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
 3978                 if (rc) {
 3979                         /* TODO - again, a mess... */
 3980                         DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
 3981                                vfid);
 3982                         return rc;
 3983                 }
 3984 cleanup:
 3985                 /* Mark VF for ack and clean pending state */
 3986                 if (p_vf->state == VF_RESET)
 3987                         p_vf->state = VF_STOPPED;
 3988                 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
 3989                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 3990                                 ~(1ULL << (rel_vf_id % 64));
 3991                 p_vf->vf_mbx.b_pending_msg = false;
 3992         }
 3993 
 3994         return rc;
 3995 }
 3996 
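      /* Run the FLR cleanup flow for all of this PF's VFs, then ack
       * the processed VFs towards the MFW in a single bitmap.
       */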
 3997 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 3998                                               struct ecore_ptt  *p_ptt)
 3999 
 4000 {
 4001         u32 ack_vfs[VF_MAX_STATIC / 32];
 4002         enum _ecore_status_t rc = ECORE_SUCCESS;
 4003         u16 i;
 4004 
 4005         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
 4006 
 4007         /* Since the BRB <-> PRS interface can't be tested as part of
 4008          * the FLR polling due to HW limitations, simply sleep a bit.
 4009          * There's no need to wait per-VF, so do it before looping.
 4010          */
 4011         OSAL_MSLEEP(100);
 4012 
 4013         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
 4014                 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
 4015 
 4016         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
 4017         return rc;
 4018 }
 4019 
 4020 #ifndef LINUX_REMOVE
 4021 enum _ecore_status_t
 4022 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 4023                                 struct ecore_ptt  *p_ptt,
 4024                                 u16               rel_vf_id)
 4025 
 4026 {
 4027         u32 ack_vfs[VF_MAX_STATIC / 32];
 4028         enum _ecore_status_t rc = ECORE_SUCCESS;
 4029 
 4030         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
 4031 
 4032         /* Wait instead of polling the BRB <-> PRS interface */
 4033         OSAL_MSLEEP(100);
 4034 
 4035         ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
 4036 
 4037         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
 4038         return rc;
 4039 }
 4040 #endif
 4041 
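      /* Translate the disabled-VFs bitmap received from the MFW into
       * per-VF pending-FLR state. Returns true if at least one of this
       * PF's VFs was FLR-ed and needs the cleanup flow above.
       */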
 4042 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
 4043                           u32 *p_disabled_vfs)
 4044 {
 4045         bool found = false;
 4046         u16 i;
 4047 
 4048         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
 4049         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
 4050                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4051                            "[%08x,...,%08x]: %08x\n",
 4052                            i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
 4053 
 4054         if (!p_hwfn->p_dev->p_iov_info) {
 4055                 DP_NOTICE(p_hwfn, true, "VF FLR but no IOV\n");
 4056                 return false;
 4057         }
 4058 
 4059         /* Mark VFs */
 4060         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
 4061                 struct ecore_vf_info *p_vf;
 4062                 u8 vfid;
 4063 
 4064                 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
 4065                 if (!p_vf)
 4066                         continue;
 4067 
 4068                 vfid = p_vf->abs_vf_id;
 4069                 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
 4070                         u64 *p_flr =  p_hwfn->pf_iov_info->pending_flr;
 4071                         u16 rel_vf_id = p_vf->relative_vf_id;
 4072 
 4073                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4074                                    "VF[%d] [rel %d] got FLR-ed\n",
 4075                                    vfid, rel_vf_id);
 4076 
 4077                         p_vf->state = VF_RESET;
 4078 
 4079                         /* No need to lock here, since pending_flr should
 4080                          * only change here and before ACKing the MFW.
 4081                          * Since the MFW will not trigger an additional
 4082                          * attention for VF FLR until we ACK, we're safe.
 4083                          */
 4084                         p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
 4085                         found = true;
 4086                 }
 4087         }
 4088 
 4089         return found;
 4090 }
 4091 
 4092 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
 4093                         u16 vfid,
 4094                         struct ecore_mcp_link_params *p_params,
 4095                         struct ecore_mcp_link_state *p_link,
 4096                         struct ecore_mcp_link_capabilities *p_caps)
 4097 {
 4098         struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
 4099         struct ecore_bulletin_content *p_bulletin;
 4100 
 4101         if (!p_vf)
 4102                 return;
 4103 
 4104         p_bulletin = p_vf->bulletin.p_virt;
 4105 
 4106         if (p_params)
 4107                 __ecore_vf_get_link_params(p_params, p_bulletin);
 4108         if (p_link)
 4109                 __ecore_vf_get_link_state(p_link, p_bulletin);
 4110         if (p_caps)
 4111                 __ecore_vf_get_link_caps(p_caps, p_bulletin);
 4112 }
 4113 
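      /* Main dispatch point for VF->PF mailbox requests. Copies the
       * first TLV, locks the per-VF channel, and routes known TLV
       * types to their handlers. Requests from VFs marked malicious
       * are answered with PFVF_STATUS_MALICIOUS, and unknown TLVs are
       * answered with PFVF_STATUS_NOT_SUPPORTED when a valid reply
       * address is available.
       */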
 4114 void ecore_iov_process_mbx_req(struct ecore_hwfn    *p_hwfn,
 4115                                struct ecore_ptt     *p_ptt,
 4116                                int vfid)
 4117 {
 4118         struct ecore_iov_vf_mbx *mbx;
 4119         struct ecore_vf_info *p_vf;
 4120 
 4121         p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4122         if (!p_vf)
 4123                 return;
 4124 
 4125         mbx = &p_vf->vf_mbx;
 4126 
 4127         /* ecore_iov_process_mbx_request */
 4128 #ifndef CONFIG_ECORE_SW_CHANNEL
 4129         if (!mbx->b_pending_msg) {
 4130                 DP_NOTICE(p_hwfn, true,
 4131                           "VF[%02x]: Trying to process mailbox message when none is pending\n",
 4132                           p_vf->abs_vf_id);
 4133                 return;
 4134         }
 4135         mbx->b_pending_msg = false;
 4136 #endif
 4137 
 4138         mbx->first_tlv = mbx->req_virt->first_tlv;
 4139 
 4140         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4141                    "VF[%02x]: Processing mailbox message [type %04x]\n",
 4142                    p_vf->abs_vf_id, mbx->first_tlv.tl.type);
 4143 
 4144         OSAL_IOV_VF_MSG_TYPE(p_hwfn,
 4145                              p_vf->relative_vf_id,
 4146                              mbx->first_tlv.tl.type);
 4147 
 4148         /* Lock the per-VF op mutex and note the locker's identity.
 4149          * The unlock will take place in the mbx response.
 4150          */
 4151         ecore_iov_lock_vf_pf_channel(p_hwfn, p_vf,
 4152                                      mbx->first_tlv.tl.type);
 4153 
 4154         /* check if tlv type is known */
 4155         if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
 4156             !p_vf->b_malicious) {
 4157                 /* switch on the opcode */
 4158                 switch (mbx->first_tlv.tl.type) {
 4159                 case CHANNEL_TLV_ACQUIRE:
 4160                         ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
 4161                         break;
 4162                 case CHANNEL_TLV_VPORT_START:
 4163                         ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
 4164                         break;
 4165                 case CHANNEL_TLV_VPORT_TEARDOWN:
 4166                         ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
 4167                         break;
 4168                 case CHANNEL_TLV_START_RXQ:
 4169                         ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
 4170                         break;
 4171                 case CHANNEL_TLV_START_TXQ:
 4172                         ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
 4173                         break;
 4174                 case CHANNEL_TLV_STOP_RXQS:
 4175                         ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
 4176                         break;
 4177                 case CHANNEL_TLV_STOP_TXQS:
 4178                         ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
 4179                         break;
 4180                 case CHANNEL_TLV_UPDATE_RXQ:
 4181                         ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
 4182                         break;
 4183                 case CHANNEL_TLV_VPORT_UPDATE:
 4184                         ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
 4185                         break;
 4186                 case CHANNEL_TLV_UCAST_FILTER:
 4187                         ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
 4188                         break;
 4189                 case CHANNEL_TLV_CLOSE:
 4190                         ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
 4191                         break;
 4192                 case CHANNEL_TLV_INT_CLEANUP:
 4193                         ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
 4194                         break;
 4195                 case CHANNEL_TLV_RELEASE:
 4196                         ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
 4197                         break;
 4198                 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
 4199                         ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
 4200                         break;
 4201                 case CHANNEL_TLV_COALESCE_UPDATE:
 4202                         ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
 4203                         break;
 4204                 case CHANNEL_TLV_COALESCE_READ:
 4205                         ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
 4206                         break;
 4207                 }
 4208         } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
 4209                 /* If we've received a message from a VF we consider
 4210                  * malicious, we ignore it unless it's a RELEASE, in
 4211                  * which case we give it the benefit of the doubt and
 4212                  * allow the next loaded driver to start over.
 4213                  */
 4214                 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
 4215                         /* TODO - initiate FLR, remove malicious indication */
 4216                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4217                                    "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
 4218                                    p_vf->abs_vf_id);
 4219                 } else {
 4220                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4221                                    "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
 4222                                    p_vf->abs_vf_id, mbx->first_tlv.tl.type);
 4223                 }
 4224 
 4225                 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
 4226                                        mbx->first_tlv.tl.type,
 4227                                        sizeof(struct pfvf_def_resp_tlv),
 4228                                        PFVF_STATUS_MALICIOUS);
 4229         } else {
 4230                 /* Unknown TLV - it may belong to a VF driver from the
 4231                  * future, a version written after this PF driver that
 4232                  * supports features we don't yet know of and hence
 4233                  * can't support; or it may be a buggy VF driver
 4234                  * sending garbage over the channel.
 4235                  */
 4236                 DP_NOTICE(p_hwfn, false,
 4237                           "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
 4238                           p_vf->abs_vf_id,
 4239                           mbx->first_tlv.tl.type,
 4240                           mbx->first_tlv.tl.length,
 4241                           mbx->first_tlv.padding,
 4242                           (unsigned long long)mbx->first_tlv.reply_address);
 4243 
 4244                 /* Try replying in case reply address matches the acquisition's
 4245                  * posted address.
 4246                  */
 4247                 if (p_vf->acquire.first_tlv.reply_address &&
 4248                     (mbx->first_tlv.reply_address ==
 4249                      p_vf->acquire.first_tlv.reply_address))
 4250                         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
 4251                                                mbx->first_tlv.tl.type,
 4252                                                sizeof(struct pfvf_def_resp_tlv),
 4253                                                PFVF_STATUS_NOT_SUPPORTED);
 4254                 else
 4255                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4256                                    "VF[%02x]: Can't respond to TLV - no valid reply address\n",
 4257                                    p_vf->abs_vf_id);
 4258         }
 4259 
 4260         ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
 4261                                        mbx->first_tlv.tl.type);
 4262 
 4263 #ifdef CONFIG_ECORE_SW_CHANNEL
 4264         mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
 4265         mbx->sw_mbx.response_offset = 0;
 4266 #endif
 4267 }
 4268 
 4269 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
 4270                                      u64 *events)
 4271 {
 4272         int i;
 4273 
 4274         OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
 4275 
 4276         ecore_for_each_vf(p_hwfn, i) {
 4277                 struct ecore_vf_info *p_vf;
 4278 
 4279                 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
 4280                 if (p_vf->vf_mbx.b_pending_msg)
 4281                         events[i / 64] |= 1ULL << (i % 64);
 4282         }
 4283 }
 4284 
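      /* Translate an absolute VF id, as reported in HW events, into
       * this PF's vf-info entry; returns OSAL_NULL if the id cannot
       * be handled by this PF.
       */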
 4285 static struct ecore_vf_info *
 4286 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
 4287 {
 4288         u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
 4289 
 4290         if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
 4291                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4292                            "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
 4293                            abs_vfid);
 4294                 return OSAL_NULL;
 4295         }
 4296 
 4297         return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
 4298 }
 4299 
 4300 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
 4301                                                  u16 abs_vfid,
 4302                                                  struct regpair *vf_msg)
 4303 {
 4304         struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
 4305                                                                    abs_vfid);
 4306 
 4307         if (!p_vf)
 4308                 return ECORE_SUCCESS;
 4309 
 4310         /* Record the physical address of the request so that the
 4311          * handler can later copy the message from it.
 4312          */
 4313         p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) |
 4314                                    vf_msg->lo;
 4315 
 4316         p_vf->vf_mbx.b_pending_msg = true;
 4317 
 4318         return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
 4319 }
 4320 
 4321 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
 4322                                        struct malicious_vf_eqe_data *p_data)
 4323 {
 4324         struct ecore_vf_info *p_vf;
 4325 
 4326         p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
 4327 
 4328         if (!p_vf)
 4329                 return;
 4330 
 4331         if (!p_vf->b_malicious) {
 4332                 DP_NOTICE(p_hwfn, false,
 4333                           "VF [%d] - Malicious behavior [%02x]\n",
 4334                           p_vf->abs_vf_id, p_data->err_id);
 4335 
 4336                 p_vf->b_malicious = true;
 4337         } else {
 4338                 DP_INFO(p_hwfn,
 4339                         "VF [%d] - Malicious behavior [%02x]\n",
 4340                         p_vf->abs_vf_id, p_data->err_id);
 4341         }
 4342 
 4343         OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
 4344 }
 4345 
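      /* Event-queue handler for SRIOV-related events - demultiplexes
       * VF-PF channel messages, VF FLR indications and malicious-VF
       * notifications arriving over the event ring.
       */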
 4346 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
 4347                                                   u8 opcode,
 4348                                                   __le16 echo,
 4349                                                   union event_ring_data *data,
 4350                                                   u8 OSAL_UNUSED fw_return_code)
 4351 {
 4352         switch (opcode) {
 4353         case COMMON_EVENT_VF_PF_CHANNEL:
 4354                 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
 4355                                             &data->vf_pf_channel.msg_addr);
 4356         case COMMON_EVENT_VF_FLR:
 4357                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4358                            "VF-FLR is still not supported\n");
 4359                 return ECORE_SUCCESS;
 4360         case COMMON_EVENT_MALICIOUS_VF:
 4361                 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
 4362                 return ECORE_SUCCESS;
 4363         default:
 4364                 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
 4365                         opcode);
 4366                 return ECORE_INVAL;
 4367         }
 4368 }
 4369 
 4370 #ifndef LINUX_REMOVE
 4371 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
 4372                                  u16               rel_vf_id)
 4373 {
 4374         return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
 4375                   (1ULL << (rel_vf_id % 64)));
 4376 }
 4377 #endif
 4378 
 4379 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
 4380 {
 4381         struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
 4382         u16 i;
 4383 
 4384         if (!p_iov)
 4385                 goto out;
 4386 
 4387         for (i = rel_vf_id; i < p_iov->total_vfs; i++)
 4388                 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
 4389                         return i;
 4390 
 4391 out:
 4392         return MAX_NUM_VFS_E4;
 4393 }
 4394 
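      /* DMAE-copy the pending request from the VF's posted physical
       * address into the PF's request buffer, from where the TLVs can
       * be parsed safely.
       */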
 4395 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
 4396                                            struct ecore_ptt *ptt,
 4397                                            int vfid)
 4398 {
 4399         struct ecore_dmae_params params;
 4400         struct ecore_vf_info *vf_info;
 4401 
 4402         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4403         if (!vf_info)
 4404                 return ECORE_INVAL;
 4405 
 4406         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
 4407         params.flags = ECORE_DMAE_FLAG_VF_SRC |
 4408                        ECORE_DMAE_FLAG_COMPLETION_DST;
 4409         params.src_vfid = vf_info->abs_vf_id;
 4410 
 4411         if (ecore_dmae_host2host(p_hwfn, ptt,
 4412                                  vf_info->vf_mbx.pending_req,
 4413                                  vf_info->vf_mbx.req_phys,
 4414                                  sizeof(union vfpf_tlvs) / 4,
 4415                                  &params)) {
 4416                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4417                            "Failed to copy message from VF 0x%02x\n",
 4418                            vfid);
 4419 
 4420                 return ECORE_IO;
 4421         }
 4422 
 4423         return ECORE_SUCCESS;
 4424 }
 4425 
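      /* Publish a forced MAC address in the VF's bulletin board. A
       * forced MAC supersedes (and clears) a regular bulletin MAC and
       * is applied to the vport immediately via the forced
       * configuration flow.
       */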
 4426 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
 4427                                        u8 *mac, int vfid)
 4428 {
 4429         struct ecore_vf_info *vf_info;
 4430         u64 feature;
 4431 
 4432         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4433         if (!vf_info) {
 4434                 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced MAC, invalid vfid [%d]\n",
 4435                           vfid);
 4436                 return;
 4437         }
 4438         if (vf_info->b_malicious) {
 4439                 DP_NOTICE(p_hwfn->p_dev, false, "Can't set forced MAC to malicious VF [%d]\n",
 4440                           vfid);
 4441                 return;
 4442         }
 4443 
 4444         feature = 1 << MAC_ADDR_FORCED;
 4445         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
 4446                     mac, ETH_ALEN);
 4447 
 4448         vf_info->bulletin.p_virt->valid_bitmap |= feature;
 4449         /* Forced MAC will disable MAC_ADDR */
 4450         vf_info->bulletin.p_virt->valid_bitmap &= 
 4451                 ~(1 << VFPF_BULLETIN_MAC_ADDR);
 4452 
 4453         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 4454 }
 4455 
 4456 #ifndef LINUX_REMOVE
 4457 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
 4458                                                 u8 *mac, int vfid)
 4459 {
 4460         struct ecore_vf_info *vf_info;
 4461         u64 feature;
 4462 
 4463         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4464         if (!vf_info) {
 4465                 DP_NOTICE(p_hwfn->p_dev, true, "Can not set MAC, invalid vfid [%d]\n",
 4466                           vfid);
 4467                 return ECORE_INVAL;
 4468         }
 4469         if (vf_info->b_malicious) {
 4470                 DP_NOTICE(p_hwfn->p_dev, false, "Can't set MAC to malicious VF [%d]\n",
 4471                           vfid);
 4472                 return ECORE_INVAL;
 4473         }
 4474 
 4475         if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
 4476                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Can not set MAC, Forced MAC is configured\n");
 4477                 return ECORE_INVAL;
 4478         }
 4479 
 4480         feature = 1 << VFPF_BULLETIN_MAC_ADDR;
 4481         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
 4482                     mac, ETH_ALEN);
 4483 
 4484         vf_info->bulletin.p_virt->valid_bitmap |= feature;
 4485 
 4486         return ECORE_SUCCESS;
 4487 }
 4488 
 4489 enum _ecore_status_t
 4490 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
 4491                                                bool b_untagged_only,
 4492                                                int vfid)
 4493 {
 4494         struct ecore_vf_info *vf_info;
 4495         u64 feature;
 4496 
 4497         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4498         if (!vf_info) {
 4499                 DP_NOTICE(p_hwfn->p_dev, true,
 4500                           "Can not set untagged default, invalid vfid [%d]\n",
 4501                           vfid);
 4502                 return ECORE_INVAL;
 4503         }
 4504         if (vf_info->b_malicious) {
 4505                 DP_NOTICE(p_hwfn->p_dev, false,
 4506                           "Can't set untagged default to malicious VF [%d]\n",
 4507                           vfid);
 4508                 return ECORE_INVAL;
 4509         }
 4510 
 4511         /* Since this is configurable only during vport-start, don't take it
 4512          * if we're past that point.
 4513          */
 4514         if (vf_info->state == VF_ENABLED) {
 4515                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4516                            "Can't support untagged change for vfid[%d] - VF is already active\n",
 4517                            vfid);
 4518                 return ECORE_INVAL;
 4519         }
 4520 
 4521         /* Set configuration; This will later be taken into account during the
 4522          * VF initialization.
 4523          */
 4524         feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
 4525                   (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
 4526         vf_info->bulletin.p_virt->valid_bitmap |= feature;
 4527 
 4528         vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
 4529                                                                           : 0;
 4530 
 4531         return ECORE_SUCCESS;
 4532 }
 4533 
 4534 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
 4535                                   u16 *opaque_fid)
 4536 {
 4537         struct ecore_vf_info *vf_info;
 4538 
 4539         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4540         if (!vf_info)
 4541                 return;
 4542 
 4543         *opaque_fid = vf_info->opaque_fid;
 4544 }
 4545 #endif
 4546 
 4547 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
 4548                                         u16 pvid, int vfid)
 4549 {
 4550         struct ecore_vf_info *vf_info;
 4551         u64 feature;
 4552 
 4553         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4554         if (!vf_info) {
 4555                 DP_NOTICE(p_hwfn->p_dev, true, "Can not set forced vlan, invalid vfid [%d]\n",
 4556                           vfid);
 4557                 return;
 4558         }
 4559         if (vf_info->b_malicious) {
 4560                 DP_NOTICE(p_hwfn->p_dev, false,
 4561                           "Can't set forced vlan to malicious VF [%d]\n",
 4562                           vfid);
 4563                 return;
 4564         }
 4565 
 4566         feature = 1 << VLAN_ADDR_FORCED;
 4567         vf_info->bulletin.p_virt->pvid = pvid;
 4568         if (pvid)
 4569                 vf_info->bulletin.p_virt->valid_bitmap |= feature;
 4570         else
 4571                 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
 4572 
 4573         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 4574 }
 4575 
 4576 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
 4577                                       int vfid, u16 vxlan_port, u16 geneve_port)
 4578 {
 4579         struct ecore_vf_info *vf_info;
 4580 
 4581         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4582         if (!vf_info) {
 4583                 DP_NOTICE(p_hwfn->p_dev, true,
 4584                           "Can not set udp ports, invalid vfid [%d]\n", vfid);
 4585                 return;
 4586         }
 4587 
 4588         if (vf_info->b_malicious) {
 4589                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 4590                            "Can not set udp ports to malicious VF [%d]\n",
 4591                            vfid);
 4592                 return;
 4593         }
 4594 
 4595         vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
 4596         vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
 4597 }
 4598 
 4599 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
 4600 {
 4601         struct ecore_vf_info *p_vf_info;
 4602 
 4603         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4604         if (!p_vf_info)
 4605                 return false;
 4606 
 4607         return !!p_vf_info->vport_instance;
 4608 }
 4609 
 4610 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
 4611 {
 4612         struct ecore_vf_info *p_vf_info;
 4613 
 4614         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4615         if (!p_vf_info)
 4616                 return true;
 4617 
 4618         return p_vf_info->state == VF_STOPPED;
 4619 }
 4620 
 4621 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
 4622 {
 4623         struct ecore_vf_info *vf_info;
 4624 
 4625         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4626         if (!vf_info)
 4627                 return false;
 4628 
 4629         return vf_info->spoof_chk;
 4630 }
 4631 
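      /* Request spoof-checking for a VF. If the VF has no vport
       * instance yet, the value is latched and applied once the vport
       * starts; otherwise it is configured immediately.
       */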
 4632 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
 4633                                             int vfid, bool val)
 4634 {
 4635         struct ecore_vf_info *vf;
 4636         enum _ecore_status_t rc = ECORE_INVAL;
 4637 
 4638         if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
 4639                 DP_NOTICE(p_hwfn, true,
 4640                           "SR-IOV sanity check failed, can't set spoofchk\n");
 4641                 goto out;
 4642         }
 4643 
 4644         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4645         if (!vf)
 4646                 goto out;
 4647 
 4648         if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
 4649                 /* After VF VPORT start PF will configure spoof check */
 4650                 vf->req_spoofchk_val = val;
 4651                 rc = ECORE_SUCCESS;
 4652                 goto out;
 4653         }
 4654 
 4655         rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
 4656 
 4657 out:
 4658         return rc;
 4659 }
 4660 
 4661 #ifndef LINUX_REMOVE
 4662 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
 4663 {
 4664         u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
 4665 
 4666         max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
 4667                                                 : ECORE_MAX_VF_CHAINS_PER_PF;
 4668 
 4669         return max_chains_per_vf;
 4670 }
 4671 
 4672 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
 4673                                           u16 rel_vf_id,
 4674                                           void **pp_req_virt_addr,
 4675                                           u16 *p_req_virt_size)
 4676 {
 4677         struct ecore_vf_info *vf_info =
 4678                 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4679 
 4680         if (!vf_info)
 4681                 return;
 4682 
 4683         if (pp_req_virt_addr)
 4684                 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
 4685 
 4686         if (p_req_virt_size)
 4687                 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
 4688 }
 4689 
 4690 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
 4691                                             u16 rel_vf_id,
 4692                                             void **pp_reply_virt_addr,
 4693                                             u16 *p_reply_virt_size)
 4694 {
 4695         struct ecore_vf_info *vf_info =
 4696                 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4697 
 4698         if (!vf_info)
 4699                 return;
 4700 
 4701         if (pp_reply_virt_addr)
 4702                 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
 4703 
 4704         if (p_reply_virt_size)
 4705                 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
 4706 }
 4707 
 4708 #ifdef CONFIG_ECORE_SW_CHANNEL
 4709 struct ecore_iov_sw_mbx*
 4710 ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
 4711                         u16 rel_vf_id)
 4712 {
 4713         struct ecore_vf_info *vf_info =
 4714                 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4715 
 4716         if (!vf_info)
 4717                 return OSAL_NULL;
 4718 
 4719         return &vf_info->vf_mbx.sw_mbx;
 4720 }
 4721 #endif
 4722 
 4723 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
 4724 {
 4725         return (length >= sizeof(struct vfpf_first_tlv) &&
 4726                 (length <= sizeof(union vfpf_tlvs)));
 4727 }
 4728 
 4729 u32 ecore_iov_pfvf_msg_length(void)
 4730 {
 4731         return sizeof(union pfvf_tlvs);
 4732 }
 4733 #endif
 4734 
 4735 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
 4736                                       u16 rel_vf_id)
 4737 {
 4738         struct ecore_vf_info *p_vf;
 4739 
 4740         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4741         if (!p_vf || !p_vf->bulletin.p_virt)
 4742                 return OSAL_NULL;
 4743 
 4744         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
 4745                 return OSAL_NULL;
 4746 
 4747         return p_vf->bulletin.p_virt->mac;
 4748 }
 4749 
 4750 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
 4751                                        u16 rel_vf_id)
 4752 {
 4753         struct ecore_vf_info *p_vf;
 4754 
 4755         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4756         if (!p_vf || !p_vf->bulletin.p_virt)
 4757                 return 0;
 4758 
 4759         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
 4760                 return 0;
 4761 
 4762         return p_vf->bulletin.p_virt->pvid;
 4763 }
 4764 
 4765 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
 4766                                                  struct ecore_ptt *p_ptt,
 4767                                                  int vfid, int val)
 4768 {
 4769         struct ecore_mcp_link_state *p_link;
 4770         struct ecore_vf_info *vf;
 4771         u8 abs_vp_id = 0;
 4772         enum _ecore_status_t rc;
 4773 
 4774         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4775 
 4776         if (!vf)
 4777                 return ECORE_INVAL;
 4778 
 4779         rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
 4780         if (rc != ECORE_SUCCESS)
 4781                 return rc;
 4782 
 4783         p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
 4784 
 4785         return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
 4786                                    p_link->speed);
 4787 }
 4788 
 4789 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
 4790                                                      int vfid, u32 rate)
 4791 {
 4792         struct ecore_vf_info *vf;
 4793         u8 vport_id;
 4794         int i;
 4795 
 4796         for_each_hwfn(p_dev, i) {
 4797                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 4798 
 4799                 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
 4800                         DP_NOTICE(p_hwfn, true,
 4801                                   "SR-IOV sanity check failed, can't set min rate\n");
 4802                         return ECORE_INVAL;
 4803                 }
 4804         }
 4805 
 4806         vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
              if (!vf) {
                      DP_NOTICE(p_dev, true,
                                "Can not configure min rate, VF is not active\n");
                      return ECORE_INVAL;
              }
 4807         vport_id = vf->vport_id;
 4808 
 4809         return ecore_configure_vport_wfq(p_dev, vport_id, rate);
 4810 }
 4811 
 4812 #ifndef LINUX_REMOVE
 4813 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
 4814                                             struct ecore_ptt *p_ptt,
 4815                                             int vfid,
 4816                                             struct ecore_eth_stats *p_stats)
 4817 {
 4818         struct ecore_vf_info *vf;
 4819 
 4820         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4821         if (!vf)
 4822                 return ECORE_INVAL;
 4823 
 4824         if (vf->state != VF_ENABLED)
 4825                 return ECORE_INVAL;
 4826 
 4827         __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
 4828                                 vf->abs_vf_id + 0x10, false);
 4829 
 4830         return ECORE_SUCCESS;
 4831 }
 4832 
 4833 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
 4834                              u16 rel_vf_id)
 4835 {
 4836         struct ecore_vf_info *p_vf;
 4837 
 4838         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4839         if (!p_vf)
 4840                 return 0;
 4841 
 4842         return p_vf->num_rxqs;
 4843 }
 4844 
 4845 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
 4846                                     u16 rel_vf_id)
 4847 {
 4848         struct ecore_vf_info *p_vf;
 4849 
 4850         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4851         if (!p_vf)
 4852                 return 0;
 4853 
 4854         return p_vf->num_active_rxqs;
 4855 }
 4856 
 4857 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
 4858                            u16 rel_vf_id)
 4859 {
 4860         struct ecore_vf_info *p_vf;
 4861 
 4862         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4863         if (!p_vf)
 4864                 return OSAL_NULL;
 4865 
 4866         return p_vf->ctx;
 4867 }
 4868 
 4869 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
 4870                             u16 rel_vf_id)
 4871 {
 4872         struct ecore_vf_info *p_vf;
 4873 
 4874         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4875         if (!p_vf)
 4876                 return 0;
 4877 
 4878         return p_vf->num_sbs;
 4879 }
 4880 
 4881 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
 4882                                       u16 rel_vf_id)
 4883 {
 4884         struct ecore_vf_info *p_vf;
 4885 
 4886         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4887         if (!p_vf)
 4888                 return false;
 4889 
 4890         return (p_vf->state == VF_FREE);
 4891 }
 4892 
 4893 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
 4894                                               u16 rel_vf_id)
 4895 {
 4896         struct ecore_vf_info *p_vf;
 4897 
 4898         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4899         if (!p_vf)
 4900                 return false;
 4901 
 4902         return (p_vf->state == VF_ACQUIRED);
 4903 }
 4904 
 4905 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
 4906                                  u16 rel_vf_id)
 4907 {
 4908         struct ecore_vf_info *p_vf;
 4909 
 4910         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4911         if (!p_vf)
 4912                 return false;
 4913 
 4914         return (p_vf->state == VF_ENABLED);
 4915 }
 4916 
 4917 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
 4918                              u16 rel_vf_id)
 4919 {
 4920         struct ecore_vf_info *p_vf;
 4921 
 4922         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 4923         if (!p_vf)
 4924                 return false;
 4925 
 4926         return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
 4927 }
 4928 #endif
 4929 
 4930 int
 4931 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
 4932 {
 4933         struct ecore_wfq_data *vf_vp_wfq;
 4934         struct ecore_vf_info *vf_info;
 4935 
 4936         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4937         if (!vf_info)
 4938                 return 0;
 4939 
 4940         vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
 4941 
 4942         if (vf_vp_wfq->configured)
 4943                 return vf_vp_wfq->min_speed;
 4944         else
 4945                 return 0;
 4946 }
 4947 
 4948 #ifdef CONFIG_ECORE_SW_CHANNEL
 4949 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
 4950                                  bool b_is_hw)
 4951 {
 4952         struct ecore_vf_info *vf_info;
 4953 
 4954         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 4955         if (!vf_info)
 4956                 return;
 4957 
 4958         vf_info->b_hw_channel = b_is_hw;
 4959 }
 4960 #endif
