FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_dev_api.h

    1 /*
    2  * Copyright (c) 2017-2018 Cavium, Inc. 
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  *
   27  * $FreeBSD$
   28  *
   29  */
   30 
   31 #ifndef __ECORE_DEV_API_H__
   32 #define __ECORE_DEV_API_H__
   33 
   34 #include "ecore_status.h"
   35 #include "ecore_chain.h"
   36 #include "ecore_int_api.h"
   37 
   38 #define ECORE_DEFAULT_ILT_PAGE_SIZE 4
   39 
   40 struct ecore_wake_info {
   41         u32 wk_info;
   42         u32 wk_details;
   43         u32 wk_pkt_len;
   44         u8  wk_buffer[256];
   45 };
   46 
   47 /**
   48  * @brief ecore_init_dp - initialize the debug level
   49  *
   50  * @param p_dev
   51  * @param dp_module
   52  * @param dp_level
   53  * @param dp_ctx
   54  */
   55 void ecore_init_dp(struct ecore_dev *p_dev,
   56                    u32 dp_module,
   57                    u8 dp_level,
   58                    void *dp_ctx);
   59 
   60 /**
   61  * @brief ecore_init_struct - initialize the device structure to
   62  *        its defaults
   63  *
   64  * @param p_dev
   65  */
   66 enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
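
/*
 * Usage sketch for the two calls above: a probe path would typically route
 * ecore debug prints to its own context and reset the device structure before
 * making any further ecore calls. This is illustrative only; the helper name
 * and the debug module/level values (0 == quiet here) are placeholders rather
 * than part of this API.
 *
 *      static enum _ecore_status_t example_early_init(struct ecore_dev *p_dev,
 *                                                     void *dp_ctx)
 *      {
 *              // Route ecore debug prints through the caller-supplied context.
 *              ecore_init_dp(p_dev, 0, 0, dp_ctx);
 *
 *              // Bring the ecore_dev structure to a known default state.
 *              return ecore_init_struct(p_dev);
 *      }
 */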
   67 
   68 /**
    69  * @brief ecore_resc_free - free the resources allocated by ecore_resc_alloc
   70  *
   71  * @param p_dev
   72  */
   73 void ecore_resc_free(struct ecore_dev *p_dev);
   74 
   75 /**
    76  * @brief ecore_resc_alloc - allocate device resources
   77  *
   78  * @param p_dev
   79  *
   80  * @return enum _ecore_status_t
   81  */
   82 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
   83 
   84 /**
    85  * @brief ecore_resc_setup - set up the allocated device resources
   86  *
   87  * @param p_dev
   88  */
   89 void ecore_resc_setup(struct ecore_dev *p_dev);
   90 
   91 enum ecore_mfw_timeout_fallback {
   92         ECORE_TO_FALLBACK_TO_NONE,
   93         ECORE_TO_FALLBACK_TO_DEFAULT,
   94         ECORE_TO_FALLBACK_FAIL_LOAD,
   95 };
   96 
   97 enum ecore_override_force_load {
   98         ECORE_OVERRIDE_FORCE_LOAD_NONE,
   99         ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
  100         ECORE_OVERRIDE_FORCE_LOAD_NEVER,
  101 };
  102 
  103 struct ecore_drv_load_params {
  104         /* Indicates whether the driver is running over a crash kernel.
  105          * As part of the load request, this will be used for providing the
  106          * driver role to the MFW.
  107          * In case of a crash kernel over PDA - this should be set to false.
  108          */
  109         bool is_crash_kernel;
  110 
  111         /* The timeout value that the MFW should use when locking the engine for
  112          * the driver load process.
   113          * A value of '0' means the default value, and '255' means no timeout.
  114          */
  115         u8 mfw_timeout_val;
  116 #define ECORE_LOAD_REQ_LOCK_TO_DEFAULT  0
  117 #define ECORE_LOAD_REQ_LOCK_TO_NONE     255
  118 
  119         /* Action to take in case the MFW doesn't support timeout values other
   120          * than default and none.
  121          */
  122         enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
  123 
  124         /* Avoid engine reset when first PF loads on it */
  125         bool avoid_eng_reset;
  126 
  127         /* Allow overriding the default force load behavior */
  128         enum ecore_override_force_load override_force_load;
  129 };
  130 
  131 struct ecore_hw_init_params {
  132         /* Tunneling parameters */
  133         struct ecore_tunnel_info *p_tunn;
  134 
  135         bool b_hw_start;
  136 
  137         /* Interrupt mode [msix, inta, etc.] to use */
  138         enum ecore_int_mode int_mode;
  139 
  140         /* NPAR tx switching to be used for vports configured for tx-switching */
  141         bool allow_npar_tx_switch;
  142 
  143         /* PCI relax ordering to be configured by MFW or ecore client */
  144         enum ecore_pci_rlx_odr pci_rlx_odr_mode;
  145 
  146         /* Binary fw data pointer in binary fw file */
  147         const u8 *bin_fw_data;
  148 
  149         /* Driver load parameters */
  150         struct ecore_drv_load_params *p_drv_load_params;
  151 
  152         /* Avoid engine affinity for RoCE/storage in case of CMT mode */
  153         bool avoid_eng_affin;
  154 };
  155 
  156 /**
   157  * @brief ecore_hw_init - initialize the HW
  158  *
  159  * @param p_dev
  160  * @param p_params
  161  *
  162  * @return enum _ecore_status_t
  163  */
  164 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
  165                                    struct ecore_hw_init_params *p_params);
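
/*
 * Usage sketch for ecore_hw_init(): the load parameters above are typically
 * zeroed, filled in, and chained together before the call. The helper name is
 * illustrative; ECORE_INT_MODE_MSIX is assumed to be one of the interrupt-mode
 * constants from ecore_int_api.h, and fw_data stands for the binary firmware
 * image the caller has already obtained.
 *
 *      static enum _ecore_status_t example_hw_init(struct ecore_dev *p_dev,
 *                                                  const u8 *fw_data)
 *      {
 *              struct ecore_drv_load_params load_params = { 0 };
 *              struct ecore_hw_init_params init_params = { 0 };
 *
 *              load_params.is_crash_kernel = false;
 *              load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *              load_params.mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;
 *              load_params.avoid_eng_reset = false;
 *              load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 *
 *              init_params.p_tunn = OSAL_NULL;
 *              init_params.b_hw_start = true;
 *              init_params.int_mode = ECORE_INT_MODE_MSIX;
 *              init_params.allow_npar_tx_switch = true;
 *              init_params.bin_fw_data = fw_data;
 *              init_params.p_drv_load_params = &load_params;
 *
 *              return ecore_hw_init(p_dev, &init_params);
 *      }
 */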
  166 
  167 /**
   168  * @brief ecore_hw_timers_stop_all - stop all HW timers
  169  *
  170  * @param p_dev
  171  *
  172  * @return void
  173  */
  174 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
  175 
  176 /**
   177  * @brief ecore_hw_stop - stop the HW
  178  *
  179  * @param p_dev
  180  *
  181  * @return enum _ecore_status_t
  182  */
  183 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
  184 
  185 /**
   186  * @brief ecore_hw_stop_fastpath - should be called in case
  187  *        slowpath is still required for the device,
  188  *        but fastpath is not.
  189  *
  190  * @param p_dev
  191  *
  192  * @return enum _ecore_status_t
  193  */
  194 enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
  195 
  196 #ifndef LINUX_REMOVE
  197 /**
   198  * @brief ecore_hw_hibernate_prepare - should be called when
  199  *        the system is going into the hibernate state
  200  *
  201  * @param p_dev
  202  *
  203  */
  204 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);
  205 
  206 /**
   207  * @brief ecore_hw_hibernate_resume - should be called when the system is
   208  *        resuming from the D3 power state and before calling ecore_hw_init.
   209  *
   210  * @param p_dev
  211  *
  212  */
  213 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);
  214 
  215 #endif
  216 
  217 /**
   218  * @brief ecore_hw_start_fastpath - restart fastpath traffic; should be
   219  *        called only if ecore_hw_stop_fastpath was called previously.
   220  *
  221  * @param p_hwfn
  222  *
  223  * @return enum _ecore_status_t
  224  */
  225 enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
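
/*
 * Usage sketch: ecore_hw_stop_fastpath()/ecore_hw_start_fastpath() can bracket
 * an operation that must run with fastpath traffic quiesced while slowpath
 * stays up. Note the asymmetry in the API above: stop takes the ecore_dev,
 * while start is per-hwfn. The loop below assumes the usual
 * p_dev->hwfns[]/num_hwfns layout of struct ecore_dev.
 *
 *      static enum _ecore_status_t example_quiesce_fastpath(struct ecore_dev *p_dev)
 *      {
 *              enum _ecore_status_t rc;
 *              int i;
 *
 *              rc = ecore_hw_stop_fastpath(p_dev);
 *              if (rc != ECORE_SUCCESS)
 *                      return rc;
 *
 *              // ... reconfigure queues, rings, etc. while fastpath is down ...
 *
 *              for (i = 0; i < p_dev->num_hwfns; i++) {
 *                      rc = ecore_hw_start_fastpath(&p_dev->hwfns[i]);
 *                      if (rc != ECORE_SUCCESS)
 *                              return rc;
 *              }
 *              return ECORE_SUCCESS;
 *      }
 */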
  226 
  227 enum ecore_hw_prepare_result {
  228         ECORE_HW_PREPARE_SUCCESS,
  229 
  230         /* FAILED results indicate probe has failed & cleaned up */
  231         ECORE_HW_PREPARE_FAILED_ENG2,
  232         ECORE_HW_PREPARE_FAILED_ME,
  233         ECORE_HW_PREPARE_FAILED_MEM,
  234         ECORE_HW_PREPARE_FAILED_DEV,
  235         ECORE_HW_PREPARE_FAILED_NVM,
  236 
   237         /* BAD results indicate the probe passed even though something went
   238          * wrong; actually trying to use the device [i.e., hw_init()] might
   239          * have dire repercussions.
  240          */
  241         ECORE_HW_PREPARE_BAD_IOV,
  242         ECORE_HW_PREPARE_BAD_MCP,
  243         ECORE_HW_PREPARE_BAD_IGU,
  244 };
  245 
  246 struct ecore_hw_prepare_params {
  247         /* Personality to initialize */
  248         int personality;
  249 
  250         /* Force the driver's default resource allocation */
  251         bool drv_resc_alloc;
  252 
  253         /* Check the reg_fifo after any register access */
  254         bool chk_reg_fifo;
  255 
  256         /* Request the MFW to initiate PF FLR */
  257         bool initiate_pf_flr;
  258 
  259         /* The OS Epoch time in seconds */
  260         u32 epoch;
  261 
  262         /* Allow the MFW to collect a crash dump */
  263         bool allow_mdump;
  264 
  265         /* Allow prepare to pass even if some initializations are failing.
   266          * If set, the `p_relaxed_res' field will be set with the result,
   267          * and might allow the probe to pass even if there are certain issues.
  268          */
  269         bool b_relaxed_probe;
  270         enum ecore_hw_prepare_result p_relaxed_res;
  271 };
  272 
  273 /**
   274  * @brief ecore_hw_prepare - prepare the device for initialization
  275  *
  276  * @param p_dev
  277  * @param p_params
  278  *
  279  * @return enum _ecore_status_t
  280  */
  281 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
  282                                       struct ecore_hw_prepare_params *p_params);
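
/*
 * Usage sketch for ecore_hw_prepare() with a relaxed probe: when
 * b_relaxed_probe is set, the probe may still pass despite certain issues, and
 * p_relaxed_res tells the caller how healthy the device really is. The helper
 * name, personality value and epoch source below are placeholders.
 *
 *      static enum _ecore_status_t example_probe(struct ecore_dev *p_dev,
 *                                                int personality, u32 epoch)
 *      {
 *              struct ecore_hw_prepare_params params = { 0 };
 *              enum _ecore_status_t rc;
 *
 *              params.personality = personality;
 *              params.drv_resc_alloc = false;
 *              params.chk_reg_fifo = false;
 *              params.initiate_pf_flr = true;
 *              params.epoch = epoch;
 *              params.allow_mdump = true;
 *              params.b_relaxed_probe = true;
 *
 *              rc = ecore_hw_prepare(p_dev, &params);
 *              if (rc != ECORE_SUCCESS)
 *                      return rc;
 *
 *              // A "BAD" result means the probe passed but the device is degraded.
 *              if (params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS) {
 *                      // e.g., log params.p_relaxed_res and decide whether to continue
 *              }
 *              return ECORE_SUCCESS;
 *      }
 */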
  283 
  284 /**
   285  * @brief ecore_hw_remove - remove the device (reverse of ecore_hw_prepare)
  286  *
  287  * @param p_dev
  288  */
  289 void ecore_hw_remove(struct ecore_dev *p_dev);
  290 
  291 /**
   292  * @brief ecore_set_nwuf_reg - Set a network wake-up filter pattern register
   293  *
   294  * @param p_dev
   295  * @param reg_idx - Index of the pattern register
   296  * @param pattern_size - size of the pattern
   297  * @param crc - CRC value of the pattern & mask
   298  *
   299  * @return enum _ecore_status_t
   300  */
  301 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
  302                                         u32 reg_idx, u32 pattern_size, u32 crc);
  303 
   304 /**
   305  * @brief ecore_get_wake_info - get the magic packet buffer
   306  *
   307  * @param p_hwfn
   308  * @param p_ptt
   309  * @param wake_info - pointer to an ecore_wake_info buffer
   310  *
   311  * @return enum _ecore_status_t
   312  */
  313 enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
  314                                          struct ecore_ptt *p_ptt,
  315                                          struct ecore_wake_info *wake_info);
  316 
   317 /**
   318  * @brief ecore_wol_buffer_clear - Clear the magic packet buffer
   319  *
   320  * @param p_hwfn
   321  * @param p_ptt
   322  *
   323  * @return void
   324  */
  325 void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
  326                             struct ecore_ptt *p_ptt);
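
/*
 * Usage sketch for the wake-on-LAN helpers above: reading the magic packet
 * information requires a PTT window (see ecore_ptt_acquire() below), and the
 * buffer is typically cleared once the information has been consumed. The
 * helper name is illustrative, and ECORE_AGAIN is assumed to be one of the
 * status codes in ecore_status.h.
 *
 *      static enum _ecore_status_t example_read_wake_info(struct ecore_hwfn *p_hwfn,
 *                                                         struct ecore_wake_info *info)
 *      {
 *              struct ecore_ptt *p_ptt;
 *              enum _ecore_status_t rc;
 *
 *              p_ptt = ecore_ptt_acquire(p_hwfn);
 *              if (p_ptt == OSAL_NULL)
 *                      return ECORE_AGAIN;
 *
 *              rc = ecore_get_wake_info(p_hwfn, p_ptt, info);
 *              if (rc == ECORE_SUCCESS)
 *                      ecore_wol_buffer_clear(p_hwfn, p_ptt);
 *
 *              ecore_ptt_release(p_hwfn, p_ptt);
 *              return rc;
 *      }
 */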
  327 
  328 /**
  329  * @brief ecore_ptt_acquire - Allocate a PTT window
  330  *
  331  * Should be called at the entry point to the driver (at the beginning of an
  332  * exported function)
  333  *
  334  * @param p_hwfn
  335  *
   336  * @return struct ecore_ptt *
  337  */
  338 struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
  339 
  340 /**
  341  * @brief ecore_ptt_release - Release PTT Window
  342  *
  343  * Should be called at the end of a flow - at the end of the function that
  344  * acquired the PTT.
  345  *
  346  *
  347  * @param p_hwfn
  348  * @param p_ptt
  349  */
  350 void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
  351                        struct ecore_ptt *p_ptt);
  352 
  353 /**
  354  * @brief ecore_get_dev_name - get device name, e.g., "BB B0"
  355  *
   356  * @param p_dev
  357  * @param name - this is where the name will be written to
  358  * @param max_chars - maximum chars that can be written to name including '\0'
  359  */
  360 void ecore_get_dev_name(struct ecore_dev *p_dev,
  361                         u8 *name,
  362                         u8 max_chars);
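
/*
 * Usage sketch: the name buffer is caller-provided and max_chars includes the
 * terminating '\0', so a small fixed-size array is enough (the size below is
 * an arbitrary example; the helper name is illustrative).
 *
 *      static void example_print_dev_name(struct ecore_dev *p_dev)
 *      {
 *              u8 name[16];
 *
 *              ecore_get_dev_name(p_dev, name, sizeof(name));
 *              // name now holds a NUL-terminated string such as "BB B0".
 *      }
 */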
  363 
  364 #ifndef __EXTRACT__LINUX__IF__
  365 struct ecore_eth_stats_common {
  366         u64 no_buff_discards;
  367         u64 packet_too_big_discard;
  368         u64 ttl0_discard;
  369         u64 rx_ucast_bytes;
  370         u64 rx_mcast_bytes;
  371         u64 rx_bcast_bytes;
  372         u64 rx_ucast_pkts;
  373         u64 rx_mcast_pkts;
  374         u64 rx_bcast_pkts;
  375         u64 mftag_filter_discards;
  376         u64 mac_filter_discards;
  377         u64 tx_ucast_bytes;
  378         u64 tx_mcast_bytes;
  379         u64 tx_bcast_bytes;
  380         u64 tx_ucast_pkts;
  381         u64 tx_mcast_pkts;
  382         u64 tx_bcast_pkts;
  383         u64 tx_err_drop_pkts;
  384         u64 tpa_coalesced_pkts;
  385         u64 tpa_coalesced_events;
  386         u64 tpa_aborts_num;
  387         u64 tpa_not_coalesced_pkts;
  388         u64 tpa_coalesced_bytes;
  389 
  390         /* port */
  391         u64 rx_64_byte_packets;
  392         u64 rx_65_to_127_byte_packets;
  393         u64 rx_128_to_255_byte_packets;
  394         u64 rx_256_to_511_byte_packets;
  395         u64 rx_512_to_1023_byte_packets;
  396         u64 rx_1024_to_1518_byte_packets;
  397         u64 rx_crc_errors;
  398         u64 rx_mac_crtl_frames;
  399         u64 rx_pause_frames;
  400         u64 rx_pfc_frames;
  401         u64 rx_align_errors;
  402         u64 rx_carrier_errors;
  403         u64 rx_oversize_packets;
  404         u64 rx_jabbers;
  405         u64 rx_undersize_packets;
  406         u64 rx_fragments;
  407         u64 tx_64_byte_packets;
  408         u64 tx_65_to_127_byte_packets;
  409         u64 tx_128_to_255_byte_packets;
  410         u64 tx_256_to_511_byte_packets;
  411         u64 tx_512_to_1023_byte_packets;
  412         u64 tx_1024_to_1518_byte_packets;
  413         u64 tx_pause_frames;
  414         u64 tx_pfc_frames;
  415         u64 brb_truncates;
  416         u64 brb_discards;
  417         u64 rx_mac_bytes;
  418         u64 rx_mac_uc_packets;
  419         u64 rx_mac_mc_packets;
  420         u64 rx_mac_bc_packets;
  421         u64 rx_mac_frames_ok;
  422         u64 tx_mac_bytes;
  423         u64 tx_mac_uc_packets;
  424         u64 tx_mac_mc_packets;
  425         u64 tx_mac_bc_packets;
  426         u64 tx_mac_ctrl_frames;
  427         u64 link_change_count;
  428 };
  429 
  430 struct ecore_eth_stats_bb {
  431         u64 rx_1519_to_1522_byte_packets;
  432         u64 rx_1519_to_2047_byte_packets;
  433         u64 rx_2048_to_4095_byte_packets;
  434         u64 rx_4096_to_9216_byte_packets;
  435         u64 rx_9217_to_16383_byte_packets;
  436         u64 tx_1519_to_2047_byte_packets;
  437         u64 tx_2048_to_4095_byte_packets;
  438         u64 tx_4096_to_9216_byte_packets;
  439         u64 tx_9217_to_16383_byte_packets;
  440         u64 tx_lpi_entry_count;
  441         u64 tx_total_collisions;
  442 };
  443 
  444 struct ecore_eth_stats_ah {
  445         u64 rx_1519_to_max_byte_packets;
  446         u64 tx_1519_to_max_byte_packets;
  447 };
  448 
  449 struct ecore_eth_stats {
  450         struct ecore_eth_stats_common common;
  451         union {
  452                 struct ecore_eth_stats_bb bb;
  453                 struct ecore_eth_stats_ah ah;
  454         };
  455 };
  456 #endif
  457 
  458 enum ecore_dmae_address_type_t {
  459         ECORE_DMAE_ADDRESS_HOST_VIRT,
  460         ECORE_DMAE_ADDRESS_HOST_PHYS,
  461         ECORE_DMAE_ADDRESS_GRC
  462 };
  463 
   464 /* Values for the 'flags' field. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is
   465  * set and the source is a block of length DMAE_MAX_RW_SIZE and the
   466  * destination is larger, the source block will be duplicated as
   467  * many times as required to fill the destination block. This is
   468  * used mostly to write a zeroed buffer to a destination address
   469  * using DMA.
   470  */
  471 #define ECORE_DMAE_FLAG_RW_REPL_SRC     0x00000001
  472 #define ECORE_DMAE_FLAG_VF_SRC          0x00000002
  473 #define ECORE_DMAE_FLAG_VF_DST          0x00000004
  474 #define ECORE_DMAE_FLAG_COMPLETION_DST  0x00000008
  475 #define ECORE_DMAE_FLAG_PORT            0x00000010
  476 #define ECORE_DMAE_FLAG_PF_SRC          0x00000020
  477 #define ECORE_DMAE_FLAG_PF_DST          0x00000040
  478 
  479 struct ecore_dmae_params {
  480         u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
  481         u8 src_vfid;
  482         u8 dst_vfid;
  483         u8 port_id;
  484         u8 src_pfid;
  485         u8 dst_pfid;
  486 };
  487 
  488 /**
   489  * @brief ecore_dmae_host2grc - copy data from a host source address to
   490  * a GRC address using the given ptt
  491  *
  492  * @param p_hwfn
  493  * @param p_ptt
  494  * @param source_addr
  495  * @param grc_addr (dmae_data_offset)
  496  * @param size_in_dwords
  497  * @param p_params (default parameters will be used in case of OSAL_NULL)
  498  *
  499  * @return enum _ecore_status_t
  500  */
  501 enum _ecore_status_t
  502 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
  503                     struct ecore_ptt *p_ptt,
  504                     u64 source_addr,
  505                     u32 grc_addr,
  506                     u32 size_in_dwords,
  507                     struct ecore_dmae_params *p_params);
  508 
  509 /**
   510  * @brief ecore_dmae_grc2host - Read data from a GRC address (dmae data
   511  * offset) to a host destination address using the given ptt
  512  *
  513  * @param p_ptt
  514  * @param grc_addr (dmae_data_offset)
  515  * @param dest_addr
  516  * @param size_in_dwords
  517  * @param p_params (default parameters will be used in case of OSAL_NULL)
  518  *
  519  * @return enum _ecore_status_t
  520  */
  521 enum _ecore_status_t
  522 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
  523                     struct ecore_ptt *p_ptt,
  524                     u32 grc_addr,
  525                     dma_addr_t dest_addr,
  526                     u32 size_in_dwords,
  527                     struct ecore_dmae_params *p_params);
  528 
  529 /**
   530  * @brief ecore_dmae_host2host - copy data from a source address
   531  * to a destination address (for SRIOV) using the given ptt
  532  *
  533  * @param p_hwfn
  534  * @param p_ptt
  535  * @param source_addr
  536  * @param dest_addr
  537  * @param size_in_dwords
  538  * @param p_params (default parameters will be used in case of OSAL_NULL)
  539  *
  540  * @return enum _ecore_status_t
  541  */
  542 enum _ecore_status_t
  543 ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
  544                      struct ecore_ptt *p_ptt,
  545                      dma_addr_t source_addr,
  546                      dma_addr_t dest_addr,
  547                      u32 size_in_dwords,
  548                      struct ecore_dmae_params *p_params);
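
/*
 * Usage sketch for the DMAE helpers above: passing OSAL_NULL as p_params
 * selects the default parameters, while the example below instead uses the
 * ECORE_DMAE_FLAG_RW_REPL_SRC replication flag to zero a GRC region from a
 * small zeroed host block, as described next to the flag definitions above.
 * zero_blk_phys is assumed to point to a zeroed block of DMAE_MAX_RW_SIZE;
 * the helper name is illustrative.
 *
 *      static enum _ecore_status_t example_zero_grc(struct ecore_hwfn *p_hwfn,
 *                                                   struct ecore_ptt *p_ptt,
 *                                                   u64 zero_blk_phys,
 *                                                   u32 grc_addr,
 *                                                   u32 len_dwords)
 *      {
 *              struct ecore_dmae_params params = { 0 };
 *
 *              // Replicate the zeroed source block across the destination.
 *              params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
 *              return ecore_dmae_host2grc(p_hwfn, p_ptt, zero_blk_phys,
 *                                         grc_addr, len_dwords, &params);
 *      }
 */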
  549 
  550 /**
  551  * @brief ecore_chain_alloc - Allocate and initialize a chain
  552  *
   553  * @param p_dev
  554  * @param intended_use
  555  * @param mode
  556  * @param num_elems
  557  * @param elem_size
  558  * @param p_chain
  559  *
  560  * @return enum _ecore_status_t
  561  */
  562 enum _ecore_status_t
  563 ecore_chain_alloc(struct ecore_dev *p_dev,
  564                   enum ecore_chain_use_mode intended_use,
  565                   enum ecore_chain_mode mode,
  566                   enum ecore_chain_cnt_type cnt_type,
  567                   u32 num_elems,
  568                   osal_size_t elem_size,
  569                   struct ecore_chain *p_chain,
  570                   struct ecore_chain_ext_pbl *ext_pbl);
  571 
  572 /**
  573  * @brief ecore_chain_free - Free chain DMA memory
  574  *
   575  * @param p_dev
  576  * @param p_chain
  577  */
  578 void ecore_chain_free(struct ecore_dev *p_dev,
  579                       struct ecore_chain *p_chain);
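
/*
 * Usage sketch for ecore_chain_alloc()/ecore_chain_free(): a caller typically
 * embeds a struct ecore_chain in its own queue structure and frees it with the
 * same ecore_dev. The enumerator names used below (consume/produce use mode,
 * PBL mode, 16-bit counters) are assumed to come from ecore_chain.h, and
 * OSAL_NULL for the last argument is assumed to mean "no external PBL"; the
 * element type and count are placeholders.
 *
 *      struct example_elem { u32 data[4]; };
 *
 *      static enum _ecore_status_t example_chain_setup(struct ecore_dev *p_dev,
 *                                                      struct ecore_chain *p_chain)
 *      {
 *              return ecore_chain_alloc(p_dev,
 *                                       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *                                       ECORE_CHAIN_MODE_PBL,
 *                                       ECORE_CHAIN_CNT_TYPE_U16,
 *                                       256,                          // num_elems
 *                                       sizeof(struct example_elem),  // elem_size
 *                                       p_chain,
 *                                       OSAL_NULL);                   // no external PBL
 *      }
 *
 *      static void example_chain_teardown(struct ecore_dev *p_dev,
 *                                         struct ecore_chain *p_chain)
 *      {
 *              ecore_chain_free(p_dev, p_chain);
 *      }
 */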
  580 
  581 /**
   582  * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
  583  *
  584  *  @param p_hwfn
  585  *  @param src_id - relative to p_hwfn
  586  *  @param dst_id - absolute per engine
  587  *
  588  *  @return enum _ecore_status_t
  589  */
  590 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
  591                                        u16 src_id,
  592                                        u16 *dst_id);
  593 
  594 /**
   595  * @brief ecore_fw_vport - Get absolute vport ID
  596  *
  597  *  @param p_hwfn
  598  *  @param src_id - relative to p_hwfn
  599  *  @param dst_id - absolute per engine
  600  *
  601  *  @return enum _ecore_status_t
  602  */
  603 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
  604                                     u8 src_id,
  605                                     u8 *dst_id);
  606 
  607 /**
   608  * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
  609  *
  610  *  @param p_hwfn
  611  *  @param src_id - relative to p_hwfn
  612  *  @param dst_id - absolute per engine
  613  *
  614  *  @return enum _ecore_status_t
  615  */
  616 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
  617                                       u8 src_id,
  618                                       u8 *dst_id);
  619 
  620 /**
   621  * @brief ecore_llh_get_num_ppfid - Return the number of LLH filter
  622  *      banks that are allocated to the PF.
  623  *
  624  * @param p_dev
  625  *
  626  * @return u8 - Number of LLH filter banks
  627  */
  628 u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);
  629 
  630 enum ecore_eng {
  631         ECORE_ENG0,
  632         ECORE_ENG1,
  633         ECORE_BOTH_ENG,
  634 };
  635 
  636 /**
  637  * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
  638  *
  639  * @param p_dev
  640  *
   641  * @return enum ecore_eng - L2 affinity hint
  642  */
  643 enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);
  644 
  645 /**
  646  * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
  647  *      LLH filter bank.
  648  *
  649  * @param p_dev
   650  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  651  * @param eng
  652  *
  653  * @return enum _ecore_status_t
  654  */
  655 enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
  656                                                   u8 ppfid, enum ecore_eng eng);
  657 
  658 /**
  659  * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
  660  *
  661  * @param p_dev
  662  * @param eng
  663  *
  664  * @return enum _ecore_status_t
  665  */
  666 enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
  667                                                  enum ecore_eng eng);
  668 
  669 /**
  670  * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
  671  *      bank.
  672  *
  673  * @param p_dev
   674  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  675  * @param mac_addr - MAC to add
  676  *
  677  * @return enum _ecore_status_t
  678  */
  679 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
  680                                               u8 mac_addr[ETH_ALEN]);
  681 
  682 /**
  683  * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
  684  *      filter bank.
  685  *
  686  * @param p_dev
   687  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  688  * @param mac_addr - MAC to remove
  689  */
  690 void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
  691                                  u8 mac_addr[ETH_ALEN]);
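
/*
 * Usage sketch for the LLH MAC filter calls above: filters are added to a
 * filter bank (ppfid) relative to the PF's allocation, with bank 0 as the
 * default, and the matching remove call uses the same bank and MAC. The MAC
 * value and helper name are placeholders.
 *
 *      static enum _ecore_status_t example_llh_mac(struct ecore_dev *p_dev)
 *      {
 *              u8 mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
 *              enum _ecore_status_t rc;
 *
 *              rc = ecore_llh_add_mac_filter(p_dev, 0, mac);
 *              if (rc != ECORE_SUCCESS)
 *                      return rc;
 *
 *              // ... later, when the MAC is no longer in use ...
 *              ecore_llh_remove_mac_filter(p_dev, 0, mac);
 *              return ECORE_SUCCESS;
 *      }
 */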
  692 
  693 enum ecore_llh_prot_filter_type_t {
  694         ECORE_LLH_FILTER_ETHERTYPE,
  695         ECORE_LLH_FILTER_TCP_SRC_PORT,
  696         ECORE_LLH_FILTER_TCP_DEST_PORT,
  697         ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
  698         ECORE_LLH_FILTER_UDP_SRC_PORT,
  699         ECORE_LLH_FILTER_UDP_DEST_PORT,
  700         ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
  701 };
  702 
  703 /**
  704  * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
  705  *      given filter bank.
  706  *
  707  * @param p_dev
   708  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  709  * @param type - type of filters and comparing
  710  * @param source_port_or_eth_type - source port or ethertype to add
  711  * @param dest_port - destination port to add
  712  *
  713  * @return enum _ecore_status_t
  714  */
  715 enum _ecore_status_t
  716 ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
  717                               enum ecore_llh_prot_filter_type_t type,
  718                               u16 source_port_or_eth_type, u16 dest_port);
  719 
  720 /**
  721  * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
  722  *      the given filter bank.
  723  *
  724  * @param p_dev
   725  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  726  * @param type - type of filters and comparing
   727  * @param source_port_or_eth_type - source port or ethertype to remove
   728  * @param dest_port - destination port to remove
  729  */
  730 void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
  731                                       enum ecore_llh_prot_filter_type_t type,
  732                                       u16 source_port_or_eth_type,
  733                                       u16 dest_port);
  734 
  735 /**
  736  * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
  737  *      filter bank.
  738  *
  739  * @param p_dev
   740  * @param ppfid - relative within the allocated ppfids ('0' is the default one).
  741  */
  742 void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);
  743 
  744 /**
  745  * @brief ecore_llh_clear_all_filters - Remove all LLH filters
  746  *
  747  * @param p_dev
  748  */
  749 void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);
  750 
  751 /**
  752  * @brief ecore_llh_set_function_as_default - set function as default per port
  753  *
  754  * @param p_hwfn
  755  * @param p_ptt
  756  */
  757 enum _ecore_status_t
  758 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
  759                                   struct ecore_ptt *p_ptt);
  760 
  761 /**
   762  * @brief Cleanup of previous driver remains prior to load
  763  *
  764  * @param p_hwfn
  765  * @param p_ptt
  766  * @param id - For PF, engine-relative. For VF, PF-relative.
  767  * @param is_vf - true iff cleanup is made for a VF.
  768  *
  769  * @return enum _ecore_status_t
  770  */
  771 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn      *p_hwfn,
  772                                          struct ecore_ptt       *p_ptt,
  773                                          u16                    id,
  774                                          bool                   is_vf);
  775 
  776 /**
  777  * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
  778  *
  779  * @param p_hwfn
   780  * @param coal - pointer where the coalesce value read from the hardware is stored.
   781  * @param handle
  782  *
  783  * @return enum _ecore_status_t
  784  **/
  785 enum _ecore_status_t
  786 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
  787                          void *handle);
  788 
  789 /**
   790  * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
   791  *    Tx queues. Coalescing can be configured up to 511 usec, but with varying
   792  *    accuracy [the bigger the value, the less accurate], up to an error of
   793  *    3 usec for the highest values.
   794  *    While the API allows setting coalescing per-qid, all queues sharing an SB
   795  *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
   796  *    otherwise the configuration would break.
  797  *
  798  * @param p_hwfn
   799  * @param rx_coal - Rx coalesce value in microseconds.
   800  * @param tx_coal - Tx coalesce value in microseconds.
  801  * @param p_handle
  802  *
  803  * @return enum _ecore_status_t
  804  **/
  805 enum _ecore_status_t
  806 ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
  807                          u16 tx_coal, void *p_handle);
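
/*
 * Usage sketch for the coalescing calls above: the handle is assumed to
 * identify the queue (e.g., as returned by the L2 queue-setup path), and
 * because all queues sharing an SB must stay within the same coalescing range,
 * a driver typically applies one rx/tx pair to every queue of an SB. The
 * values below are placeholders within the 0-0x7f range.
 *
 *      static enum _ecore_status_t example_set_coalesce(struct ecore_hwfn *p_hwfn,
 *                                                       void *queue_handle)
 *      {
 *              u16 cur_rx_coal;
 *              enum _ecore_status_t rc;
 *
 *              rc = ecore_get_queue_coalesce(p_hwfn, &cur_rx_coal, queue_handle);
 *              if (rc != ECORE_SUCCESS)
 *                      return rc;
 *
 *              // Set 64 usec Rx / 96 usec Tx coalescing (both in the 0-0x7f range).
 *              return ecore_set_queue_coalesce(p_hwfn, 64, 96, queue_handle);
 *      }
 */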
  808 
  809 /**
  810  * @brief - Recalculate feature distributions based on HW resources and
  811  * user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and VF_L2_QUE.
  812  * As a result, this must not be called while RDMA is active or while VFs
  813  * are enabled.
  814  *
  815  * @param p_hwfn
  816  */
  817 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);
  818 
  819 /**
  820  * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
  821  *
  822  * @param p_hwfn
  823  * @param p_ptt
  824  * @param b_enable - true/false
  825  *
  826  * @return enum _ecore_status_t
  827  */
  828 enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
  829                                                   struct ecore_ptt *p_ptt,
  830                                                   bool b_enable);
  831 
  832 #ifndef __EXTRACT__LINUX__IF__
  833 enum ecore_db_rec_width {
  834         DB_REC_WIDTH_32B,
  835         DB_REC_WIDTH_64B,
  836 };
  837 
  838 enum ecore_db_rec_space {
  839         DB_REC_KERNEL,
  840         DB_REC_USER,
  841 };
  842 #endif
  843 
  844 /**
  845  * @brief db_recovery_add - add doorbell information to the doorbell
  846  * recovery mechanism.
  847  *
  848  * @param p_dev
  849  * @param db_addr - doorbell address
  850  * @param db_data - address of where db_data is stored
   851  * @param db_width - doorbell is 32b or 64b
  852  * @param db_space - doorbell recovery addresses are user or kernel space
  853  */
  854 enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
  855                                            void OSAL_IOMEM *db_addr,
  856                                            void *db_data,
  857                                            enum ecore_db_rec_width db_width,
  858                                            enum ecore_db_rec_space db_space);
  859 
  860 /**
  861  * @brief db_recovery_del - remove doorbell information from the doorbell
  862  * recovery mechanism. db_data serves as key (db_addr is not unique).
  863  *
   864  * @param p_dev
  865  * @param db_addr - doorbell address
  866  * @param db_data - address where db_data is stored. Serves as key for the
  867  *                  entry to delete.
  868  */
  869 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
  870                                            void OSAL_IOMEM *db_addr,
  871                                            void *db_data);
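
/*
 * Usage sketch for the doorbell recovery calls above: each doorbell a driver
 * rings should be registered together with the address of its shadow data, and
 * deregistered with the same db_data pointer, since db_data serves as the key.
 * The structure holding the doorbell below is a placeholder.
 *
 *      struct example_queue {
 *              void OSAL_IOMEM *db_addr;  // mapped doorbell register
 *              u32 db_data;               // last value written to the doorbell
 *      };
 *
 *      static enum _ecore_status_t example_db_register(struct ecore_dev *p_dev,
 *                                                      struct example_queue *q)
 *      {
 *              return ecore_db_recovery_add(p_dev, q->db_addr, &q->db_data,
 *                                           DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *      }
 *
 *      static enum _ecore_status_t example_db_unregister(struct ecore_dev *p_dev,
 *                                                        struct example_queue *q)
 *      {
 *              return ecore_db_recovery_del(p_dev, q->db_addr, &q->db_data);
 *      }
 */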
  872 
  873 #ifndef __EXTRACT__LINUX__THROW__
  874 static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
  875 {
  876         return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
  877 }
  878 #endif
  879 
  880 /**
  881  * @brief ecore_set_dev_access_enable - Enable or disable access to the device
  882  *
   883  * @param p_dev
  884  * @param b_enable - true/false
  885  */
  886 void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable);
  887 
  888 /**
  889  * @brief ecore_set_ilt_page_size - Set ILT page size
  890  *
  891  * @param p_dev
  892  * @param ilt_size
  893  *
   894  * @return void
  895  */
  896 void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_size);
  897 
  898 #endif
