FreeBSD/Linux Kernel Cross Reference
sys/dev/bxe/ecore_sp.h


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  *
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   26  * THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #ifndef ECORE_SP_H
   33 #define ECORE_SP_H
   34 
   35 
   36 #include <sys/types.h>
   37 #include <sys/endian.h>
   38 #include <sys/param.h>
   39 #include <sys/lock.h>
   40 #include <sys/mutex.h>
   41 #include <sys/malloc.h>
   42 #include <sys/kernel.h>
   43 #include <machine/bus.h>
   44 #include <net/ethernet.h>
   45 
   46 #if _BYTE_ORDER == _LITTLE_ENDIAN
   47 #ifndef LITTLE_ENDIAN
   48 #define LITTLE_ENDIAN
   49 #endif
   50 #ifndef __LITTLE_ENDIAN
   51 #define __LITTLE_ENDIAN
   52 #endif
   53 #undef BIG_ENDIAN
   54 #undef __BIG_ENDIAN
   55 #else /* _BIG_ENDIAN */
   56 #ifndef BIG_ENDIAN
   57 #define BIG_ENDIAN
   58 #endif
   59 #ifndef __BIG_ENDIAN
   60 #define __BIG_ENDIAN
   61 #endif
   62 #undef LITTLE_ENDIAN
   63 #undef __LITTLE_ENDIAN
   64 #endif
   65 
   66 #include "ecore_mfw_req.h"
   67 #include "ecore_fw_defs.h"
   68 #include "ecore_hsi.h"
   69 #include "ecore_reg.h"
   70 
   71 struct bxe_softc;
   72 typedef bus_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
   73 typedef volatile int ecore_atomic_t;
   74 
   75 #ifndef __bool_true_false_are_defined
   76 #ifndef __cplusplus
   77 #define bool _Bool
   78 #endif /* !__cplusplus */
   79 #endif /* !__bool_true_false_are_defined */
   80 
   81 #define ETH_ALEN ETHER_ADDR_LEN /* 6 */
   82 
   83 #define ECORE_SWCID_SHIFT   17
   84 #define ECORE_SWCID_MASK    ((0x1 << ECORE_SWCID_SHIFT) - 1)
   85 
   86 #define ECORE_MC_HASH_SIZE 8
   87 #define ECORE_MC_HASH_OFFSET(sc, i)                                          \
   88     (BAR_TSTRORM_INTMEM +                                                    \
   89      TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)
   90 
   91 #define ECORE_MAX_MULTICAST   64
   92 #define ECORE_MAX_EMUL_MULTI  1
   93 
   94 #define IRO sc->iro_array
   95 
   96 typedef struct mtx ECORE_MUTEX;
   97 #define ECORE_MUTEX_INIT(_mutex) \
   98     mtx_init(_mutex, "ecore_lock", "ECORE Lock", MTX_DEF)
   99 #define ECORE_MUTEX_LOCK(_mutex)   mtx_lock(_mutex)
  100 #define ECORE_MUTEX_UNLOCK(_mutex) mtx_unlock(_mutex)
  101 
  102 typedef struct mtx ECORE_MUTEX_SPIN;
  103 #define ECORE_SPIN_LOCK_INIT(_spin, _sc) \
  104     mtx_init(_spin, "ecore_lock", "ECORE Lock", MTX_DEF)
  105 #define ECORE_SPIN_LOCK_BH(_spin)   mtx_lock(_spin) /* bh = bottom-half */
  106 #define ECORE_SPIN_UNLOCK_BH(_spin) mtx_unlock(_spin) /* bh = bottom-half */
  107 
  108 #define ECORE_SMP_MB_AFTER_CLEAR_BIT()  mb()
  109 #define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
  110 #define ECORE_SMP_MB()                  mb()
  111 #define ECORE_SMP_RMB()                 rmb()
  112 #define ECORE_SMP_WMB()                 wmb()
  113 #define ECORE_MMIOWB()                  wmb()
  114 
  115 #define ECORE_SET_BIT_NA(bit, var)   bit_set(var, bit) /* non-atomic */
  116 #define ECORE_CLEAR_BIT_NA(bit, var) bit_clear(var, bit) /* non-atomic */
  117 #define ECORE_TEST_BIT(bit, var)     bxe_test_bit(bit, var)
  118 #define ECORE_SET_BIT(bit, var)      bxe_set_bit(bit, var)
  119 #define ECORE_CLEAR_BIT(bit, var)    bxe_clear_bit(bit, var)
  120 #define ECORE_TEST_AND_CLEAR_BIT(bit, var) bxe_test_and_clear_bit(bit, var)
  121 
  122 #define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
  123 #define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
  124 #define ECORE_ATOMIC_CMPXCHG(a, o, n) bxe_cmpxchg((volatile int *)a, o, n)
  125 
  126 #define ECORE_RET_PENDING(pending_bit, pending) \
  127     (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
  128 
  129 #define ECORE_SET_FLAG(value, mask, flag)      \
  130     do {                                       \
  131         (value) &= ~(mask);                    \
  132         (value) |= ((flag) << (mask##_SHIFT)); \
  133     } while (0)
  134 
  135 #define ECORE_GET_FLAG(value, mask) \
  136     (((value) & (mask)) >> (mask##_SHIFT))
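      /*
       * Illustrative sketch (not part of the original header): the mask passed
       * to ECORE_SET_FLAG()/ECORE_GET_FLAG() needs a matching <mask>_SHIFT
       * definition, as the HSI/firmware headers typically provide.  The names
       * below are hypothetical.
       */
      #if 0
      #define EXAMPLE_FLAGS_MASK       0x30
      #define EXAMPLE_FLAGS_MASK_SHIFT 4

      static inline uint32_t
      example_flag_usage(void)
      {
          uint32_t value = 0;

          ECORE_SET_FLAG(value, EXAMPLE_FLAGS_MASK, 0x2);      /* value == 0x20 */
          return (ECORE_GET_FLAG(value, EXAMPLE_FLAGS_MASK));  /* == 0x2 */
      }
      #endif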
  137 
  138 #define ECORE_MIGHT_SLEEP()
  139 
  140 #define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
  141 
  142 #define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
  143 #define ECORE_MEMCPY(_a, _b, _s) memcpy(_a, _b, _s)
  144 #define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
  145 
  146 #define ECORE_CPU_TO_LE16(x) htole16(x)
  147 #define ECORE_CPU_TO_LE32(x) htole32(x)
  148 
  149 #define ECORE_WAIT(_s, _t) DELAY(1000)
  150 #define ECORE_MSLEEP(_t)   DELAY((_t) * 1000)
  151 
  152 #define ECORE_LIKELY(x)   __predict_true(x)
  153 #define ECORE_UNLIKELY(x) __predict_false(x)
  154 
  155 #define ECORE_ZALLOC(_size, _flags, _sc) \
  156     malloc(_size, M_TEMP, (M_NOWAIT | M_ZERO))
  157 
  158 #define ECORE_CALLOC(_len, _size, _flags, _sc) \
  159     mallocarray(_len, _size, M_TEMP, (M_NOWAIT | M_ZERO))
  160 
  161 #define ECORE_FREE(_s, _buf, _size) free(_buf, M_TEMP)
  162 
  163 #define SC_ILT(sc)  ((sc)->ilt)
  164 #define ILOG2(x)    bxe_ilog2(x)
  165 
  166 #define ECORE_ILT_ZALLOC(x, y, size)                                       \
  167     do {                                                                   \
  168         x = malloc(sizeof(struct bxe_dma), M_DEVBUF, (M_NOWAIT | M_ZERO)); \
  169         if (x) {                                                           \
  170             if (bxe_dma_alloc((struct bxe_softc *)sc,                      \
  171                               size, (struct bxe_dma *)x,                   \
  172                               "ECORE_ILT") != 0) {                         \
  173                 free(x, M_DEVBUF);                                         \
  174                 x = NULL;                                                  \
  175                 *y = 0;                                                    \
  176             } else {                                                       \
  177                 *y = ((struct bxe_dma *)x)->paddr;                         \
  178             }                                                              \
  179         }                                                                  \
  180     } while (0)
  181 
  182 #define ECORE_ILT_FREE(x, y, size)                   \
  183     do {                                             \
  184         if (x) {                                     \
  185             bxe_dma_free((struct bxe_softc *)sc, x); \
  186             free(x, M_DEVBUF);                       \
  187             x = NULL;                                \
  188             y = 0;                                   \
  189         }                                            \
  190     } while (0)
  191 
  192 #define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
  193 
  194 #define ECORE_IS_MF_SD_MODE   IS_MF_SD_MODE
  195 #define ECORE_IS_MF_SI_MODE   IS_MF_SI_MODE
  196 #define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE
  197 
  198 #define ECORE_SET_CTX_VALIDATION bxe_set_ctx_validation
  199 
  200 #define ECORE_UPDATE_COALESCE_SB_INDEX bxe_update_coalesce_sb_index
  201 
  202 #define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
  203 
  204 #define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
  205 
  206 #define ECORE_PATH_ID     SC_PATH
  207 #define ECORE_PORT_ID     SC_PORT
  208 #define ECORE_FUNC_ID     SC_FUNC
  209 #define ECORE_ABS_FUNC_ID SC_ABS_FUNC
  210 
  211 uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length,
  212                     uint32_t crc32_seed, uint8_t complement);
  213 static inline uint32_t
  214 ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
  215 {
  216     uint32_t packet_buf[2] = {0};
  217     memcpy(((uint8_t *)(&packet_buf[0]))+2, &mac[0], 2);
  218     memcpy(&packet_buf[1], &mac[2], 4);
  219     return bswap32(calc_crc32((uint8_t *)packet_buf, 8, seed, 0));
  220 }
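      /*
       * Illustrative sketch (not part of the original header): one plausible
       * way a 256-bin approximate-match multicast index could be derived from
       * the CRC above.  The helper name and the 0xffffffff seed are
       * illustrative assumptions, not the driver's actual code.
       */
      #if 0
      static inline uint8_t
      example_mcast_bin_from_mac(uint8_t *mac)
      {
          uint32_t crc = ECORE_CRC32_LE(0xffffffff, mac, ETH_ALEN);

          return ((crc >> 24) & 0xff); /* one of 256 approximate-match bins */
      }
      #endif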
  221 
  222 #define ecore_sp_post(_sc, _a, _b, _c, _d) \
  223     bxe_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d)
  224 
  225 #ifdef ECORE_STOP_ON_ERROR
  226 
  227 #define ECORE_DBG_BREAK_IF(exp)     \
  228     do {                            \
  229         if (__predict_false(exp)) { \
  230             panic("ECORE");         \
  231         }                           \
  232     } while (0)
  233 
  234 #define ECORE_BUG()                               \
  235     do {                                          \
  236         panic("BUG (%s:%d)", __FILE__, __LINE__); \
  237     } while (0)
  238 
  239 #define ECORE_BUG_ON(exp)                                \
  240     do {                                                 \
  241         if (__predict_false(exp)) {
  242             panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \
  243         }                                                \
  244     } while (0)
  245 
  246 #else
  247 
  248 
  249 extern unsigned long bxe_debug;
  250 #define BXE_DEBUG_ECORE_DBG_BREAK_IF   0x01
  251 #define BXE_DEBUG_ECORE_BUG            0x02
  252 #define BXE_DEBUG_ECORE_BUG_ON         0x04
  253 
  254 #define ECORE_DBG_BREAK_IF(exp)     \
  255     if (bxe_debug & BXE_DEBUG_ECORE_DBG_BREAK_IF) \
  256         printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__);
  257 
  258 #define ECORE_BUG()     \
  259     if (bxe_debug & BXE_DEBUG_ECORE_BUG) \
  260         printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__);
  261 
  262 #define ECORE_BUG_ON(exp)     \
  263     if (bxe_debug & BXE_DEBUG_ECORE_BUG_ON) \
  264         printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__);
  265 
  266 
  267 #endif /* #ifdef ECORE_STOP_ON_ERROR */
  268 
  269 #define ECORE_ERR(str, ...) \
  270     BLOGE(sc, "ECORE: " str, ##__VA_ARGS__)
  271 
  272 #define DBG_SP 0x00000004 /* defined in bxe.h */
  273 
  274 #define ECORE_MSG(sc, m, ...) \
  275     BLOGD(sc, DBG_SP, "ECORE: " m, ##__VA_ARGS__)
  276 
  277 typedef struct _ecore_list_entry_t
  278 {
  279     struct _ecore_list_entry_t *next, *prev;
  280 } ecore_list_entry_t;
  281 
  282 typedef struct ecore_list_t
  283 {
  284     ecore_list_entry_t *head, *tail;
  285     unsigned long cnt;
  286 } ecore_list_t;
  287 
  288 /* initialize the list */
  289 #define ECORE_LIST_INIT(_list) \
  290     do {                       \
  291         (_list)->head = NULL;  \
  292         (_list)->tail = NULL;  \
  293         (_list)->cnt  = 0;     \
  294     } while (0)
  295 
  296 /* return TRUE if the element is the last on the list */
  297 #define ECORE_LIST_IS_LAST(_elem, _list) \
  298     (_elem == (_list)->tail)
  299 
  300 /* return TRUE if the list is empty */
  301 #define ECORE_LIST_IS_EMPTY(_list) \
  302     ((_list)->cnt == 0)
  303 
  304 /* return the first element */
  305 #define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \
  306     (cast *)((_list)->head)
  307 
  308 /* return the next element */
  309 #define ECORE_LIST_NEXT(_elem, _link, cast) \
  310     (cast *)((&((_elem)->_link))->next)
  311 
  312 /* push an element on the head of the list */
  313 #define ECORE_LIST_PUSH_HEAD(_elem, _list)              \
  314     do {                                                \
  315         (_elem)->prev = (ecore_list_entry_t *)0;        \
  316         (_elem)->next = (_list)->head;                  \
  317         if ((_list)->tail == (ecore_list_entry_t *)0) { \
  318             (_list)->tail = (_elem);                    \
  319         } else {                                        \
  320             (_list)->head->prev = (_elem);              \
  321         }                                               \
  322         (_list)->head = (_elem);                        \
  323         (_list)->cnt++;                                 \
  324     } while (0)
  325 
  326 /* push an element on the tail of the list */
  327 #define ECORE_LIST_PUSH_TAIL(_elem, _list)       \
  328     do {                                         \
  329         (_elem)->next = (ecore_list_entry_t *)0; \
  330         (_elem)->prev = (_list)->tail;           \
  331         if ((_list)->tail) {                     \
  332             (_list)->tail->next = (_elem);       \
  333         } else {                                 \
  334             (_list)->head = (_elem);             \
  335         }                                        \
  336         (_list)->tail = (_elem);                 \
  337         (_list)->cnt++;                          \
  338     } while (0)
  339 
  340 /* push list1 (must not be empty) onto the head of list2 and return with list1 empty */
  341 #define ECORE_LIST_SPLICE_INIT(_list1, _list2)     \
  342     do {                                           \
  343         (_list1)->tail->next = (_list2)->head;     \
  344         if ((_list2)->head) {                      \
  345             (_list2)->head->prev = (_list1)->tail; \
  346         } else {                                   \
  347             (_list2)->tail = (_list1)->tail;       \
  348         }                                          \
  349         (_list2)->head = (_list1)->head;           \
  350         (_list2)->cnt += (_list1)->cnt;            \
  351         (_list1)->head = NULL;                     \
  352         (_list1)->tail = NULL;                     \
  353         (_list1)->cnt  = 0;                        \
  354     } while (0)
  355 
  356 /* remove an element from the list */
  357 #define ECORE_LIST_REMOVE_ENTRY(_elem, _list)                      \
  358     do {                                                           \
  359         if ((_list)->head == (_elem)) {                            \
  360             if ((_list)->head) {                                   \
  361                 (_list)->head = (_list)->head->next;               \
  362                 if ((_list)->head) {                               \
  363                     (_list)->head->prev = (ecore_list_entry_t *)0; \
  364                 } else {                                           \
  365                     (_list)->tail = (ecore_list_entry_t *)0;       \
  366                 }                                                  \
  367                 (_list)->cnt--;                                    \
  368             }                                                      \
  369         } else if ((_list)->tail == (_elem)) {                     \
  370             if ((_list)->tail) {                                   \
  371                 (_list)->tail = (_list)->tail->prev;               \
  372                 if ((_list)->tail) {                               \
  373                     (_list)->tail->next = (ecore_list_entry_t *)0; \
  374                 } else {                                           \
  375                     (_list)->head = (ecore_list_entry_t *)0;       \
  376                 }                                                  \
  377                 (_list)->cnt--;                                    \
  378             }                                                      \
  379         } else {                                                   \
  380             (_elem)->prev->next = (_elem)->next;                   \
  381             (_elem)->next->prev = (_elem)->prev;                   \
  382             (_list)->cnt--;                                        \
  383         }                                                          \
  384     } while (0)
  385 
  386 /* walk the list */
  387 #define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \
  388     for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \
  389          pos;                                              \
  390          pos = ECORE_LIST_NEXT(pos, _link, cast))
  391 
  392 /* walk the list (safely) */
  393 #define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \
  394      for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link),        \
  395           n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL;    \
  396           pos != NULL;                                             \
  397           pos = (cast *)n,                                         \
  398           n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL)
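      /*
       * Illustrative sketch (not part of the original header): typical use of
       * the list helpers above.  The element type and function are made up;
       * note the (cast *) pointers in the macros assume the link is the first
       * member of the element structure.
       */
      #if 0
      struct example_elem {
          ecore_list_entry_t link;  /* must be first for the (cast *) pointers */
          int                value;
      };

      static void
      example_list_usage(struct example_elem *elems, int count)
      {
          ecore_list_t list;
          struct example_elem *pos, *tmp;
          int i;

          ECORE_LIST_INIT(&list);
          for (i = 0; i < count; i++)
              ECORE_LIST_PUSH_TAIL(&elems[i].link, &list);

          /* the _SAFE variant allows unlinking the current element */
          ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, &list, link,
                                         struct example_elem)
              ECORE_LIST_REMOVE_ENTRY(&pos->link, &list);
      }
      #endif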
  399 
  400 
  401 /* Manipulate a bit vector defined as an array of uint64_t */
  402 
  403 /* Number of bits in one sge_mask array element */
  404 #define BIT_VEC64_ELEM_SZ     64
  405 #define BIT_VEC64_ELEM_SHIFT  6
  406 #define BIT_VEC64_ELEM_MASK   ((uint64_t)BIT_VEC64_ELEM_SZ - 1)
  407 
  408 #define __BIT_VEC64_SET_BIT(el, bit)            \
  409     do {                                        \
  410         el = ((el) | ((uint64_t)0x1 << (bit))); \
  411     } while (0)
  412 
  413 #define __BIT_VEC64_CLEAR_BIT(el, bit)             \
  414     do {                                           \
  415         el = ((el) & (~((uint64_t)0x1 << (bit)))); \
  416     } while (0)
  417 
  418 #define BIT_VEC64_SET_BIT(vec64, idx)                           \
  419     __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
  420                         (idx) & BIT_VEC64_ELEM_MASK)
  421 
  422 #define BIT_VEC64_CLEAR_BIT(vec64, idx)                           \
  423     __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
  424                           (idx) & BIT_VEC64_ELEM_MASK)
  425 
  426 #define BIT_VEC64_TEST_BIT(vec64, idx)          \
  427     (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
  428       ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
  429 
  430 /*
  431  * Creates a bitmask of all ones in the less significant bits.
  432  * idx - index of the most significant bit in the created mask.
  433  */
  434 #define BIT_VEC64_ONES_MASK(idx)                                 \
  435     (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
  436 #define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))
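      /*
       * Illustrative sketch (not part of the original header): with two
       * uint64_t elements the vector holds 128 bits; index 65 maps to
       * element 1, bit 1.
       */
      #if 0
      static inline int
      example_bit_vec64_usage(void)
      {
          uint64_t vec[2] = { 0, 0 };

          BIT_VEC64_SET_BIT(vec, 65);             /* vec[1] == 0x2 */
          BIT_VEC64_CLEAR_BIT(vec, 65);           /* vec[1] == 0x0 */
          return (BIT_VEC64_TEST_BIT(vec, 65));   /* 0 */
      }
      #endif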
  437 
  438 /* fill in a MAC address the way the FW likes it */
  439 static inline void
  440 ecore_set_fw_mac_addr(uint16_t *fw_hi,
  441                       uint16_t *fw_mid,
  442                       uint16_t *fw_lo,
  443                       uint8_t  *mac)
  444 {
  445     ((uint8_t *)fw_hi)[0]  = mac[1];
  446     ((uint8_t *)fw_hi)[1]  = mac[0];
  447     ((uint8_t *)fw_mid)[0] = mac[3];
  448     ((uint8_t *)fw_mid)[1] = mac[2];
  449     ((uint8_t *)fw_lo)[0]  = mac[5];
  450     ((uint8_t *)fw_lo)[1]  = mac[4];
  451 }
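      /*
       * Worked example (editorial note): for mac = 00:11:22:33:44:55 on a
       * little-endian host the results are *fw_hi == 0x0011, *fw_mid == 0x2233
       * and *fw_lo == 0x4455, i.e. each 16-bit word holds one byte pair of the
       * MAC in network (big-endian) order.
       */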
  452 
  453 
  454 enum ecore_status_t {
  455     ECORE_EXISTS  = -6,
  456     ECORE_IO      = -5,
  457     ECORE_TIMEOUT = -4,
  458     ECORE_INVAL   = -3,
  459     ECORE_BUSY    = -2,
  460     ECORE_NOMEM   = -1,
  461     ECORE_SUCCESS = 0,
  462     /* PENDING is not an error and should be positive */
  463     ECORE_PENDING = 1,
  464 };
  465 
  466 enum {
  467     SWITCH_UPDATE,
  468     AFEX_UPDATE,
  469 };
  470 
  471 
  472 
  473 
  474 struct bxe_softc;
  475 struct eth_context;
  476 
  477 /* Bits representing general command's configuration */
  478 enum {
  479         RAMROD_TX,
  480         RAMROD_RX,
  481         /* Wait until all pending commands complete */
  482         RAMROD_COMP_WAIT,
  483         /* Don't send a ramrod, only update a registry */
  484         RAMROD_DRV_CLR_ONLY,
  485         /* Configure HW according to the current object state */
  486         RAMROD_RESTORE,
  487         /* Execute the next command now */
  488         RAMROD_EXEC,
  489         /* Don't add a new command and continue execution of postponed
  490          * commands. If not set a new command will be added to the
  491          * pending commands list.
  492          */
  493         RAMROD_CONT,
  494         /* If there is another pending ramrod, wait until it finishes and
  495          * re-try to submit this one. This flag can be set only in sleepable
  496          * context, and should not be set from the context that completes the
  497          * ramrods as deadlock will occur.
  498          */
  499         RAMROD_RETRY,
  500 };
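      /*
       * Illustrative sketch (not part of the original header): the RAMROD_*
       * values are bit numbers, set in the "unsigned long ramrod_flags" words
       * carried by the *_ramrod_params structures further below.
       */
      #if 0
      static inline void
      example_ramrod_flags(unsigned long *ramrod_flags)
      {
          ECORE_SET_BIT(RAMROD_COMP_WAIT, ramrod_flags); /* block until done */
          ECORE_SET_BIT(RAMROD_RX, ramrod_flags);
          ECORE_SET_BIT(RAMROD_TX, ramrod_flags);
      }
      #endif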
  501 
  502 typedef enum {
  503         ECORE_OBJ_TYPE_RX,
  504         ECORE_OBJ_TYPE_TX,
  505         ECORE_OBJ_TYPE_RX_TX,
  506 } ecore_obj_type;
  507 
  508 /* Public slow path states */
  509 enum {
  510         ECORE_FILTER_MAC_PENDING,
  511         ECORE_FILTER_VLAN_PENDING,
  512         ECORE_FILTER_VLAN_MAC_PENDING,
  513         ECORE_FILTER_RX_MODE_PENDING,
  514         ECORE_FILTER_RX_MODE_SCHED,
  515         ECORE_FILTER_ISCSI_ETH_START_SCHED,
  516         ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
  517         ECORE_FILTER_FCOE_ETH_START_SCHED,
  518         ECORE_FILTER_FCOE_ETH_STOP_SCHED,
  519         ECORE_FILTER_BYPASS_RX_MODE_PENDING,
  520         ECORE_FILTER_BYPASS_MAC_PENDING,
  521         ECORE_FILTER_BYPASS_RSS_CONF_PENDING,
  522         ECORE_FILTER_MCAST_PENDING,
  523         ECORE_FILTER_MCAST_SCHED,
  524         ECORE_FILTER_RSS_CONF_PENDING,
  525         ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
  526         ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
  527         ECORE_FILTER_VXLAN_PENDING
  528 };
  529 
  530 struct ecore_raw_obj {
  531         uint8_t         func_id;
  532 
  533         /* Queue params */
  534         uint8_t         cl_id;
  535         uint32_t                cid;
  536 
  537         /* Ramrod data buffer params */
  538         void            *rdata;
  539         ecore_dma_addr_t        rdata_mapping;
  540 
  541         /* Ramrod state params */
  542         int             state;   /* "ramrod is pending" state bit */
  543         unsigned long   *pstate; /* pointer to state buffer */
  544 
  545         ecore_obj_type  obj_type;
  546 
  547         int (*wait_comp)(struct bxe_softc *sc,
  548                          struct ecore_raw_obj *o);
  549 
  550         bool (*check_pending)(struct ecore_raw_obj *o);
  551         void (*clear_pending)(struct ecore_raw_obj *o);
  552         void (*set_pending)(struct ecore_raw_obj *o);
  553 };
  554 
  555 /************************* VLAN-MAC commands related parameters ***************/
  556 struct ecore_mac_ramrod_data {
  557         uint8_t mac[ETH_ALEN];
  558         uint8_t is_inner_mac;
  559 };
  560 
  561 struct ecore_vlan_ramrod_data {
  562         uint16_t vlan;
  563 };
  564 
  565 struct ecore_vlan_mac_ramrod_data {
  566         uint8_t mac[ETH_ALEN];
  567         uint8_t is_inner_mac;
  568         uint16_t vlan;
  569 };
  570 
  571 struct ecore_vxlan_fltr_ramrod_data {
  572         uint8_t innermac[ETH_ALEN];
  573         uint32_t vni;
  574 };
  575 
  576 union ecore_classification_ramrod_data {
  577         struct ecore_mac_ramrod_data mac;
  578         struct ecore_vlan_ramrod_data vlan;
  579         struct ecore_vlan_mac_ramrod_data vlan_mac;
  580         struct ecore_vxlan_fltr_ramrod_data vxlan_fltr;
  581 };
  582 
  583 /* VLAN_MAC commands */
  584 enum ecore_vlan_mac_cmd {
  585         ECORE_VLAN_MAC_ADD,
  586         ECORE_VLAN_MAC_DEL,
  587         ECORE_VLAN_MAC_MOVE,
  588 };
  589 
  590 struct ecore_vlan_mac_data {
  591         /* Requested command: ECORE_VLAN_MAC_XX */
  592         enum ecore_vlan_mac_cmd cmd;
  593         /* used to contain the vlan_mac_flags bits related to this command,
  594          * taken from the ramrod parameters.
  595          */
  596         unsigned long vlan_mac_flags;
  597 
  598         /* Needed for MOVE command */
  599         struct ecore_vlan_mac_obj *target_obj;
  600 
  601         union ecore_classification_ramrod_data u;
  602 };
  603 
  604 /*************************** Exe Queue obj ************************************/
  605 union ecore_exe_queue_cmd_data {
  606         struct ecore_vlan_mac_data vlan_mac;
  607 
  608         struct {
  609                 /* TODO */
  610         } mcast;
  611 };
  612 
  613 struct ecore_exeq_elem {
  614         ecore_list_entry_t              link;
  615 
  616         /* Length of this element in the exe_chunk. */
  617         int                             cmd_len;
  618 
  619         union ecore_exe_queue_cmd_data  cmd_data;
  620 };
  621 
  622 union ecore_qable_obj;
  623 
  624 union ecore_exeq_comp_elem {
  625         union event_ring_elem *elem;
  626 };
  627 
  628 struct ecore_exe_queue_obj;
  629 
  630 typedef int (*exe_q_validate)(struct bxe_softc *sc,
  631                               union ecore_qable_obj *o,
  632                               struct ecore_exeq_elem *elem);
  633 
  634 typedef int (*exe_q_remove)(struct bxe_softc *sc,
  635                             union ecore_qable_obj *o,
  636                             struct ecore_exeq_elem *elem);
  637 
  638 /* Return a positive value if the entry was optimized, 0 if not, and a
  639  * negative value in case of an error.
  640  */
  641 typedef int (*exe_q_optimize)(struct bxe_softc *sc,
  642                               union ecore_qable_obj *o,
  643                               struct ecore_exeq_elem *elem);
  644 typedef int (*exe_q_execute)(struct bxe_softc *sc,
  645                              union ecore_qable_obj *o,
  646                              ecore_list_t *exe_chunk,
  647                              unsigned long *ramrod_flags);
  648 typedef struct ecore_exeq_elem *
  649                         (*exe_q_get)(struct ecore_exe_queue_obj *o,
  650                                      struct ecore_exeq_elem *elem);
  651 
  652 struct ecore_exe_queue_obj {
  653         /* Commands pending execution. */
  654         ecore_list_t    exe_queue;
  655 
  656         /* Commands pending completion. */
  657         ecore_list_t    pending_comp;
  658 
  659         ECORE_MUTEX_SPIN                lock;
  660 
  661         /* Maximum length of commands' list for one execution */
  662         int                     exe_chunk_len;
  663 
  664         union ecore_qable_obj   *owner;
  665 
  666         /****** Virtual functions ******/
  667         /**
  668          * Called before commands execution for commands that are really
  669          * going to be executed (after 'optimize').
  670          *
  671          * Must run under exe_queue->lock
  672          */
  673         exe_q_validate          validate;
  674 
  675         /**
  676          * Called before removing pending commands, cleaning allocated
  677          * resources (e.g., credits from validate)
  678          */
  679         exe_q_remove            remove;
  680 
  681         /**
  682          * This will try to cancel the current pending commands list
  683          * considering the new command.
  684          *
  685          * Returns the number of optimized commands or a negative error code
  686          *
  687          * Must run under exe_queue->lock
  688          */
  689         exe_q_optimize          optimize;
  690 
  691         /**
  692          * Run the next commands chunk (owner specific).
  693          */
  694         exe_q_execute           execute;
  695 
  696         /**
  697          * Return the exe_queue element containing the specific command
  698          * if any. Otherwise return NULL.
  699          */
  700         exe_q_get               get;
  701 };
  702 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
  703 /*
  704  * Element in the VLAN_MAC registry list having all current configured
  705  * rules.
  706  */
  707 struct ecore_vlan_mac_registry_elem {
  708         ecore_list_entry_t      link;
  709 
  710         /* Used to store the cam offset used for the mac/vlan/vlan-mac.
  711          * Relevant for 57710 and 57711 only. VLANs and MACs share the
  712          * same CAM for these chips.
  713          */
  714         int                     cam_offset;
  715 
  716         /* Needed for DEL and RESTORE flows */
  717         unsigned long           vlan_mac_flags;
  718 
  719         union ecore_classification_ramrod_data u;
  720 };
  721 
  722 /* Bits representing VLAN_MAC commands specific flags */
  723 enum {
  724         ECORE_UC_LIST_MAC,
  725         ECORE_ETH_MAC,
  726         ECORE_ISCSI_ETH_MAC,
  727         ECORE_NETQ_ETH_MAC,
  728         ECORE_DONT_CONSUME_CAM_CREDIT,
  729         ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
  730 };
  731 /* When looking for matching filters, some flags are not interesting */
  732 #define ECORE_VLAN_MAC_CMP_MASK (1 << ECORE_UC_LIST_MAC | \
  733                                  1 << ECORE_ETH_MAC | \
  734                                  1 << ECORE_ISCSI_ETH_MAC | \
  735                                  1 << ECORE_NETQ_ETH_MAC)
  736 #define ECORE_VLAN_MAC_CMP_FLAGS(flags) \
  737         ((flags) & ECORE_VLAN_MAC_CMP_MASK)
  738 
  739 struct ecore_vlan_mac_ramrod_params {
  740         /* Object to run the command from */
  741         struct ecore_vlan_mac_obj *vlan_mac_obj;
  742 
  743         /* General command flags: COMP_WAIT, etc. */
  744         unsigned long ramrod_flags;
  745 
  746         /* Command specific configuration request */
  747         struct ecore_vlan_mac_data user_req;
  748 };
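      /*
       * Illustrative sketch (not part of the original header): filling the
       * parameters for a MAC ADD.  "mac_obj" and "addr" are caller-supplied,
       * and the actual submit call is not shown here.
       */
      #if 0
      static inline void
      example_fill_mac_add(struct ecore_vlan_mac_obj *mac_obj,
                           const uint8_t *addr,
                           struct ecore_vlan_mac_ramrod_params *p)
      {
          ECORE_MEMSET(p, 0, sizeof(*p));
          p->vlan_mac_obj = mac_obj;
          ECORE_SET_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags);
          p->user_req.cmd = ECORE_VLAN_MAC_ADD;
          ECORE_SET_BIT(ECORE_ETH_MAC, &p->user_req.vlan_mac_flags);
          ECORE_MEMCPY(p->user_req.u.mac.mac, addr, ETH_ALEN);
      }
      #endif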
  749 
  750 struct ecore_vlan_mac_obj {
  751         struct ecore_raw_obj raw;
  752 
  753         /* Bookkeeping list: will prevent the addition of already existing
  754          * entries.
  755          */
  756         ecore_list_t            head;
  757         /* Implement a simple reader/writer lock on the head list.
  758          * all these fields should only be accessed under the exe_queue lock
  759          */
  760         uint8_t         head_reader; /* Num. of readers accessing head list */
  761         bool            head_exe_request; /* Pending execution request. */
  762         unsigned long   saved_ramrod_flags; /* Ramrods of pending execution */
  763 
  764         /* Execution queue interface instance */
  765         struct ecore_exe_queue_obj      exe_queue;
  766 
  767         /* MACs credit pool */
  768         struct ecore_credit_pool_obj    *macs_pool;
  769 
  770         /* VLANs credit pool */
  771         struct ecore_credit_pool_obj    *vlans_pool;
  772 
  773         /* RAMROD command to be used */
  774         int                             ramrod_cmd;
  775 
  776         /* Copy the first n elements into a preallocated buffer.
  777          *
  778          * @param n number of elements to get
  779          * @param buf buffer preallocated by caller into which elements
  780          *            will be copied. Note elements are 4-byte aligned
  781          *            so buffer size must be able to accommodate the
  782          *            aligned elements.
  783          *
  784          * @return number of copied bytes
  785          */
  786 
  787         int (*get_n_elements)(struct bxe_softc *sc,
  788                               struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
  789                               uint8_t stride, uint8_t size);
  790 
  791         /**
  792          * Checks if ADD-ramrod with the given params may be performed.
  793          *
  794          * @return zero if the element may be added
  795          */
  796 
  797         int (*check_add)(struct bxe_softc *sc,
  798                          struct ecore_vlan_mac_obj *o,
  799                          union ecore_classification_ramrod_data *data);
  800 
  801         /**
  802          * Checks if DEL-ramrod with the given params may be performed.
  803          *
  804          * @return the matching registry element if it may be deleted, NULL otherwise
  805          */
  806         struct ecore_vlan_mac_registry_elem *
  807                 (*check_del)(struct bxe_softc *sc,
  808                              struct ecore_vlan_mac_obj *o,
  809                              union ecore_classification_ramrod_data *data);
  810 
  811         /**
  812          * Checks if a MOVE-ramrod with the given params may be performed.
  813          *
  814          * @return TRUE if the element may be moved
  815          */
  816         bool (*check_move)(struct bxe_softc *sc,
  817                            struct ecore_vlan_mac_obj *src_o,
  818                            struct ecore_vlan_mac_obj *dst_o,
  819                            union ecore_classification_ramrod_data *data);
  820 
  821         /**
  822          *  Update the relevant credit object(s) (consume/return
  823          *  correspondingly).
  824          */
  825         bool (*get_credit)(struct ecore_vlan_mac_obj *o);
  826         bool (*put_credit)(struct ecore_vlan_mac_obj *o);
  827         bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
  828         bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
  829 
  830         /**
  831          * Configures one rule in the ramrod data buffer.
  832          */
  833         void (*set_one_rule)(struct bxe_softc *sc,
  834                              struct ecore_vlan_mac_obj *o,
  835                              struct ecore_exeq_elem *elem, int rule_idx,
  836                              int cam_offset);
  837 
  838         /**
  839          * Delete all configured elements having the given
  840          * vlan_mac_flags specification. Assumes there are no commands
  841          * pending for execution. Will schedule all currently
  842          * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
  843          * specification for deletion and will use the given
  844          * ramrod_flags for the last DEL operation.
  845          *
  846          * @param sc
  847          * @param o
  848          * @param ramrod_flags RAMROD_XX flags
  849          *
  850          * @return 0 if the last operation has completed successfully
  851          *         and there are no more elements left, positive value
  852          *         if there are pending for completion commands,
  853          *         negative value in case of failure.
  854          */
  855         int (*delete_all)(struct bxe_softc *sc,
  856                           struct ecore_vlan_mac_obj *o,
  857                           unsigned long *vlan_mac_flags,
  858                           unsigned long *ramrod_flags);
  859 
  860         /**
  861          * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
  862          * configured elements list.
  863          *
  864          * @param sc
  865          * @param p Command parameters (RAMROD_COMP_WAIT bit in
  866          *          ramrod_flags is only taken into account)
  867          * @param ppos a pointer to the cookie that should be given back in the
  868          *        next call to make function handle the next element. If
  869          *        *ppos is set to NULL it will restart the iterator.
  870          *        If returned *ppos == NULL this means that the last
  871          *        element has been handled.
  872          *
  873          * @return int
  874          */
  875         int (*restore)(struct bxe_softc *sc,
  876                        struct ecore_vlan_mac_ramrod_params *p,
  877                        struct ecore_vlan_mac_registry_elem **ppos);
  878 
  879         /**
  880          * Should be called on a completion arrival.
  881          *
  882          * @param sc
  883          * @param o
  884          * @param cqe Completion element we are handling
  885          * @param ramrod_flags if RAMROD_CONT is set the next bulk of
  886          *                     pending commands will be executed.
  887          *                     RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
  888          *                     may also be set if needed.
  889          *
  890          * @return 0 if there are neither pending nor waiting for
  891          *         completion commands. Positive value if there are
  892          *         pending for execution or for completion commands.
  893          *         Negative value in case of an error (including an
  894          *         error in the cqe).
  895          */
  896         int (*complete)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
  897                         union event_ring_elem *cqe,
  898                         unsigned long *ramrod_flags);
  899 
  900         /**
  901          * Wait for completion of all commands. Don't schedule new ones,
  902          * just wait. It assumes that the completion code will schedule
  903          * for new commands.
  904          */
  905         int (*wait)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o);
  906 };
  907 
  908 enum {
  909         ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
  910         ECORE_LLH_CAM_ETH_LINE,
  911         ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
  912 };
  913 
  914 void ecore_set_mac_in_nig(struct bxe_softc *sc,
  915                           bool add, unsigned char *dev_addr, int index);
  916 
  917 /** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
  918 
  919 /* RX_MODE ramrod special flags: set in rx_mode_flags field in
  920  * an ecore_rx_mode_ramrod_params.
  921  */
  922 enum {
  923         ECORE_RX_MODE_FCOE_ETH,
  924         ECORE_RX_MODE_ISCSI_ETH,
  925 };
  926 
  927 enum {
  928         ECORE_ACCEPT_UNICAST,
  929         ECORE_ACCEPT_MULTICAST,
  930         ECORE_ACCEPT_ALL_UNICAST,
  931         ECORE_ACCEPT_ALL_MULTICAST,
  932         ECORE_ACCEPT_BROADCAST,
  933         ECORE_ACCEPT_UNMATCHED,
  934         ECORE_ACCEPT_ANY_VLAN
  935 };
  936 
  937 struct ecore_rx_mode_ramrod_params {
  938         struct ecore_rx_mode_obj *rx_mode_obj;
  939         unsigned long *pstate;
  940         int state;
  941         uint8_t cl_id;
  942         uint32_t cid;
  943         uint8_t func_id;
  944         unsigned long ramrod_flags;
  945         unsigned long rx_mode_flags;
  946 
  947         /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
  948          * a tstorm_eth_mac_filter_config (e1x).
  949          */
  950         void *rdata;
  951         ecore_dma_addr_t rdata_mapping;
  952 
  953         /* Rx mode settings */
  954         unsigned long rx_accept_flags;
  955 
  956         /* internal switching settings */
  957         unsigned long tx_accept_flags;
  958 };
  959 
  960 struct ecore_rx_mode_obj {
  961         int (*config_rx_mode)(struct bxe_softc *sc,
  962                               struct ecore_rx_mode_ramrod_params *p);
  963 
  964         int (*wait_comp)(struct bxe_softc *sc,
  965                          struct ecore_rx_mode_ramrod_params *p);
  966 };
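      /*
       * Illustrative sketch (not part of the original header): the accept
       * flags in ecore_rx_mode_ramrod_params are bit numbers from the
       * ECORE_ACCEPT_* enum above.  A "normal" unicast configuration might
       * look like this.
       */
      #if 0
      static inline void
      example_fill_accept_flags(struct ecore_rx_mode_ramrod_params *p)
      {
          ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags);
          ECORE_SET_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags);
          ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags);
          ECORE_SET_BIT(ECORE_ACCEPT_ANY_VLAN, &p->rx_accept_flags);

          /* internal (TX-side) switching accepts mirror the RX side here */
          ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &p->tx_accept_flags);
          ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &p->tx_accept_flags);
      }
      #endif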
  967 
  968 /********************** Set multicast group ***********************************/
  969 
  970 struct ecore_mcast_list_elem {
  971         ecore_list_entry_t link;
  972         uint8_t *mac;
  973 };
  974 
  975 union ecore_mcast_config_data {
  976         uint8_t *mac;
  977         uint8_t bin; /* used in a RESTORE flow */
  978 };
  979 
  980 struct ecore_mcast_ramrod_params {
  981         struct ecore_mcast_obj *mcast_obj;
  982 
  983         /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
  984         unsigned long ramrod_flags;
  985 
  986         ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
  987         /** TODO:
  988          *      - rename it to macs_num.
  989          *      - Add a new command type for handling pending commands
  990          *        (remove "zero semantics").
  991          *
  992          *  Length of mcast_list. If zero and ADD_CONT command - post
  993          *  pending commands.
  994          */
  995         int mcast_list_len;
  996 };
  997 
  998 enum ecore_mcast_cmd {
  999         ECORE_MCAST_CMD_ADD,
 1000         ECORE_MCAST_CMD_CONT,
 1001         ECORE_MCAST_CMD_DEL,
 1002         ECORE_MCAST_CMD_RESTORE,
 1003 };
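      /*
       * Illustrative sketch (not part of the original header): building the
       * mcast_list for an ECORE_MCAST_CMD_ADD.  The caller-allocated element
       * array and MAC array are assumptions.
       */
      #if 0
      static inline void
      example_fill_mcast_params(struct ecore_mcast_ramrod_params *p,
                                struct ecore_mcast_list_elem *elems,
                                uint8_t **macs, int count)
      {
          int i;

          ECORE_LIST_INIT(&p->mcast_list);
          for (i = 0; i < count; i++) {
              elems[i].mac = macs[i];
              ECORE_LIST_PUSH_TAIL(&elems[i].link, &p->mcast_list);
          }
          p->mcast_list_len = count;
          ECORE_SET_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags);
          /* p->mcast_obj and the ECORE_MCAST_CMD_ADD submission are not shown */
      }
      #endif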
 1004 
 1005 struct ecore_mcast_obj {
 1006         struct ecore_raw_obj raw;
 1007 
 1008         union {
 1009                 struct {
 1010                 #define ECORE_MCAST_BINS_NUM    256
 1011                 #define ECORE_MCAST_VEC_SZ      (ECORE_MCAST_BINS_NUM / 64)
 1012                         uint64_t vec[ECORE_MCAST_VEC_SZ];
 1013 
 1014                         /** Number of BINs to clear. Should be updated
 1015                          *  immediately when a command arrives in order to
 1016                          *  properly create DEL commands.
 1017                          */
 1018                         int num_bins_set;
 1019                 } aprox_match;
 1020 
 1021                 struct {
 1022                         ecore_list_t macs;
 1023                         int num_macs_set;
 1024                 } exact_match;
 1025         } registry;
 1026 
 1027         /* Pending commands */
 1028         ecore_list_t pending_cmds_head;
 1029 
 1030         /* A state that is set in raw.pstate, when there are pending commands */
 1031         int sched_state;
 1032 
 1033         /* Maximal number of mcast MACs configured in one command */
 1034         int max_cmd_len;
 1035 
 1036         /* Total number of currently pending MACs to configure: both
 1037          * in the pending commands list and in the current command.
 1038          */
 1039         int total_pending_num;
 1040 
 1041         uint8_t engine_id;
 1042 
 1043         /**
 1044          * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
 1045          */
 1046         int (*config_mcast)(struct bxe_softc *sc,
 1047                             struct ecore_mcast_ramrod_params *p,
 1048                             enum ecore_mcast_cmd cmd);
 1049 
 1050         /**
 1051          * Fills the ramrod data during the RESTORE flow.
 1052          *
 1053          * @param sc
 1054          * @param o
 1055          * @param start_idx Registry index to start from
 1056          * @param rdata_idx Index in the ramrod data to start from
 1057          *
 1058          * @return -1 if we handled the whole registry or index of the last
 1059          *         handled registry element.
 1060          */
 1061         int (*hdl_restore)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
 1062                            int start_bin, int *rdata_idx);
 1063 
 1064         int (*enqueue_cmd)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
 1065                            struct ecore_mcast_ramrod_params *p,
 1066                            enum ecore_mcast_cmd cmd);
 1067 
 1068         void (*set_one_rule)(struct bxe_softc *sc,
 1069                              struct ecore_mcast_obj *o, int idx,
 1070                              union ecore_mcast_config_data *cfg_data,
 1071                              enum ecore_mcast_cmd cmd);
 1072 
 1073         /** Checks if there are more mcast MACs to be set or a previous
 1074          *  command is still pending.
 1075          */
 1076         bool (*check_pending)(struct ecore_mcast_obj *o);
 1077 
 1078         /**
 1079          * Set/Clear/Check SCHEDULED state of the object
 1080          */
 1081         void (*set_sched)(struct ecore_mcast_obj *o);
 1082         void (*clear_sched)(struct ecore_mcast_obj *o);
 1083         bool (*check_sched)(struct ecore_mcast_obj *o);
 1084 
 1085         /* Wait until all pending commands complete */
 1086         int (*wait_comp)(struct bxe_softc *sc, struct ecore_mcast_obj *o);
 1087 
 1088         /**
 1089          * Handle the internal object counters needed for proper
 1090          * commands handling. Checks that the provided parameters are
 1091          * feasible.
 1092          */
 1093         int (*validate)(struct bxe_softc *sc,
 1094                         struct ecore_mcast_ramrod_params *p,
 1095                         enum ecore_mcast_cmd cmd);
 1096 
 1097         /**
 1098          * Restore the values of internal counters in case of a failure.
 1099          */
 1100         void (*revert)(struct bxe_softc *sc,
 1101                        struct ecore_mcast_ramrod_params *p,
 1102                        int old_num_bins);
 1103 
 1104         int (*get_registry_size)(struct ecore_mcast_obj *o);
 1105         void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
 1106 };
 1107 
 1108 /*************************** Credit handling **********************************/
 1109 struct ecore_credit_pool_obj {
 1110 
 1111         /* Current amount of credit in the pool */
 1112         ecore_atomic_t  credit;
 1113 
 1114         /* Maximum allowed credit. put() will check against it. */
 1115         int             pool_sz;
 1116 
 1117         /* Allocate a pool table statically.
 1118          *
 1119          * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
 1120          *
 1121          * A set bit in the table means that the entry is available.
 1122          */
 1123 #define ECORE_POOL_VEC_SIZE     (MAX_MAC_CREDIT_E2 / 64)
 1124         uint64_t                pool_mirror[ECORE_POOL_VEC_SIZE];
 1125 
 1126         /* Base pool offset (initialized differently) */
 1127         int             base_pool_offset;
 1128 
 1129         /**
 1130          * Get the next free pool entry.
 1131          *
 1132          * @return TRUE if there was a free entry in the pool
 1133          */
 1134         bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
 1135 
 1136         /**
 1137          * Return the entry back to the pool.
 1138          *
 1139          * @return TRUE if entry is legal and has been successfully
 1140          *         returned to the pool.
 1141          */
 1142         bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
 1143 
 1144         /**
 1145          * Get the requested amount of credit from the pool.
 1146          *
 1147          * @param cnt Amount of requested credit
 1148          * @return TRUE if the operation is successful
 1149          */
 1150         bool (*get)(struct ecore_credit_pool_obj *o, int cnt);
 1151 
 1152         /**
 1153          * Returns the credit to the pool.
 1154          *
 1155          * @param cnt Amount of credit to return
 1156          * @return TRUE if the operation is successful
 1157          */
 1158         bool (*put)(struct ecore_credit_pool_obj *o, int cnt);
 1159 
 1160         /**
 1161          * Reads the current amount of credit.
 1162          */
 1163         int (*check)(struct ecore_credit_pool_obj *o);
 1164 };
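      /*
       * Illustrative sketch (not part of the original header): classification
       * code reserves CAM credit before an ADD and returns it on DEL or
       * failure.  "pool" is an assumption standing in for macs_pool/vlans_pool.
       */
      #if 0
      static inline int
      example_credit_usage(struct ecore_credit_pool_obj *pool)
      {
          if (!pool->get(pool, 1))
              return (ECORE_NOMEM); /* no CAM credit left */

          /* ... issue the ADD command; if it fails, give the credit back */
          pool->put(pool, 1);
          return (ECORE_SUCCESS);
      }
      #endif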
 1165 
 1166 /*************************** RSS configuration ********************************/
 1167 enum {
 1168         /* RSS_MODE bits are mutually exclusive */
 1169         ECORE_RSS_MODE_DISABLED,
 1170         ECORE_RSS_MODE_REGULAR,
 1171 
 1172         ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
 1173 
 1174         ECORE_RSS_IPV4,
 1175         ECORE_RSS_IPV4_TCP,
 1176         ECORE_RSS_IPV4_UDP,
 1177         ECORE_RSS_IPV6,
 1178         ECORE_RSS_IPV6_TCP,
 1179         ECORE_RSS_IPV6_UDP,
 1180 
 1181         ECORE_RSS_IPV4_VXLAN,
 1182         ECORE_RSS_IPV6_VXLAN,
 1183         ECORE_RSS_TUNN_INNER_HDRS,
 1184 };
 1185 
 1186 struct ecore_config_rss_params {
 1187         struct ecore_rss_config_obj *rss_obj;
 1188 
 1189         /* may have RAMROD_COMP_WAIT set only */
 1190         unsigned long   ramrod_flags;
 1191 
 1192         /* ECORE_RSS_X bits */
 1193         unsigned long   rss_flags;
 1194 
 1195         /* Number of hash bits to take into account */
 1196         uint8_t         rss_result_mask;
 1197 
 1198         /* Indirection table */
 1199         uint8_t         ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
 1200 
 1201         /* RSS hash values */
 1202         uint32_t                rss_key[10];
 1203 
 1204         /* valid only if ECORE_RSS_UPDATE_TOE is set */
 1205         uint16_t                toe_rss_bitmap;
 1206 };
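      /*
       * Illustrative sketch (not part of the original header): a regular RSS
       * configuration hashing TCP over IPv4/IPv6.  The 0x7f result mask
       * (7 hash bits) and the key/indirection-table fill are example values.
       */
      #if 0
      static inline void
      example_fill_rss_params(struct ecore_rss_config_obj *rss_obj,
                              struct ecore_config_rss_params *p)
      {
          p->rss_obj = rss_obj;
          ECORE_SET_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags);
          ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags);
          ECORE_SET_BIT(ECORE_RSS_IPV4, &p->rss_flags);
          ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags);
          ECORE_SET_BIT(ECORE_RSS_IPV6, &p->rss_flags);
          ECORE_SET_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags);
          p->rss_result_mask = 0x7f; /* 7 hash bits -> 128-entry table */

          /* p->ind_table[] and p->rss_key[] must be filled by the caller */
      }
      #endif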
 1207 
 1208 struct ecore_rss_config_obj {
 1209         struct ecore_raw_obj    raw;
 1210 
 1211         /* RSS engine to use */
 1212         uint8_t                 engine_id;
 1213 
 1214         /* Last configured indirection table */
 1215         uint8_t                 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
 1216 
 1217         /* flags for enabling 4-tuple hash on UDP */
 1218         uint8_t                 udp_rss_v4;
 1219         uint8_t                 udp_rss_v6;
 1220 
 1221         int (*config_rss)(struct bxe_softc *sc,
 1222                           struct ecore_config_rss_params *p);
 1223 };
 1224 
 1225 /*********************** Queue state update ***********************************/
 1226 
 1227 /* UPDATE command options */
 1228 enum {
 1229         ECORE_Q_UPDATE_IN_VLAN_REM,
 1230         ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
 1231         ECORE_Q_UPDATE_OUT_VLAN_REM,
 1232         ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
 1233         ECORE_Q_UPDATE_ANTI_SPOOF,
 1234         ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
 1235         ECORE_Q_UPDATE_ACTIVATE,
 1236         ECORE_Q_UPDATE_ACTIVATE_CHNG,
 1237         ECORE_Q_UPDATE_DEF_VLAN_EN,
 1238         ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
 1239         ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
 1240         ECORE_Q_UPDATE_SILENT_VLAN_REM,
 1241         ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
 1242         ECORE_Q_UPDATE_TX_SWITCHING,
 1243         ECORE_Q_UPDATE_PTP_PKTS_CHNG,
 1244         ECORE_Q_UPDATE_PTP_PKTS,
 1245 };
 1246 
 1247 /* Allowed Queue states */
 1248 enum ecore_q_state {
 1249         ECORE_Q_STATE_RESET,
 1250         ECORE_Q_STATE_INITIALIZED,
 1251         ECORE_Q_STATE_ACTIVE,
 1252         ECORE_Q_STATE_MULTI_COS,
 1253         ECORE_Q_STATE_MCOS_TERMINATED,
 1254         ECORE_Q_STATE_INACTIVE,
 1255         ECORE_Q_STATE_STOPPED,
 1256         ECORE_Q_STATE_TERMINATED,
 1257         ECORE_Q_STATE_FLRED,
 1258         ECORE_Q_STATE_MAX,
 1259 };
 1260 
 1261 /* Allowed Queue states */
 1262 enum ecore_q_logical_state {
 1263         ECORE_Q_LOGICAL_STATE_ACTIVE,
 1264         ECORE_Q_LOGICAL_STATE_STOPPED,
 1265 };
 1266 
 1267 /* Allowed commands */
 1268 enum ecore_queue_cmd {
 1269         ECORE_Q_CMD_INIT,
 1270         ECORE_Q_CMD_SETUP,
 1271         ECORE_Q_CMD_SETUP_TX_ONLY,
 1272         ECORE_Q_CMD_DEACTIVATE,
 1273         ECORE_Q_CMD_ACTIVATE,
 1274         ECORE_Q_CMD_UPDATE,
 1275         ECORE_Q_CMD_UPDATE_TPA,
 1276         ECORE_Q_CMD_HALT,
 1277         ECORE_Q_CMD_CFC_DEL,
 1278         ECORE_Q_CMD_TERMINATE,
 1279         ECORE_Q_CMD_EMPTY,
 1280         ECORE_Q_CMD_MAX,
 1281 };
 1282 
 1283 /* queue SETUP + INIT flags */
 1284 enum {
 1285         ECORE_Q_FLG_TPA,
 1286         ECORE_Q_FLG_TPA_IPV6,
 1287         ECORE_Q_FLG_TPA_GRO,
 1288         ECORE_Q_FLG_STATS,
 1289         ECORE_Q_FLG_ZERO_STATS,
 1290         ECORE_Q_FLG_ACTIVE,
 1291         ECORE_Q_FLG_OV,
 1292         ECORE_Q_FLG_VLAN,
 1293         ECORE_Q_FLG_COS,
 1294         ECORE_Q_FLG_HC,
 1295         ECORE_Q_FLG_HC_EN,
 1296         ECORE_Q_FLG_DHC,
 1297         ECORE_Q_FLG_OOO,
 1298         ECORE_Q_FLG_FCOE,
 1299         ECORE_Q_FLG_LEADING_RSS,
 1300         ECORE_Q_FLG_MCAST,
 1301         ECORE_Q_FLG_DEF_VLAN,
 1302         ECORE_Q_FLG_TX_SWITCH,
 1303         ECORE_Q_FLG_TX_SEC,
 1304         ECORE_Q_FLG_ANTI_SPOOF,
 1305         ECORE_Q_FLG_SILENT_VLAN_REM,
 1306         ECORE_Q_FLG_FORCE_DEFAULT_PRI,
 1307         ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
 1308         ECORE_Q_FLG_PCSUM_ON_PKT,
 1309         ECORE_Q_FLG_TUN_INC_INNER_IP_ID
 1310 };
 1311 
 1312 /* Queue type options: queue type may be a combination of the types below. */
 1313 enum ecore_q_type {
 1314         ECORE_Q_TYPE_FWD,
 1315         /** TODO: Consider moving both these flags into the init()
 1316          *        ramrod params.
 1317          */
 1318         ECORE_Q_TYPE_HAS_RX,
 1319         ECORE_Q_TYPE_HAS_TX,
 1320 };
 1321 
 1322 #define ECORE_PRIMARY_CID_INDEX                 0
 1323 #define ECORE_MULTI_TX_COS_E1X                  3 /* QM only */
 1324 #define ECORE_MULTI_TX_COS_E2_E3A0              2
 1325 #define ECORE_MULTI_TX_COS_E3B0                 3
 1326 #define ECORE_MULTI_TX_COS                      3 /* Maximum possible */
 1327 #define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
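      /*
       * Worked example (editorial note): with ETH_ALEN == 6 and
       * sizeof(uint32_t) == 4, ECORE_ALIGN(6, 4) == 8, so MAC_PAD == 2 pad
       * bytes after each MAC address.
       */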
 1328 /* DMAE channel to be used by FW for the timesync workaround. A driver that sends
 1329  * timesync-related ramrods must not use this DMAE command ID.
 1330  */
 1331 #define FW_DMAE_CMD_ID 6
 1332 
 1333 struct ecore_queue_init_params {
 1334         struct {
 1335                 unsigned long   flags;
 1336                 uint16_t                hc_rate;
 1337                 uint8_t         fw_sb_id;
 1338                 uint8_t         sb_cq_index;
 1339         } tx;
 1340 
 1341         struct {
 1342                 unsigned long   flags;
 1343                 uint16_t                hc_rate;
 1344                 uint8_t         fw_sb_id;
 1345                 uint8_t         sb_cq_index;
 1346         } rx;
 1347 
 1348         /* CID context in the host memory */
 1349         struct eth_context *cxts[ECORE_MULTI_TX_COS];
 1350 
 1351         /* maximum number of cos supported by hardware */
 1352         uint8_t max_cos;
 1353 };
 1354 
 1355 struct ecore_queue_terminate_params {
 1356         /* index within the tx_only cids of this queue object */
 1357         uint8_t cid_index;
 1358 };
 1359 
 1360 struct ecore_queue_cfc_del_params {
 1361         /* index within the tx_only cids of this queue object */
 1362         uint8_t cid_index;
 1363 };
 1364 
 1365 struct ecore_queue_update_params {
 1366         unsigned long   update_flags; /* ECORE_Q_UPDATE_XX bits */
 1367         uint16_t                def_vlan;
 1368         uint16_t                silent_removal_value;
 1369         uint16_t                silent_removal_mask;
 1370 /* index within the tx_only cids of this queue object */
 1371         uint8_t         cid_index;
 1372 };
 1373 
 1374 struct ecore_queue_update_tpa_params {
 1375         ecore_dma_addr_t sge_map;
 1376         uint8_t update_ipv4;
 1377         uint8_t update_ipv6;
 1378         uint8_t max_tpa_queues;
 1379         uint8_t max_sges_pkt;
 1380         uint8_t complete_on_both_clients;
 1381         uint8_t dont_verify_thr;
 1382         uint8_t tpa_mode;
 1383         uint8_t _pad;
 1384 
 1385         uint16_t sge_buff_sz;
 1386         uint16_t max_agg_sz;
 1387 
 1388         uint16_t sge_pause_thr_low;
 1389         uint16_t sge_pause_thr_high;
 1390 };
 1391 
 1392 struct rxq_pause_params {
 1393         uint16_t                bd_th_lo;
 1394         uint16_t                bd_th_hi;
 1395         uint16_t                rcq_th_lo;
 1396         uint16_t                rcq_th_hi;
 1397         uint16_t                sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
 1398         uint16_t                sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
 1399         uint16_t                pri_map;
 1400 };
 1401 
 1402 /* general */
 1403 struct ecore_general_setup_params {
 1404         /* valid iff ECORE_Q_FLG_STATS */
 1405         uint8_t         stat_id;
 1406 
 1407         uint8_t         spcl_id;
 1408         uint16_t                mtu;
 1409         uint8_t         cos;
 1410 
 1411         uint8_t         fp_hsi;
 1412 };
 1413 
 1414 struct ecore_rxq_setup_params {
 1415         /* dma */
 1416         ecore_dma_addr_t        dscr_map;
 1417         ecore_dma_addr_t        sge_map;
 1418         ecore_dma_addr_t        rcq_map;
 1419         ecore_dma_addr_t        rcq_np_map;
 1420 
 1421         uint16_t                drop_flags;
 1422         uint16_t                buf_sz;
 1423         uint8_t         fw_sb_id;
 1424         uint8_t         cl_qzone_id;
 1425 
 1426         /* valid iff ECORE_Q_FLG_TPA */
 1427         uint16_t                tpa_agg_sz;
 1428         uint16_t                sge_buf_sz;
 1429         uint8_t         max_sges_pkt;
 1430         uint8_t         max_tpa_queues;
 1431         uint8_t         rss_engine_id;
 1432 
 1433         /* valid iff ECORE_Q_FLG_MCAST */
 1434         uint8_t         mcast_engine_id;
 1435 
 1436         uint8_t         cache_line_log;
 1437 
 1438         uint8_t         sb_cq_index;
 1439 
 1440         /* valid iff ECORE_Q_FLG_SILENT_VLAN_REM */
 1441         uint16_t silent_removal_value;
 1442         uint16_t silent_removal_mask;
 1443 };
 1444 
 1445 struct ecore_txq_setup_params {
 1446         /* dma */
 1447         ecore_dma_addr_t        dscr_map;
 1448 
 1449         uint8_t         fw_sb_id;
 1450         uint8_t         sb_cq_index;
 1451         uint8_t         cos;            /* valid iff ECORE_Q_FLG_COS */
 1452         uint16_t                traffic_type;
 1453         /* equal to the leading RSS client id, used for TX classification */
 1454         uint8_t         tss_leading_cl_id;
 1455 
 1456         /* valid iff ECORE_Q_FLG_DEF_VLAN */
 1457         uint16_t                default_vlan;
 1458 };
 1459 
 1460 struct ecore_queue_setup_params {
 1461         struct ecore_general_setup_params gen_params;
 1462         struct ecore_txq_setup_params txq_params;
 1463         struct ecore_rxq_setup_params rxq_params;
 1464         struct rxq_pause_params pause_params;
 1465         unsigned long flags;
 1466 };
 1467 
 1468 struct ecore_queue_setup_tx_only_params {
 1469         struct ecore_general_setup_params       gen_params;
 1470         struct ecore_txq_setup_params           txq_params;
 1471         unsigned long                           flags;
 1472         /* index within the tx_only cids of this queue object */
 1473         uint8_t                                 cid_index;
 1474 };
 1475 
 1476 struct ecore_queue_state_params {
 1477         struct ecore_queue_sp_obj *q_obj;
 1478 
 1479         /* Current command */
 1480         enum ecore_queue_cmd cmd;
 1481 
 1482         /* may have RAMROD_COMP_WAIT set only */
 1483         unsigned long ramrod_flags;
 1484 
 1485         /* Params according to the current command */
 1486         union {
 1487                 struct ecore_queue_update_params        update;
 1488                 struct ecore_queue_update_tpa_params    update_tpa;
 1489                 struct ecore_queue_setup_params         setup;
 1490                 struct ecore_queue_init_params          init;
 1491                 struct ecore_queue_setup_tx_only_params tx_only;
 1492                 struct ecore_queue_terminate_params     terminate;
 1493                 struct ecore_queue_cfc_del_params       cfc_del;
 1494         } params;
 1495 };
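
      /* Illustrative usage sketch (hypothetical, not part of this header): a driver
       * fills ecore_queue_state_params and hands it to ecore_queue_state_change(),
       * declared later in this file. Names such as "fp" are assumptions.
       *
       *     struct ecore_queue_state_params q_params = { 0 };
       *     int rc;
       *
       *     q_params.q_obj = &fp->q_obj;                  // queue SP object
       *     q_params.cmd   = ECORE_Q_CMD_SETUP;           // requested transition
       *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
       *     q_params.params.setup.gen_params.mtu = 1500;  // plus the other setup params
       *     rc = ecore_queue_state_change(sc, &q_params); // 0 on success
       */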
 1496 
 1497 struct ecore_viflist_params {
 1498         uint8_t echo_res;
 1499         uint8_t func_bit_map_res;
 1500 };
 1501 
 1502 struct ecore_queue_sp_obj {
 1503         uint32_t                cids[ECORE_MULTI_TX_COS];
 1504         uint8_t         cl_id;
 1505         uint8_t         func_id;
 1506 
 1507         /* number of traffic classes supported by the queue.
 1508          * The primary connection of the queue supports the first traffic
 1509          * class. Any further traffic class is supported by a tx-only
 1510          * connection.
 1511          *
 1512          * Therefore max_cos is also the number of valid entries in the cids
 1513          * array.
 1514          */
 1515         uint8_t max_cos;
 1516         uint8_t num_tx_only, next_tx_only;
 1517 
 1518         enum ecore_q_state state, next_state;
 1519 
 1520         /* bits from enum ecore_q_type */
 1521         unsigned long   type;
 1522 
 1523         /* ECORE_Q_CMD_XX bits. This object implements the "one
 1524          * pending" paradigm, but for debug and tracing purposes it's
 1525          * more convenient to have different bits for different
 1526          * commands.
 1527          */
 1528         unsigned long   pending;
 1529 
 1530         /* Buffer to use as ramrod data and its mapping */
 1531         void            *rdata;
 1532         ecore_dma_addr_t        rdata_mapping;
 1533 
 1534         /**
 1535          * Performs one state change according to the given parameters.
 1536          *
 1537          * @return 0 in case of success and negative value otherwise.
 1538          */
 1539         int (*send_cmd)(struct bxe_softc *sc,
 1540                         struct ecore_queue_state_params *params);
 1541 
 1542         /**
 1543          * Sets the pending bit according to the requested transition.
 1544          */
 1545         int (*set_pending)(struct ecore_queue_sp_obj *o,
 1546                            struct ecore_queue_state_params *params);
 1547 
 1548         /**
 1549          * Checks that the requested state transition is legal.
 1550          */
 1551         int (*check_transition)(struct bxe_softc *sc,
 1552                                 struct ecore_queue_sp_obj *o,
 1553                                 struct ecore_queue_state_params *params);
 1554 
 1555         /**
 1556          * Completes the pending command.
 1557          */
 1558         int (*complete_cmd)(struct bxe_softc *sc,
 1559                             struct ecore_queue_sp_obj *o,
 1560                             enum ecore_queue_cmd);
 1561 
 1562         int (*wait_comp)(struct bxe_softc *sc,
 1563                          struct ecore_queue_sp_obj *o,
 1564                          enum ecore_queue_cmd cmd);
 1565 };
 1566 
 1567 /********************** Function state update *********************************/
 1568 
 1569 /* UPDATE command options */
 1570 enum {
 1571         ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
 1572         ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
 1573         ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
 1574         ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
 1575         ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
 1576         ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
 1577         ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
 1578         ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
 1579         ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
 1580         ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
 1581         ECORE_F_UPDATE_TUNNEL_INNER_RSS,
 1582 };
 1583 
 1584 /* Allowed Function states */
 1585 enum ecore_func_state {
 1586         ECORE_F_STATE_RESET,
 1587         ECORE_F_STATE_INITIALIZED,
 1588         ECORE_F_STATE_STARTED,
 1589         ECORE_F_STATE_TX_STOPPED,
 1590         ECORE_F_STATE_MAX,
 1591 };
 1592 
 1593 /* Allowed Function commands */
 1594 enum ecore_func_cmd {
 1595         ECORE_F_CMD_HW_INIT,
 1596         ECORE_F_CMD_START,
 1597         ECORE_F_CMD_STOP,
 1598         ECORE_F_CMD_HW_RESET,
 1599         ECORE_F_CMD_AFEX_UPDATE,
 1600         ECORE_F_CMD_AFEX_VIFLISTS,
 1601         ECORE_F_CMD_TX_STOP,
 1602         ECORE_F_CMD_TX_START,
 1603         ECORE_F_CMD_SWITCH_UPDATE,
 1604         ECORE_F_CMD_SET_TIMESYNC,
 1605         ECORE_F_CMD_MAX,
 1606 };
 1607 
 1608 struct ecore_func_hw_init_params {
 1609         /* A load phase returned by MCP.
 1610          *
 1611          * May be:
 1612          *              FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
 1613          *              FW_MSG_CODE_DRV_LOAD_COMMON
 1614          *              FW_MSG_CODE_DRV_LOAD_PORT
 1615          *              FW_MSG_CODE_DRV_LOAD_FUNCTION
 1616          */
 1617         uint32_t load_phase;
 1618 };
 1619 
 1620 struct ecore_func_hw_reset_params {
 1621         /* A load phase returned by MCP.
 1622          *
 1623          * May be:
 1624          *              FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
 1625          *              FW_MSG_CODE_DRV_LOAD_COMMON
 1626          *              FW_MSG_CODE_DRV_LOAD_PORT
 1627          *              FW_MSG_CODE_DRV_LOAD_FUNCTION
 1628          */
 1629         uint32_t reset_phase;
 1630 };
 1631 
 1632 struct ecore_func_start_params {
 1633         /* Multi Function mode:
 1634          *      - Single Function
 1635          *      - Switch Dependent
 1636          *      - Switch Independent
 1637          */
 1638         uint16_t mf_mode;
 1639 
 1640         /* Switch Dependent mode outer VLAN tag */
 1641         uint16_t sd_vlan_tag;
 1642 
 1643         /* Function cos mode */
 1644         uint8_t network_cos_mode;
 1645 
 1646         /* UDP dest port for VXLAN */
 1647         uint16_t vxlan_dst_port;
 1648 
 1649         /* UDP dest port for Geneve */
 1650         uint16_t geneve_dst_port;
 1651 
 1652         /* Enable inner Rx classification for L2GRE packets */
 1653         uint8_t inner_clss_l2gre;
 1654 
 1655         /* Enable inner Rx classification for L2-Geneve packets */
 1656         uint8_t inner_clss_l2geneve;
 1657 
 1658         /* Enable inner Rx classification for VXLAN packets */
 1659         uint8_t inner_clss_vxlan;
 1660 
 1661         /* Enable RSS according to inner header */
 1662         uint8_t inner_rss; 
 1663 
 1664         /** Allows accepting packets that fail MF classification, possibly
 1665          * only those matching a given ethertype
 1666          */
 1667         uint8_t class_fail;
 1668         uint16_t class_fail_ethtype;
 1669 
 1670         /* Override priority of output packets */
 1671         uint8_t sd_vlan_force_pri;
 1672         uint8_t sd_vlan_force_pri_val;
 1673 
 1674         /* Replace vlan's ethertype */
 1675         uint16_t sd_vlan_eth_type;
 1676 
 1677         /* Prevent inner vlans from being added by FW */
 1678         uint8_t no_added_tags;
 1679 
 1680         /* Inner-to-Outer vlan priority mapping */
 1681         uint8_t c2s_pri[MAX_VLAN_PRIORITIES];
 1682         uint8_t c2s_pri_default;
 1683         uint8_t c2s_pri_valid;
 1684 };
 1685 
 1686 struct ecore_func_switch_update_params {
 1687         unsigned long changes; /* ECORE_F_UPDATE_XX bits */
 1688         uint16_t vlan;
 1689         uint16_t vlan_eth_type;
 1690         uint8_t vlan_force_prio;
 1691         uint16_t vxlan_dst_port;
 1692         uint16_t geneve_dst_port;
 1693 };
 1694 
 1695 struct ecore_func_afex_update_params {
 1696         uint16_t vif_id;
 1697         uint16_t afex_default_vlan;
 1698         uint8_t allowed_priorities;
 1699 };
 1700 
 1701 struct ecore_func_afex_viflists_params {
 1702         uint16_t vif_list_index;
 1703         uint8_t func_bit_map;
 1704         uint8_t afex_vif_list_command;
 1705         uint8_t func_to_clear;
 1706 };
 1707 
 1708 struct ecore_func_tx_start_params {
 1709         struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 1710         uint8_t dcb_enabled;
 1711         uint8_t dcb_version;
 1712         uint8_t dont_add_pri_0;
 1713         uint8_t dcb_outer_pri[MAX_TRAFFIC_TYPES];
 1714 };
 1715 
 1716 struct ecore_func_set_timesync_params {
 1717         /* Reset, set or keep the current drift value */
 1718         uint8_t drift_adjust_cmd;
 1719         /* Dec, inc or keep the current offset */
 1720         uint8_t offset_cmd;
 1721         /* Drift value direction */
 1722         uint8_t add_sub_drift_adjust_value;
 1723         /* Drift, period and offset values to be used according to the commands
 1724          * above.
 1725          */
 1726         uint8_t drift_adjust_value;
 1727         uint32_t drift_adjust_period;
 1728         uint64_t offset_delta;
 1729 };
 1730 
 1731 struct ecore_func_state_params {
 1732         struct ecore_func_sp_obj *f_obj;
 1733 
 1734         /* Current command */
 1735         enum ecore_func_cmd cmd;
 1736 
 1737         /* may have RAMROD_COMP_WAIT set only */
 1738         unsigned long   ramrod_flags;
 1739 
 1740         /* Params according to the current command */
 1741         union {
 1742                 struct ecore_func_hw_init_params hw_init;
 1743                 struct ecore_func_hw_reset_params hw_reset;
 1744                 struct ecore_func_start_params start;
 1745                 struct ecore_func_switch_update_params switch_update;
 1746                 struct ecore_func_afex_update_params afex_update;
 1747                 struct ecore_func_afex_viflists_params afex_viflists;
 1748                 struct ecore_func_tx_start_params tx_start;
 1749                 struct ecore_func_set_timesync_params set_timesync;
 1750         } params;
 1751 };
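
      /* Illustrative usage sketch (hypothetical, not part of this header): issuing a
       * HW_INIT transition through ecore_func_state_change(), declared later in this
       * file. "load_code" stands for the value previously returned by the MCP.
       *
       *     struct ecore_func_state_params func_params = { 0 };
       *
       *     func_params.f_obj = &sc->func_obj;            // function SP object
       *     func_params.cmd   = ECORE_F_CMD_HW_INIT;
       *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
       *     func_params.params.hw_init.load_phase = load_code;
       *     return (ecore_func_state_change(sc, &func_params));
       */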
 1752 
 1753 struct ecore_func_sp_drv_ops {
 1754         /* Init tool + runtime initialization:
 1755          *      - Common Chip
 1756          *      - Common (per Path)
 1757          *      - Port
 1758          *      - Function phases
 1759          */
 1760         int (*init_hw_cmn_chip)(struct bxe_softc *sc);
 1761         int (*init_hw_cmn)(struct bxe_softc *sc);
 1762         int (*init_hw_port)(struct bxe_softc *sc);
 1763         int (*init_hw_func)(struct bxe_softc *sc);
 1764 
 1765         /* Reset Function HW: Common, Port, Function phases. */
 1766         void (*reset_hw_cmn)(struct bxe_softc *sc);
 1767         void (*reset_hw_port)(struct bxe_softc *sc);
 1768         void (*reset_hw_func)(struct bxe_softc *sc);
 1769 
 1770         /* Init/Free GUNZIP resources */
 1771         int (*gunzip_init)(struct bxe_softc *sc);
 1772         void (*gunzip_end)(struct bxe_softc *sc);
 1773 
 1774         /* Prepare/Release FW resources */
 1775         int (*init_fw)(struct bxe_softc *sc);
 1776         void (*release_fw)(struct bxe_softc *sc);
 1777 };
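
      /* Illustrative sketch (all callback names below are hypothetical): the driver
       * is expected to provide one instance of these callbacks and hand it to
       * ecore_init_func_obj(), declared later in this file.
       *
       *     static struct ecore_func_sp_drv_ops bxe_func_sp_drv = {
       *         .init_hw_cmn_chip = bxe_init_hw_common_chip,
       *         .init_hw_cmn      = bxe_init_hw_common,
       *         .init_hw_port     = bxe_init_hw_port,
       *         .init_hw_func     = bxe_init_hw_func,
       *         .reset_hw_cmn     = bxe_reset_common,
       *         .reset_hw_port    = bxe_reset_port,
       *         .reset_hw_func    = bxe_reset_func,
       *         .gunzip_init      = bxe_gunzip_init,
       *         .gunzip_end       = bxe_gunzip_end,
       *         .init_fw          = bxe_init_firmware,
       *         .release_fw       = bxe_release_firmware,
       *     };
       */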
 1778 
 1779 struct ecore_func_sp_obj {
 1780         enum ecore_func_state   state, next_state;
 1781 
 1782         /* ECORE_FUNC_CMD_XX bits. This object implements the "one
 1783          * pending" paradigm, but for debug and tracing purposes it's
 1784          * more convenient to have different bits for different
 1785          * commands.
 1786          */
 1787         unsigned long           pending;
 1788 
 1789         /* Buffer to use as ramrod data and its mapping */
 1790         void                    *rdata;
 1791         ecore_dma_addr_t                rdata_mapping;
 1792 
 1793         /* Buffer to use as afex ramrod data and its mapping.
 1794          * This can't be the same rdata as above because afex ramrod requests
 1795          * can arrive at the object in parallel with other ramrod requests.
 1796          */
 1797         void                    *afex_rdata;
 1798         ecore_dma_addr_t                afex_rdata_mapping;
 1799 
 1800         /* This mutex ensures that when the pending flag is taken, the next
 1801          * ramrod to be sent will be the one that set the pending bit
 1802          */
 1803         ECORE_MUTEX             one_pending_mutex;
 1804 
 1805         /* Driver interface */
 1806         struct ecore_func_sp_drv_ops    *drv;
 1807 
 1808         /**
 1809          * Performs one state change according to the given parameters.
 1810          *
 1811          * @return 0 in case of success and negative value otherwise.
 1812          */
 1813         int (*send_cmd)(struct bxe_softc *sc,
 1814                         struct ecore_func_state_params *params);
 1815 
 1816         /**
 1817          * Checks that the requested state transition is legal.
 1818          */
 1819         int (*check_transition)(struct bxe_softc *sc,
 1820                                 struct ecore_func_sp_obj *o,
 1821                                 struct ecore_func_state_params *params);
 1822 
 1823         /**
 1824          * Completes the pending command.
 1825          */
 1826         int (*complete_cmd)(struct bxe_softc *sc,
 1827                             struct ecore_func_sp_obj *o,
 1828                             enum ecore_func_cmd cmd);
 1829 
 1830         int (*wait_comp)(struct bxe_softc *sc, struct ecore_func_sp_obj *o,
 1831                          enum ecore_func_cmd cmd);
 1832 };
 1833 
 1834 /********************** Interfaces ********************************************/
 1835 /* Queueable objects set */
 1836 union ecore_qable_obj {
 1837         struct ecore_vlan_mac_obj vlan_mac;
 1838 };
 1839 /************** Function state update *********/
 1840 void ecore_init_func_obj(struct bxe_softc *sc,
 1841                          struct ecore_func_sp_obj *obj,
 1842                          void *rdata, ecore_dma_addr_t rdata_mapping,
 1843                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
 1844                          struct ecore_func_sp_drv_ops *drv_iface);
 1845 
 1846 int ecore_func_state_change(struct bxe_softc *sc,
 1847                             struct ecore_func_state_params *params);
 1848 
 1849 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
 1850                                            struct ecore_func_sp_obj *o);
 1851 /******************* Queue State **************/
 1852 void ecore_init_queue_obj(struct bxe_softc *sc,
 1853                           struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids,
 1854                           uint8_t cid_cnt, uint8_t func_id, void *rdata,
 1855                           ecore_dma_addr_t rdata_mapping, unsigned long type);
 1856 
 1857 int ecore_queue_state_change(struct bxe_softc *sc,
 1858                              struct ecore_queue_state_params *params);
 1859 
 1860 int ecore_get_q_logical_state(struct bxe_softc *sc,
 1861                                struct ecore_queue_sp_obj *obj);
 1862 
 1863 /********************* VLAN-MAC ****************/
 1864 void ecore_init_mac_obj(struct bxe_softc *sc,
 1865                         struct ecore_vlan_mac_obj *mac_obj,
 1866                         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 1867                         ecore_dma_addr_t rdata_mapping, int state,
 1868                         unsigned long *pstate, ecore_obj_type type,
 1869                         struct ecore_credit_pool_obj *macs_pool);
 1870 
 1871 void ecore_init_vlan_obj(struct bxe_softc *sc,
 1872                          struct ecore_vlan_mac_obj *vlan_obj,
 1873                          uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 1874                          ecore_dma_addr_t rdata_mapping, int state,
 1875                          unsigned long *pstate, ecore_obj_type type,
 1876                          struct ecore_credit_pool_obj *vlans_pool);
 1877 
 1878 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
 1879                              struct ecore_vlan_mac_obj *vlan_mac_obj,
 1880                              uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 1881                              ecore_dma_addr_t rdata_mapping, int state,
 1882                              unsigned long *pstate, ecore_obj_type type,
 1883                              struct ecore_credit_pool_obj *macs_pool,
 1884                              struct ecore_credit_pool_obj *vlans_pool);
 1885 
 1886 void ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
 1887                                struct ecore_vlan_mac_obj *vlan_mac_obj,
 1888                                uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 1889                                ecore_dma_addr_t rdata_mapping, int state,
 1890                                unsigned long *pstate, ecore_obj_type type,
 1891                                struct ecore_credit_pool_obj *macs_pool,
 1892                                struct ecore_credit_pool_obj *vlans_pool);
 1893 
 1894 int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
 1895                                         struct ecore_vlan_mac_obj *o);
 1896 void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
 1897                                   struct ecore_vlan_mac_obj *o);
 1898 int ecore_vlan_mac_h_write_lock(struct bxe_softc *sc,
 1899                                 struct ecore_vlan_mac_obj *o);
 1900 void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
 1901                                           struct ecore_vlan_mac_obj *o);
 1902 int ecore_config_vlan_mac(struct bxe_softc *sc,
 1903                            struct ecore_vlan_mac_ramrod_params *p);
 1904 
 1905 int ecore_vlan_mac_move(struct bxe_softc *sc,
 1906                         struct ecore_vlan_mac_ramrod_params *p,
 1907                         struct ecore_vlan_mac_obj *dest_o);
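
      /* Illustrative sketch (hypothetical, assuming the ecore_vlan_mac_ramrod_params
       * layout defined earlier in this file): adding one unicast MAC through
       * ecore_config_vlan_mac(). "addr" and the object pointer are assumptions.
       *
       *     struct ecore_vlan_mac_ramrod_params p = { 0 };
       *
       *     p.vlan_mac_obj = &sc->sp_objs->mac_obj;       // hypothetical MAC object
       *     p.user_req.cmd = ECORE_VLAN_MAC_ADD;
       *     memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN); // "addr" is the new MAC
       *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
       *     rc = ecore_config_vlan_mac(sc, &p);           // negative on error
       */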
 1908 
 1909 /********************* RX MODE ****************/
 1910 
 1911 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
 1912                             struct ecore_rx_mode_obj *o);
 1913 
 1914 /**
 1915  * ecore_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
 1916  *
 1917  * @p: Command parameters
 1918  *
 1919  * Return: 0 - if the operation was successful and there are no pending completions,
 1920  *         positive number - if there are pending completions,
 1921  *         negative - if there were errors
 1922  */
 1923 int ecore_config_rx_mode(struct bxe_softc *sc,
 1924                          struct ecore_rx_mode_ramrod_params *p);
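
      /* Illustrative sketch: a positive return value only means the ramrod is still
       * pending, so callers that need synchronous behaviour typically set
       * RAMROD_COMP_WAIT in p->ramrod_flags before calling, e.g. (hypothetical):
       *
       *     rc = ecore_config_rx_mode(sc, &rx_mode_params);
       *     if (rc < 0)
       *         return (rc);   // real failure
       *     // rc > 0: completion still outstanding; rc == 0: already completed
       */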
 1925 
 1926 /****************** MULTICASTS ****************/
 1927 
 1928 void ecore_init_mcast_obj(struct bxe_softc *sc,
 1929                           struct ecore_mcast_obj *mcast_obj,
 1930                           uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
 1931                           uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
 1932                           int state, unsigned long *pstate,
 1933                           ecore_obj_type type);
 1934 
 1935 /**
 1936  * ecore_config_mcast - Configure multicast MACs list.
 1937  *
 1938  * @cmd: command to execute: ECORE_MCAST_CMD_X
 1939  *
 1940  * May configure a new list
 1941  * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
 1942  * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
 1943  * configuration, or continue to execute the pending commands
 1944  * (ECORE_MCAST_CMD_CONT).
 1945  *
 1946  * If a previous command is still pending or if the number of MACs to
 1947  * configure is more than the maximum number of MACs in one command,
 1948  * the current command will be enqueued to the tail of the
 1949  * pending commands list.
 1950  *
 1951  * Return: 0 if the operation was successful and there are no pending completions,
 1952  *         negative if there were errors, positive if there are pending
 1953  *         completions.
 1954  */
 1955 int ecore_config_mcast(struct bxe_softc *sc,
 1956                        struct ecore_mcast_ramrod_params *p,
 1957                        enum ecore_mcast_cmd cmd);
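
      /* Illustrative sketch (hypothetical object and list handling): programming a
       * new multicast list with ECORE_MCAST_CMD_ADD.
       *
       *     struct ecore_mcast_ramrod_params rparam = { 0 };
       *
       *     rparam.mcast_obj = &sc->mcast_obj;            // hypothetical mcast object
       *     // rparam.mcast_list / mcast_list_len describe the MACs to add
       *     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
       *     if (rc > 0)
       *         ; // queued behind a pending command; it will complete later
       */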
 1958 
 1959 /****************** CREDIT POOL ****************/
 1960 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
 1961                                 struct ecore_credit_pool_obj *p, uint8_t func_id,
 1962                                 uint8_t func_num);
 1963 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
 1964                                  struct ecore_credit_pool_obj *p, uint8_t func_id,
 1965                                  uint8_t func_num);
 1966 void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
 1967                             int base, int credit);
 1968 
 1969 /****************** RSS CONFIGURATION ****************/
 1970 void ecore_init_rss_config_obj(struct bxe_softc *sc,
 1971                                struct ecore_rss_config_obj *rss_obj,
 1972                                uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
 1973                                void *rdata, ecore_dma_addr_t rdata_mapping,
 1974                                int state, unsigned long *pstate,
 1975                                ecore_obj_type type);
 1976 
 1977 /**
 1978  * ecore_config_rss - Updates the RSS configuration according to the provided parameters
 1979  *
 1980  * Return: 0 in case of success
 1981  */
 1982 int ecore_config_rss(struct bxe_softc *sc,
 1983                      struct ecore_config_rss_params *p);
 1984 
 1985 /**
 1986  * ecore_get_rss_ind_table - Return the current ind_table configuration.
 1987  *
 1988  * @ind_table: buffer to fill with the current indirection
 1989  *                  table content. Should be at least
 1990  *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
 1991  */
 1992 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
 1993                              uint8_t *ind_table);
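
      /* Illustrative sketch: reading the current indirection table into a caller
       * supplied buffer of T_ETH_INDIRECTION_TABLE_SIZE bytes. The rss_conf_obj
       * pointer below is an assumption.
       *
       *     uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
       *
       *     ecore_get_rss_ind_table(&sc->rss_conf_obj, ind_table);
       */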
 1994 
 1995 #define PF_MAC_CREDIT_E2(sc, func_num)                                  \
 1996         ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_MAC_CREDIT_CNT) /   \
 1997          func_num + GET_NUM_VFS_PER_PF(sc) * VF_MAC_CREDIT_CNT)
 1998 
 1999 #define PF_VLAN_CREDIT_E2(sc, func_num)                                  \
 2000         ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_VLAN_CREDIT_CNT) / \
 2001          func_num + GET_NUM_VFS_PER_PF(sc) * VF_VLAN_CREDIT_CNT)
 2002 
 2003 
 2004 #endif /* ECORE_SP_H */
 2005 
