FreeBSD/Linux Kernel Cross Reference
sys/dev/bxe/ecore_sp.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  *
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   26  * THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "bxe.h"
   33 #include "ecore_init.h"
   34 
   35 
   36 
   37 
   38 /**** Exe Queue interfaces ****/
   39 
    40 /**
    41  * ecore_exe_queue_init - init the Exe Queue object
    42  *
        * @sc:         driver handle
    43  * @o:          pointer to the object
    44  * @exe_len:    length of a single execution chunk
    45  * @owner:      pointer to the owner
    46  * @validate:   validate function pointer
        * @remove:     remove function pointer
    47  * @optimize:   optimize function pointer
    48  * @exec:       execute function pointer
    49  * @get:        get function pointer
    50  */
   51 static inline void ecore_exe_queue_init(struct bxe_softc *sc,
   52                                         struct ecore_exe_queue_obj *o,
   53                                         int exe_len,
   54                                         union ecore_qable_obj *owner,
   55                                         exe_q_validate validate,
   56                                         exe_q_remove remove,
   57                                         exe_q_optimize optimize,
   58                                         exe_q_execute exec,
   59                                         exe_q_get get)
   60 {
   61         ECORE_MEMSET(o, 0, sizeof(*o));
   62 
   63         ECORE_LIST_INIT(&o->exe_queue);
   64         ECORE_LIST_INIT(&o->pending_comp);
   65 
   66         ECORE_SPIN_LOCK_INIT(&o->lock, sc);
   67 
   68         o->exe_chunk_len = exe_len;
   69         o->owner         = owner;
   70 
   71         /* Owner specific callbacks */
   72         o->validate      = validate;
   73         o->remove        = remove;
   74         o->optimize      = optimize;
   75         o->execute       = exec;
   76         o->get           = get;
   77 
   78         ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
   79                   exe_len);
   80 }
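
For orientation, a hedged wiring sketch for the initializer above: a hypothetical owner object registers its callbacks with the queue. The my_* callbacks and the chunk length of 1 are illustrative placeholders, not values taken from this driver.

/* Hypothetical sketch: my_validate/my_remove/my_optimize/my_execute/my_get
 * are assumed stand-ins for an owner's real exe_q_* implementations.
 */
static void my_vlan_mac_setup(struct bxe_softc *sc,
                              struct ecore_vlan_mac_obj *vm)
{
        ecore_exe_queue_init(sc, &vm->exe_queue, 1 /* arbitrary chunk len */,
                             (union ecore_qable_obj *)vm,
                             my_validate, my_remove,
                             my_optimize, my_execute, my_get);
}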
   81 
   82 static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
   83                                              struct ecore_exeq_elem *elem)
   84 {
   85         ECORE_MSG(sc, "Deleting an exe_queue element\n");
   86         ECORE_FREE(sc, elem, sizeof(*elem));
   87 }
   88 
   89 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
   90 {
   91         struct ecore_exeq_elem *elem;
   92         int cnt = 0;
   93 
   94         ECORE_SPIN_LOCK_BH(&o->lock);
   95 
   96         ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
   97                                   struct ecore_exeq_elem)
   98                 cnt++;
   99 
  100         ECORE_SPIN_UNLOCK_BH(&o->lock);
  101 
  102         return cnt;
  103 }
  104 
  105 /**
  106  * ecore_exe_queue_add - add a new element to the execution queue
  107  *
  108  * @sc:         driver handle
  109  * @o:          queue
   110  * @elem:       new element to add
   111  * @restore:    true - do not optimize the command
   112  *
   113  * If the element is optimized away or is illegal, it is freed.
  114  */
  115 static inline int ecore_exe_queue_add(struct bxe_softc *sc,
  116                                       struct ecore_exe_queue_obj *o,
  117                                       struct ecore_exeq_elem *elem,
  118                                       bool restore)
  119 {
  120         int rc;
  121 
  122         ECORE_SPIN_LOCK_BH(&o->lock);
  123 
  124         if (!restore) {
   125                 /* Try to optimize the element away (cancel it against an opposite queued command) */
  126                 rc = o->optimize(sc, o->owner, elem);
  127                 if (rc)
  128                         goto free_and_exit;
  129 
  130                 /* Check if this request is ok */
  131                 rc = o->validate(sc, o->owner, elem);
  132                 if (rc) {
  133                         ECORE_MSG(sc, "Preamble failed: %d\n", rc);
  134                         goto free_and_exit;
  135                 }
  136         }
  137 
  138         /* If so, add it to the execution queue */
  139         ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
  140 
  141         ECORE_SPIN_UNLOCK_BH(&o->lock);
  142 
  143         return ECORE_SUCCESS;
  144 
  145 free_and_exit:
  146         ecore_exe_queue_free_elem(sc, elem);
  147 
  148         ECORE_SPIN_UNLOCK_BH(&o->lock);
  149 
  150         return rc;
  151 }
  152 
  153 static inline void __ecore_exe_queue_reset_pending(
  154         struct bxe_softc *sc,
  155         struct ecore_exe_queue_obj *o)
  156 {
  157         struct ecore_exeq_elem *elem;
  158 
  159         while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
  160                 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
  161                                               struct ecore_exeq_elem,
  162                                               link);
  163 
  164                 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
  165                 ecore_exe_queue_free_elem(sc, elem);
  166         }
  167 }
  168 
  169 /**
  170  * ecore_exe_queue_step - execute one execution chunk atomically
  171  *
  172  * @sc:                 driver handle
  173  * @o:                  queue
  174  * @ramrod_flags:       flags
  175  *
  176  * (Should be called while holding the exe_queue->lock).
  177  */
  178 static inline int ecore_exe_queue_step(struct bxe_softc *sc,
  179                                        struct ecore_exe_queue_obj *o,
  180                                        unsigned long *ramrod_flags)
  181 {
  182         struct ecore_exeq_elem *elem, spacer;
  183         int cur_len = 0, rc;
  184 
  185         ECORE_MEMSET(&spacer, 0, sizeof(spacer));
  186 
  187         /* Next step should not be performed until the current is finished,
  188          * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
  189          * properly clear object internals without sending any command to the FW
  190          * which also implies there won't be any completion to clear the
  191          * 'pending' list.
  192          */
  193         if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
  194                 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
  195                         ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
  196                         __ecore_exe_queue_reset_pending(sc, o);
  197                 } else {
  198                         return ECORE_PENDING;
  199                 }
  200         }
  201 
   202         /* Run through the pending commands list and create the next
   203          * execution chunk.
  204          */
  205         while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
  206                 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
  207                                               struct ecore_exeq_elem,
  208                                               link);
  209                 ECORE_DBG_BREAK_IF(!elem->cmd_len);
  210 
  211                 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
  212                         cur_len += elem->cmd_len;
  213                         /* Prevent from both lists being empty when moving an
  214                          * element. This will allow the call of
  215                          * ecore_exe_queue_empty() without locking.
  216                          */
  217                         ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
  218                         mb();
  219                         ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
  220                         ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
  221                         ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
  222                 } else
  223                         break;
  224         }
  225 
  226         /* Sanity check */
  227         if (!cur_len)
  228                 return ECORE_SUCCESS;
  229 
  230         rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
  231         if (rc < 0)
   232                 /* In case of an error return the commands back to the queue
   233                  * and reset the pending_comp.
  234                  */
  235                 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
  236         else if (!rc)
  237                 /* If zero is returned, means there are no outstanding pending
  238                  * completions and we may dismiss the pending list.
  239                  */
  240                 __ecore_exe_queue_reset_pending(sc, o);
  241 
  242         return rc;
  243 }
  244 
  245 static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
  246 {
  247         bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
  248 
  249         /* Don't reorder!!! */
  250         mb();
  251 
  252         return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
  253 }
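
The spacer manoeuvre in ecore_exe_queue_step() exists precisely so that the lockless check above can never observe both lists empty while an element is in flight. A minimal self-contained sketch of the idiom, assuming a toy intrusive list (none of this is driver code):

#include <stddef.h>

/* Toy circular doubly-linked list, just enough to show the idiom. */
struct node { struct node *next, *prev; };

static void list_init(struct node *h) { h->next = h->prev = h; }
static int  list_empty(struct node *h) { return h->next == h; }
static void list_add_tail(struct node *n, struct node *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}
static void list_del(struct node *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

/* The lockless predicate the spacer protects. */
static int both_empty(struct node *src, struct node *dst)
{
        return list_empty(src) && list_empty(dst);
}

/* Move elem from src to dst so that both_empty() never transiently
 * returns true mid-move: the spacer keeps dst non-empty throughout.
 */
static void move_with_spacer(struct node *elem,
                             struct node *src, struct node *dst)
{
        struct node spacer;

        list_add_tail(&spacer, dst);   /* dst is non-empty from here on  */
        __sync_synchronize();          /* stand-in for the kernel's mb() */
        list_del(elem);                /* src may become empty here...   */
        list_add_tail(elem, dst);      /* ...but dst already isn't       */
        list_del(&spacer);
}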
  254 
  255 static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
  256         struct bxe_softc *sc)
  257 {
  258         ECORE_MSG(sc, "Allocating a new exe_queue element\n");
  259         return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
  260                             sc);
  261 }
  262 
  263 /************************ raw_obj functions ***********************************/
  264 static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
  265 {
   266         /*
   267          * !! converts the value returned by ECORE_TEST_BIT such that it
   268          * is guaranteed not to be truncated regardless of bool definition.
   269          *
   270          * Note we cannot simply define the function's return value type
   271          * to match the type returned by ECORE_TEST_BIT, as it varies by
   272          * platform/implementation.
   273          */
  274 
  275         return !!ECORE_TEST_BIT(o->state, o->pstate);
  276 }
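
The double negation here is a general C normalization idiom rather than anything driver-specific; a standalone illustration:

#include <assert.h>

/* !! collapses any non-zero value to exactly 1, so the result survives
 * assignment to a narrower type or a one-bit bitfield.
 */
static int bit8_is_set(unsigned long word)
{
        return !!(word & 0x100);
}

int main(void)
{
        assert(bit8_is_set(0x100) == 1);   /* a (char) cast of 0x100 would be 0 */
        assert(bit8_is_set(0x0ff) == 0);
        return 0;
}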
  277 
  278 static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
  279 {
  280         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
  281         ECORE_CLEAR_BIT(o->state, o->pstate);
  282         ECORE_SMP_MB_AFTER_CLEAR_BIT();
  283 }
  284 
  285 static void ecore_raw_set_pending(struct ecore_raw_obj *o)
  286 {
  287         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
  288         ECORE_SET_BIT(o->state, o->pstate);
  289         ECORE_SMP_MB_AFTER_CLEAR_BIT();
  290 }
  291 
   292 /**
   293  * ecore_state_wait - wait until the given bit (state) is cleared
   294  *
   295  * @sc:         device handle
   296  * @state:      state which is to be cleared
   297  * @pstate:     pointer to the state buffer
   298  */
  300 static inline int ecore_state_wait(struct bxe_softc *sc, int state,
  301                                    unsigned long *pstate)
  302 {
  303         /* can take a while if any port is running */
  304         int cnt = 5000;
  305 
  306 
  307         if (CHIP_REV_IS_EMUL(sc))
  308                 cnt *= 20;
  309 
  310         ECORE_MSG(sc, "waiting for state to become %d\n", state);
  311 
  312         ECORE_MIGHT_SLEEP();
  313         while (cnt--) {
  314                 if (!ECORE_TEST_BIT(state, pstate)) {
  315 #ifdef ECORE_STOP_ON_ERROR
  316                         ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
  317 #endif
  318                         return ECORE_SUCCESS;
  319                 }
  320 
  321                 ECORE_WAIT(sc, delay_us);
  322 
  323                 if (sc->panic)
  324                         return ECORE_IO;
  325         }
  326 
  327         /* timeout! */
  328         ECORE_ERR("timeout waiting for state %d\n", state);
  329 #ifdef ECORE_STOP_ON_ERROR
  330         ecore_panic();
  331 #endif
  332 
  333         return ECORE_TIMEOUT;
  334 }
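
The function above is a bounded poll: test the bit, sleep, retry, and give up after a fixed budget. A generic user-space sketch of the same shape, with usleep() standing in for ECORE_WAIT() and a caller-supplied condition callback (both are assumptions, not driver API):

#include <unistd.h>

/* Poll cond(arg) up to 'attempts' times, sleeping between polls.
 * Returns 0 on success, -1 on timeout.
 */
static int wait_for(int (*cond)(void *), void *arg,
                    int attempts, unsigned int delay_us)
{
        while (attempts--) {
                if (cond(arg))
                        return 0;
                usleep(delay_us);
        }
        return -1;
}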
  335 
  336 static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
  337 {
  338         return ecore_state_wait(sc, raw->state, raw->pstate);
  339 }
  340 
  341 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
  342 /* credit handling callbacks */
  343 static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
  344 {
  345         struct ecore_credit_pool_obj *mp = o->macs_pool;
  346 
  347         ECORE_DBG_BREAK_IF(!mp);
  348 
  349         return mp->get_entry(mp, offset);
  350 }
  351 
  352 static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
  353 {
  354         struct ecore_credit_pool_obj *mp = o->macs_pool;
  355 
  356         ECORE_DBG_BREAK_IF(!mp);
  357 
  358         return mp->get(mp, 1);
  359 }
  360 
  361 static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
  362 {
  363         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  364 
  365         ECORE_DBG_BREAK_IF(!vp);
  366 
  367         return vp->get_entry(vp, offset);
  368 }
  369 
  370 static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
  371 {
  372         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  373 
  374         ECORE_DBG_BREAK_IF(!vp);
  375 
  376         return vp->get(vp, 1);
  377 }
  378 
  379 static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
  380 {
  381         struct ecore_credit_pool_obj *mp = o->macs_pool;
  382         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  383 
  384         if (!mp->get(mp, 1))
  385                 return FALSE;
  386 
  387         if (!vp->get(vp, 1)) {
  388                 mp->put(mp, 1);
  389                 return FALSE;
  390         }
  391 
  392         return TRUE;
  393 }
  394 
  395 static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
  396 {
  397         struct ecore_credit_pool_obj *mp = o->macs_pool;
  398 
  399         return mp->put_entry(mp, offset);
  400 }
  401 
  402 static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
  403 {
  404         struct ecore_credit_pool_obj *mp = o->macs_pool;
  405 
  406         return mp->put(mp, 1);
  407 }
  408 
  409 static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
  410 {
  411         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  412 
  413         return vp->put_entry(vp, offset);
  414 }
  415 
  416 static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
  417 {
  418         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  419 
  420         return vp->put(vp, 1);
  421 }
  422 
  423 static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
  424 {
  425         struct ecore_credit_pool_obj *mp = o->macs_pool;
  426         struct ecore_credit_pool_obj *vp = o->vlans_pool;
  427 
  428         if (!mp->put(mp, 1))
  429                 return FALSE;
  430 
  431         if (!vp->put(vp, 1)) {
  432                 mp->get(mp, 1);
  433                 return FALSE;
  434         }
  435 
  436         return TRUE;
  437 }
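
Together, the get/put pairs above form an all-or-nothing transaction across two credit pools: take from the MAC pool, then the VLAN pool, and roll the first back if the second fails (the put side mirrors this with a compensating get). A single-threaded sketch of the pattern; the real pools use atomic get()/put() methods:

/* Illustrative two-pool reservation with rollback; 'struct pool' and its
 * helpers are toy stand-ins for ecore_credit_pool_obj.
 */
struct pool { int credits; };

static int try_get(struct pool *p, int n)
{
        if (p->credits < n)
                return 0;
        p->credits -= n;
        return 1;
}

static void put_back(struct pool *p, int n) { p->credits += n; }

static int get_both(struct pool *macs, struct pool *vlans)
{
        if (!try_get(macs, 1))
                return 0;
        if (!try_get(vlans, 1)) {
                put_back(macs, 1);     /* roll back the first reservation */
                return 0;
        }
        return 1;
}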
  438 
  439 /**
  440  * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
  441  * head list.
  442  *
  443  * @sc:         device handle
  444  * @o:          vlan_mac object
  445  *
  446  * @details: Non-blocking implementation; should be called under execution
  447  *           queue lock.
  448  */
  449 static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
  450                                             struct ecore_vlan_mac_obj *o)
  451 {
  452         if (o->head_reader) {
  453                 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
  454                 return ECORE_BUSY;
  455         }
  456 
  457         ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
  458         return ECORE_SUCCESS;
  459 }
  460 
  461 /**
   462  * __ecore_vlan_mac_h_exec_pending - execute a pending step that previously
   463  * couldn't run because the vlan mac head list lock was taken.
  464  *
  465  * @sc:         device handle
  466  * @o:          vlan_mac object
  467  *
  468  * @details Should be called under execution queue lock; notice it might release
  469  *          and reclaim it during its run.
  470  */
  471 static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
  472                                             struct ecore_vlan_mac_obj *o)
  473 {
  474         int rc;
  475         unsigned long ramrod_flags = o->saved_ramrod_flags;
  476 
  477         ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
  478                   ramrod_flags);
  479         o->head_exe_request = FALSE;
  480         o->saved_ramrod_flags = 0;
  481         rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
  482         if ((rc != ECORE_SUCCESS) && (rc != ECORE_PENDING)) {
  483                 ECORE_ERR("execution of pending commands failed with rc %d\n",
  484                           rc);
  485 #ifdef ECORE_STOP_ON_ERROR
  486                 ecore_panic();
  487 #endif
  488         }
  489 }
  490 
  491 /**
   492  * __ecore_vlan_mac_h_pend - pend an execution step which couldn't be
   493  * run because the vlan mac head list lock was taken.
  494  *
  495  * @sc:                 device handle
  496  * @o:                  vlan_mac object
  497  * @ramrod_flags:       ramrod flags of missed execution
  498  *
  499  * @details Should be called under execution queue lock.
  500  */
  501 static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
  502                                     struct ecore_vlan_mac_obj *o,
  503                                     unsigned long ramrod_flags)
  504 {
  505         o->head_exe_request = TRUE;
  506         o->saved_ramrod_flags = ramrod_flags;
  507         ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
  508                   ramrod_flags);
  509 }
  510 
  511 /**
  512  * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
  513  *
  514  * @sc:                 device handle
  515  * @o:                  vlan_mac object
  516  *
  517  * @details Should be called under execution queue lock. Notice if a pending
  518  *          execution exists, it would perform it - possibly releasing and
  519  *          reclaiming the execution queue lock.
  520  */
  521 static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
  522                                             struct ecore_vlan_mac_obj *o)
  523 {
  524         /* It's possible a new pending execution was added since this writer
  525          * executed. If so, execute again. [Ad infinitum]
  526          */
  527         while(o->head_exe_request) {
  528                 ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
  529                 __ecore_vlan_mac_h_exec_pending(sc, o);
  530         }
  531 }
  532 
  533 /**
  534  * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
  535  *
  536  * @sc:                 device handle
  537  * @o:                  vlan_mac object
  538  *
  539  * @details Notice if a pending execution exists, it would perform it -
  540  *          possibly releasing and reclaiming the execution queue lock.
  541  */
  542 void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
  543                                    struct ecore_vlan_mac_obj *o)
  544 {
  545         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
  546         __ecore_vlan_mac_h_write_unlock(sc, o);
  547         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
  548 }
  549 
  550 /**
  551  * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
  552  *
  553  * @sc:                 device handle
  554  * @o:                  vlan_mac object
  555  *
  556  * @details Should be called under the execution queue lock. May sleep. May
  557  *          release and reclaim execution queue lock during its run.
  558  */
  559 static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
  560                                         struct ecore_vlan_mac_obj *o)
  561 {
  562         /* If we got here, we're holding lock --> no WRITER exists */
  563         o->head_reader++;
  564         ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
  565                   o->head_reader);
  566 
  567         return ECORE_SUCCESS;
  568 }
  569 
  570 /**
  571  * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
  572  *
  573  * @sc:                 device handle
  574  * @o:                  vlan_mac object
  575  *
  576  * @details May sleep. Claims and releases execution queue lock during its run.
  577  */
  578 int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
  579                                struct ecore_vlan_mac_obj *o)
  580 {
  581         int rc;
  582 
  583         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
  584         rc = __ecore_vlan_mac_h_read_lock(sc, o);
  585         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
  586 
  587         return rc;
  588 }
  589 
  590 /**
  591  * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
  592  *
  593  * @sc:                 device handle
  594  * @o:                  vlan_mac object
  595  *
  596  * @details Should be called under execution queue lock. Notice if a pending
  597  *          execution exists, it would be performed if this was the last
   598  *          reader, possibly releasing and reclaiming the execution queue lock.
  599  */
  600 static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
  601                                           struct ecore_vlan_mac_obj *o)
  602 {
  603         if (!o->head_reader) {
  604                 ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
  605 #ifdef ECORE_STOP_ON_ERROR
  606                 ecore_panic();
  607 #endif
  608         } else {
  609                 o->head_reader--;
  610                 ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
  611                           o->head_reader);
  612         }
  613 
  614         /* It's possible a new pending execution was added, and that this reader
  615          * was last - if so we need to execute the command.
  616          */
  617         if (!o->head_reader && o->head_exe_request) {
  618                 ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
  619 
  620                 /* Writer release will do the trick */
  621                 __ecore_vlan_mac_h_write_unlock(sc, o);
  622         }
  623 }
  624 
  625 /**
  626  * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
  627  *
  628  * @sc:                 device handle
  629  * @o:                  vlan_mac object
  630  *
  631  * @details Notice if a pending execution exists, it would be performed if this
  632  *          was the last reader. Claims and releases the execution queue lock
  633  *          during its run.
  634  */
  635 void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
  636                                   struct ecore_vlan_mac_obj *o)
  637 {
  638         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
  639         __ecore_vlan_mac_h_read_unlock(sc, o);
  640         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
  641 }
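
Taken together, head_reader and the exe-queue spinlock form a small reader-counting lock: readers bump a count under the outer lock, and a "writer" is simply a caller that holds the outer lock while the count is zero. A minimal pthread-based sketch of that shape (assumption: a mutex stands in for the spinlock):

#include <pthread.h>

struct head_lock {
        pthread_mutex_t lock;     /* outer lock (the exe queue lock)  */
        int             readers;  /* number of active readers         */
};

static void head_read_lock(struct head_lock *l)
{
        pthread_mutex_lock(&l->lock);
        l->readers++;
        pthread_mutex_unlock(&l->lock);
}

static void head_read_unlock(struct head_lock *l)
{
        pthread_mutex_lock(&l->lock);
        l->readers--;
        pthread_mutex_unlock(&l->lock);
}

/* Caller must already hold l->lock, mirroring
 * __ecore_vlan_mac_h_write_trylock: zero readers means the writer
 * may proceed; otherwise it reports busy.
 */
static int head_write_trylock(struct head_lock *l)
{
        return l->readers == 0;
}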
  642 
   643 /**
   644  * ecore_get_n_elements - get up to n elements from the vlan_mac registry
   645  *
   646  * @sc:                 device handle
   647  * @o:                  vlan_mac object
   648  * @n:                  number of elements to get
   649  * @base:               base address for element placement
   650  * @stride:             stride between elements (in bytes)
        * @size:               size of a single element copy (in bytes)
   651  */
  652 static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
  653                                  int n, uint8_t *base, uint8_t stride, uint8_t size)
  654 {
  655         struct ecore_vlan_mac_registry_elem *pos;
  656         uint8_t *next = base;
  657         int counter = 0;
  658         int read_lock;
  659 
  660         ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
  661         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
  662         if (read_lock != ECORE_SUCCESS)
  663                 ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
  664 
  665         /* traverse list */
  666         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  667                                   struct ecore_vlan_mac_registry_elem) {
  668                 if (counter < n) {
  669                         ECORE_MEMCPY(next, &pos->u, size);
  670                         counter++;
   671                         ECORE_MSG(sc, "copied element number %d to address %p\n",
   672                                   counter, next);
  673                         next += stride + size;
  674                 }
  675         }
  676 
  677         if (read_lock == ECORE_SUCCESS) {
  678                 ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
  679                 ecore_vlan_mac_h_read_unlock(sc, o);
  680         }
  681 
  682         return counter * ETH_ALEN;
  683 }
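
A hypothetical same-file caller of the copy-out above (the helper name and buffer sizing are placeholders): with a stride of 0 the entries land back to back, because the cursor advances by stride + size after each copy.

/* Hypothetical: copy up to 4 configured MACs into a packed buffer. */
static int dump_first_macs(struct bxe_softc *sc,
                           struct ecore_vlan_mac_obj *mac_obj,
                           uint8_t buf[4 * ETH_ALEN])
{
        return ecore_get_n_elements(sc, mac_obj, 4, buf, 0, ETH_ALEN);
}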
  684 
  685 /* check_add() callbacks */
  686 static int ecore_check_mac_add(struct bxe_softc *sc,
  687                                struct ecore_vlan_mac_obj *o,
  688                                union ecore_classification_ramrod_data *data)
  689 {
  690         struct ecore_vlan_mac_registry_elem *pos;
  691 
  692         ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
  693 
  694         if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
  695                 return ECORE_INVAL;
  696 
  697         /* Check if a requested MAC already exists */
  698         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  699                                   struct ecore_vlan_mac_registry_elem)
  700                 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
  701                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
  702                         return ECORE_EXISTS;
  703 
  704         return ECORE_SUCCESS;
  705 }
  706 
  707 static int ecore_check_vlan_add(struct bxe_softc *sc,
  708                                 struct ecore_vlan_mac_obj *o,
  709                                 union ecore_classification_ramrod_data *data)
  710 {
  711         struct ecore_vlan_mac_registry_elem *pos;
  712 
  713         ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
  714 
  715         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  716                                   struct ecore_vlan_mac_registry_elem)
  717                 if (data->vlan.vlan == pos->u.vlan.vlan)
  718                         return ECORE_EXISTS;
  719 
  720         return ECORE_SUCCESS;
  721 }
  722 
  723 static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
  724                                     struct ecore_vlan_mac_obj *o,
  725                                    union ecore_classification_ramrod_data *data)
  726 {
  727         struct ecore_vlan_mac_registry_elem *pos;
  728 
  729         ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
  730                   data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
  731 
  732         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  733                                   struct ecore_vlan_mac_registry_elem)
  734                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
  735                     (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
  736                                   ETH_ALEN)) &&
  737                     (data->vlan_mac.is_inner_mac ==
  738                      pos->u.vlan_mac.is_inner_mac))
  739                         return ECORE_EXISTS;
  740 
  741         return ECORE_SUCCESS;
  742 }
  743 
  744 static int ecore_check_vxlan_fltr_add(struct bxe_softc *sc,
  745                                 struct ecore_vlan_mac_obj *o,
  746                                 union ecore_classification_ramrod_data *data)
  747 {
  748         struct ecore_vlan_mac_registry_elem *pos;
  749 
  750         ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for ADD command\n",
  751                   data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
  752 
  753         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  754                                   struct ecore_vlan_mac_registry_elem)
  755                 if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
  756                                pos->u.vxlan_fltr.innermac,
  757                                ETH_ALEN)) &&
  758                              (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
  759                         return ECORE_EXISTS;
  760 
  761         return ECORE_SUCCESS;
  762 }
  763 
  764 /* check_del() callbacks */
  765 static struct ecore_vlan_mac_registry_elem *
  766         ecore_check_mac_del(struct bxe_softc *sc,
  767                             struct ecore_vlan_mac_obj *o,
  768                             union ecore_classification_ramrod_data *data)
  769 {
  770         struct ecore_vlan_mac_registry_elem *pos;
  771 
  772         ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
  773 
  774         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  775                                   struct ecore_vlan_mac_registry_elem)
  776                 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
  777                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
  778                         return pos;
  779 
  780         return NULL;
  781 }
  782 
  783 static struct ecore_vlan_mac_registry_elem *
  784         ecore_check_vlan_del(struct bxe_softc *sc,
  785                              struct ecore_vlan_mac_obj *o,
  786                              union ecore_classification_ramrod_data *data)
  787 {
  788         struct ecore_vlan_mac_registry_elem *pos;
  789 
  790         ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
  791 
  792         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  793                                   struct ecore_vlan_mac_registry_elem)
  794                 if (data->vlan.vlan == pos->u.vlan.vlan)
  795                         return pos;
  796 
  797         return NULL;
  798 }
  799 
  800 static struct ecore_vlan_mac_registry_elem *
  801         ecore_check_vlan_mac_del(struct bxe_softc *sc,
  802                                  struct ecore_vlan_mac_obj *o,
  803                                  union ecore_classification_ramrod_data *data)
  804 {
  805         struct ecore_vlan_mac_registry_elem *pos;
  806 
  807         ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
  808                   data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
  809 
  810         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  811                                   struct ecore_vlan_mac_registry_elem)
  812                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
  813                     (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
  814                              ETH_ALEN)) &&
  815                     (data->vlan_mac.is_inner_mac ==
  816                      pos->u.vlan_mac.is_inner_mac))
  817                         return pos;
  818 
  819         return NULL;
  820 }
  821 
  822 static struct ecore_vlan_mac_registry_elem *
  823         ecore_check_vxlan_fltr_del
  824                         (struct bxe_softc *sc,
  825                         struct ecore_vlan_mac_obj *o,
  826                         union ecore_classification_ramrod_data *data)
  827 {
  828         struct ecore_vlan_mac_registry_elem *pos;
  829 
  830         ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for DEL command\n",
  831                   data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
  832 
  833         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
  834                                   struct ecore_vlan_mac_registry_elem)
  835                 if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
  836                                pos->u.vxlan_fltr.innermac,
  837                                ETH_ALEN)) &&
  838                                (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
  839                         return pos;
  840 
  841         return NULL;
  842 }
  843 
  844 /* check_move() callback */
  845 static bool ecore_check_move(struct bxe_softc *sc,
  846                              struct ecore_vlan_mac_obj *src_o,
  847                              struct ecore_vlan_mac_obj *dst_o,
  848                              union ecore_classification_ramrod_data *data)
  849 {
  850         struct ecore_vlan_mac_registry_elem *pos;
  851         int rc;
  852 
  853         /* Check if we can delete the requested configuration from the first
  854          * object.
  855          */
  856         pos = src_o->check_del(sc, src_o, data);
  857 
  858         /*  check if configuration can be added */
  859         rc = dst_o->check_add(sc, dst_o, data);
  860 
  861         /* If this classification can not be added (is already set)
  862          * or can't be deleted - return an error.
  863          */
  864         if (rc || !pos)
  865                 return FALSE;
  866 
  867         return TRUE;
  868 }
  869 
  870 static bool ecore_check_move_always_err(
  871         struct bxe_softc *sc,
  872         struct ecore_vlan_mac_obj *src_o,
  873         struct ecore_vlan_mac_obj *dst_o,
  874         union ecore_classification_ramrod_data *data)
  875 {
  876         return FALSE;
  877 }
  878 
  879 static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
  880 {
  881         struct ecore_raw_obj *raw = &o->raw;
  882         uint8_t rx_tx_flag = 0;
  883 
  884         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
  885             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
  886                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
  887 
  888         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
  889             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
  890                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
  891 
  892         return rx_tx_flag;
  893 }
  894 
  895 void ecore_set_mac_in_nig(struct bxe_softc *sc,
  896                           bool add, unsigned char *dev_addr, int index)
  897 {
  898         uint32_t wb_data[2];
  899         uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
  900                          NIG_REG_LLH0_FUNC_MEM;
  901 
  902         if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
  903                 return;
  904 
  905         if (index > ECORE_LLH_CAM_MAX_PF_LINE)
  906                 return;
  907 
  908         ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
  909                   (add ? "ADD" : "DELETE"), index);
  910 
  911         if (add) {
  912                 /* LLH_FUNC_MEM is a uint64_t WB register */
  913                 reg_offset += 8*index;
  914 
  915                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
  916                               (dev_addr[4] <<  8) |  dev_addr[5]);
  917                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
  918 
  919                 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
  920         }
  921 
  922         REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
  923                                   NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
  924 }
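
The two write-buffer words above split the six MAC bytes so that wb_data[0] carries bytes 2..5 (byte 2 most significant) and wb_data[1] carries bytes 0..1. A standalone restatement of that packing:

#include <stdint.h>

/* Pack a 6-byte MAC into the two 32-bit words used by the LLH write. */
static void mac_to_wb(const uint8_t m[6], uint32_t wb[2])
{
        wb[0] = ((uint32_t)m[2] << 24) | ((uint32_t)m[3] << 16) |
                ((uint32_t)m[4] <<  8) |  (uint32_t)m[5];
        wb[1] = ((uint32_t)m[0] <<  8) |  (uint32_t)m[1];
}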
  925 
  926 /**
  927  * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
  928  *
  929  * @sc:         device handle
  930  * @o:          queue for which we want to configure this rule
  931  * @add:        if TRUE the command is an ADD command, DEL otherwise
  932  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
  933  * @hdr:        pointer to a header to setup
  934  *
  935  */
  936 static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
  937         struct ecore_vlan_mac_obj *o, bool add, int opcode,
  938         struct eth_classify_cmd_header *hdr)
  939 {
  940         struct ecore_raw_obj *raw = &o->raw;
  941 
  942         hdr->client_id = raw->cl_id;
  943         hdr->func_id = raw->func_id;
  944 
  945         /* Rx or/and Tx (internal switching) configuration ? */
  946         hdr->cmd_general_data |=
  947                 ecore_vlan_mac_get_rx_tx_flag(o);
  948 
  949         if (add)
  950                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
  951 
  952         hdr->cmd_general_data |=
  953                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
  954 }
  955 
   956 /**
   957  * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
   958  *
   959  * @cid:        connection id
   960  * @type:       ECORE_FILTER_XXX_PENDING
   961  * @hdr:        pointer to header to setup
   962  * @rule_cnt:   number of rules in the ramrod data buffer
   963  *
   964  * Currently we always configure one rule; the echo field is set to
   965  * contain the CID and the opcode type.
   966  */
  967 static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
  968                                 struct eth_classify_header *hdr, int rule_cnt)
  969 {
  970         hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
  971                                 (type << ECORE_SWCID_SHIFT));
  972         hdr->rule_cnt = (uint8_t)rule_cnt;
  973 }
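
The echo word therefore multiplexes the software CID and the pending-command type. A standalone sketch of the encode/decode pair; the real ECORE_SWCID_MASK/ECORE_SWCID_SHIFT values live in ecore_sp.h, so the constants below are assumptions for illustration only:

#include <stdint.h>

#define SWCID_SHIFT 17                          /* assumed value */
#define SWCID_MASK  ((1u << SWCID_SHIFT) - 1)   /* assumed value */

static uint32_t echo_encode(uint32_t cid, uint32_t type)
{
        return (cid & SWCID_MASK) | (type << SWCID_SHIFT);
}

static void echo_decode(uint32_t echo, uint32_t *cid, uint32_t *type)
{
        *cid  = echo & SWCID_MASK;
        *type = (echo & ~SWCID_MASK) >> SWCID_SHIFT;
}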
  974 
  975 /* hw_config() callbacks */
  976 static void ecore_set_one_mac_e2(struct bxe_softc *sc,
  977                                  struct ecore_vlan_mac_obj *o,
  978                                  struct ecore_exeq_elem *elem, int rule_idx,
  979                                  int cam_offset)
  980 {
  981         struct ecore_raw_obj *raw = &o->raw;
  982         struct eth_classify_rules_ramrod_data *data =
  983                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
  984         int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
  985         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
  986         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
  987         unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
  988         uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
  989 
   990         /* Set LLH CAM entry: currently only iSCSI and ETH MACs are
   991          * relevant. In addition, the current implementation is tuned for
   992          * a single ETH MAC.
   993          *
   994          * When a PF configuration with multiple unicast ETH MACs in
   995          * switch-independent mode is required (NetQ, multiple netdev
   996          * MACs, etc.), consider making better use of the 8 per-function
   997          * MAC entries in the LLH register. There are also
   998          * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
   999          * number of CAM entries to 16.
  1000          *
  1001          * Currently we won't configure the NIG for MACs other than the
  1002          * primary ETH MAC and the iSCSI L2 MAC.
  1003          *
  1004          * If this MAC is moving from one Queue to another, there is no
  1005          * need to change the NIG configuration.
  1006          */
 1007         if (cmd != ECORE_VLAN_MAC_MOVE) {
 1008                 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
 1009                         ecore_set_mac_in_nig(sc, add, mac,
 1010                                              ECORE_LLH_CAM_ISCSI_ETH_LINE);
 1011                 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
 1012                         ecore_set_mac_in_nig(sc, add, mac,
 1013                                              ECORE_LLH_CAM_ETH_LINE);
 1014         }
 1015 
 1016         /* Reset the ramrod data buffer for the first rule */
 1017         if (rule_idx == 0)
 1018                 ECORE_MEMSET(data, 0, sizeof(*data));
 1019 
 1020         /* Setup a command header */
 1021         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
 1022                                       &rule_entry->mac.header);
 1023 
 1024         ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
 1025                   (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
 1026 
 1027         /* Set a MAC itself */
 1028         ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
 1029                               &rule_entry->mac.mac_mid,
 1030                               &rule_entry->mac.mac_lsb, mac);
 1031         rule_entry->mac.inner_mac =
 1032                 ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
 1033 
 1034         /* MOVE: Add a rule that will add this MAC to the target Queue */
 1035         if (cmd == ECORE_VLAN_MAC_MOVE) {
 1036                 rule_entry++;
 1037                 rule_cnt++;
 1038 
 1039                 /* Setup ramrod data */
 1040                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
 1041                                         elem->cmd_data.vlan_mac.target_obj,
 1042                                               TRUE, CLASSIFY_RULE_OPCODE_MAC,
 1043                                               &rule_entry->mac.header);
 1044 
 1045                 /* Set a MAC itself */
 1046                 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
 1047                                       &rule_entry->mac.mac_mid,
 1048                                       &rule_entry->mac.mac_lsb, mac);
 1049                 rule_entry->mac.inner_mac =
 1050                         ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.
 1051                                        u.mac.is_inner_mac);
 1052         }
 1053 
 1054         /* Set the ramrod data header */
 1055         /* TODO: take this to the higher level in order to prevent multiple
 1056                  writing */
 1057         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
 1058                                         rule_cnt);
 1059 }
 1060 
 1061 /**
 1062  * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 1063  *
 1064  * @sc:         device handle
 1065  * @o:          queue
  1066  * @type:       ECORE_FILTER_XXX_PENDING
 1067  * @cam_offset: offset in cam memory
 1068  * @hdr:        pointer to a header to setup
 1069  *
 1070  * E1/E1H
 1071  */
 1072 static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
 1073         struct ecore_vlan_mac_obj *o, int type, int cam_offset,
 1074         struct mac_configuration_hdr *hdr)
 1075 {
 1076         struct ecore_raw_obj *r = &o->raw;
 1077 
 1078         hdr->length = 1;
 1079         hdr->offset = (uint8_t)cam_offset;
 1080         hdr->client_id = ECORE_CPU_TO_LE16(0xff);
 1081         hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
 1082                                 (type << ECORE_SWCID_SHIFT));
 1083 }
 1084 
 1085 static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
 1086         struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
 1087         uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
 1088 {
 1089         struct ecore_raw_obj *r = &o->raw;
 1090         uint32_t cl_bit_vec = (1 << r->cl_id);
 1091 
 1092         cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
 1093         cfg_entry->pf_id = r->func_id;
 1094         cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
 1095 
 1096         if (add) {
 1097                 ECORE_SET_FLAG(cfg_entry->flags,
 1098                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 1099                                T_ETH_MAC_COMMAND_SET);
 1100                 ECORE_SET_FLAG(cfg_entry->flags,
 1101                                MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
 1102                                opcode);
 1103 
 1104                 /* Set a MAC in a ramrod data */
 1105                 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
 1106                                       &cfg_entry->middle_mac_addr,
 1107                                       &cfg_entry->lsb_mac_addr, mac);
 1108         } else
 1109                 ECORE_SET_FLAG(cfg_entry->flags,
 1110                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 1111                                T_ETH_MAC_COMMAND_INVALIDATE);
 1112 }
 1113 
 1114 static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
 1115         struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
 1116         uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
 1117 {
 1118         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
 1119         struct ecore_raw_obj *raw = &o->raw;
 1120 
 1121         ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
 1122                                          &config->hdr);
 1123         ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
 1124                                          cfg_entry);
 1125 
 1126         ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
 1127                   (add ? "setting" : "clearing"),
 1128                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
 1129 }
 1130 
 1131 /**
 1132  * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 1133  *
 1134  * @sc:         device handle
 1135  * @o:          ecore_vlan_mac_obj
 1136  * @elem:       ecore_exeq_elem
 1137  * @rule_idx:   rule_idx
 1138  * @cam_offset: cam_offset
 1139  */
 1140 static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
 1141                                   struct ecore_vlan_mac_obj *o,
 1142                                   struct ecore_exeq_elem *elem, int rule_idx,
 1143                                   int cam_offset)
 1144 {
 1145         struct ecore_raw_obj *raw = &o->raw;
 1146         struct mac_configuration_cmd *config =
 1147                 (struct mac_configuration_cmd *)(raw->rdata);
 1148         /* 57710 and 57711 do not support MOVE command,
 1149          * so it's either ADD or DEL
 1150          */
 1151         bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
 1152                 TRUE : FALSE;
 1153 
 1154         /* Reset the ramrod data buffer */
 1155         ECORE_MEMSET(config, 0, sizeof(*config));
 1156 
 1157         ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
 1158                                      cam_offset, add,
 1159                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
 1160                                      ETH_VLAN_FILTER_ANY_VLAN, config);
 1161 }
 1162 
 1163 static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
 1164                                   struct ecore_vlan_mac_obj *o,
 1165                                   struct ecore_exeq_elem *elem, int rule_idx,
 1166                                   int cam_offset)
 1167 {
 1168         struct ecore_raw_obj *raw = &o->raw;
 1169         struct eth_classify_rules_ramrod_data *data =
 1170                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
 1171         int rule_cnt = rule_idx + 1;
 1172         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
 1173         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
 1174         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
 1175         uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
 1176 
 1177         /* Reset the ramrod data buffer for the first rule */
 1178         if (rule_idx == 0)
 1179                 ECORE_MEMSET(data, 0, sizeof(*data));
 1180 
 1181         /* Set a rule header */
 1182         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
 1183                                       &rule_entry->vlan.header);
 1184 
 1185         ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
 1186                   vlan);
 1187 
 1188         /* Set a VLAN itself */
 1189         rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
 1190 
  1191         /* MOVE: Add a rule that will add this VLAN to the target Queue */
 1192         if (cmd == ECORE_VLAN_MAC_MOVE) {
 1193                 rule_entry++;
 1194                 rule_cnt++;
 1195 
 1196                 /* Setup ramrod data */
 1197                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
 1198                                         elem->cmd_data.vlan_mac.target_obj,
 1199                                               TRUE, CLASSIFY_RULE_OPCODE_VLAN,
 1200                                               &rule_entry->vlan.header);
 1201 
 1202                 /* Set a VLAN itself */
 1203                 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
 1204         }
 1205 
 1206         /* Set the ramrod data header */
 1207         /* TODO: take this to the higher level in order to prevent multiple
 1208                  writing */
 1209         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
 1210                                         rule_cnt);
 1211 }
 1212 
 1213 static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
 1214                                       struct ecore_vlan_mac_obj *o,
 1215                                       struct ecore_exeq_elem *elem,
 1216                                       int rule_idx, int cam_offset)
 1217 {
 1218         struct ecore_raw_obj *raw = &o->raw;
 1219         struct eth_classify_rules_ramrod_data *data =
 1220                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
 1221         int rule_cnt = rule_idx + 1;
 1222         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
 1223         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
 1224         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
 1225         uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
 1226         uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
 1227 
 1228         /* Reset the ramrod data buffer for the first rule */
 1229         if (rule_idx == 0)
 1230                 ECORE_MEMSET(data, 0, sizeof(*data));
 1231 
 1232         /* Set a rule header */
 1233         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
 1234                                       &rule_entry->pair.header);
 1235 
 1236         /* Set VLAN and MAC themselves */
 1237         rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
 1238         ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
 1239                               &rule_entry->pair.mac_mid,
 1240                               &rule_entry->pair.mac_lsb, mac);
 1241         rule_entry->pair.inner_mac =
 1242                         elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
  1243         /* MOVE: Add a rule that will add this pair to the target Queue */
 1244         if (cmd == ECORE_VLAN_MAC_MOVE) {
 1245                 rule_entry++;
 1246                 rule_cnt++;
 1247 
 1248                 /* Setup ramrod data */
 1249                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
 1250                                         elem->cmd_data.vlan_mac.target_obj,
 1251                                               TRUE, CLASSIFY_RULE_OPCODE_PAIR,
 1252                                               &rule_entry->pair.header);
 1253 
  1254                 /* Set the VLAN and MAC again */
 1255                 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
 1256                 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
 1257                                       &rule_entry->pair.mac_mid,
 1258                                       &rule_entry->pair.mac_lsb, mac);
 1259                 rule_entry->pair.inner_mac =
 1260                         elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
 1261         }
 1262 
 1263         /* Set the ramrod data header */
 1264         /* TODO: take this to the higher level in order to prevent multiple
 1265                  writing */
 1266         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
 1267                                         rule_cnt);
 1268 }
 1269 
 1270 static void ecore_set_one_vxlan_fltr_e2(struct bxe_softc *sc,
 1271                                                 struct ecore_vlan_mac_obj *o,
 1272                                                 struct ecore_exeq_elem *elem,
 1273                                                 int rule_idx, int cam_offset)
 1274 {
 1275         struct ecore_raw_obj *raw = &o->raw;
 1276         struct eth_classify_rules_ramrod_data *data =
 1277                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
 1278         int rule_cnt = rule_idx + 1;
 1279         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
 1280         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
 1281         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
 1282         uint32_t vni = elem->cmd_data.vlan_mac.u.vxlan_fltr.vni;
 1283         uint8_t *mac = elem->cmd_data.vlan_mac.u.vxlan_fltr.innermac;
 1284 
 1285         /* Reset the ramrod data buffer for the first rule */
 1286         if (rule_idx == 0)
 1287                 ECORE_MEMSET(data, 0, sizeof(*data));
 1288 
 1289         /* Set a rule header */
 1290         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add,
 1291                                       CLASSIFY_RULE_OPCODE_IMAC_VNI,
 1292                                       &rule_entry->imac_vni.header);
 1293 
  1294         /* Set the VNI and inner MAC themselves */
 1295         rule_entry->imac_vni.vni = vni;
 1296         ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
 1297                               &rule_entry->imac_vni.imac_mid,
 1298                               &rule_entry->imac_vni.imac_lsb, mac);
 1299 
  1300         /* MOVE: Add a rule that will add this filter to the target Queue */
 1301         if (cmd == ECORE_VLAN_MAC_MOVE) {
 1302                 rule_entry++;
 1303                 rule_cnt++;
 1304 
 1305                 /* Setup ramrod data */
 1306                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
 1307                                               elem->cmd_data.vlan_mac.target_obj,
 1308                                               TRUE, CLASSIFY_RULE_OPCODE_IMAC_VNI,
 1309                                               &rule_entry->imac_vni.header);
 1310 
1311                 /* Set the VNI and inner MAC themselves */
 1312                 rule_entry->imac_vni.vni = vni;
 1313                 ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
 1314                                       &rule_entry->imac_vni.imac_mid,
 1315                                       &rule_entry->imac_vni.imac_lsb, mac);
 1316         }
 1317 
 1318         /* Set the ramrod data header */
1319         /* TODO: move this to a higher level to avoid writing the
1320          * header multiple times
1321          */
 1322         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state,
 1323                                         &data->header, rule_cnt);
 1324 }
 1325 
1326 /**
1327  * ecore_set_one_vlan_mac_e1h - set one VLAN-MAC rule for 57711 (E1H)
1328  *
1329  * @sc:         device handle
1330  * @o:          ecore_vlan_mac_obj
1331  * @elem:       ecore_exeq_elem
1332  * @rule_idx:   rule index
1333  * @cam_offset: CAM offset
1334  */
 1335 static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
 1336                                        struct ecore_vlan_mac_obj *o,
 1337                                        struct ecore_exeq_elem *elem,
 1338                                        int rule_idx, int cam_offset)
 1339 {
 1340         struct ecore_raw_obj *raw = &o->raw;
 1341         struct mac_configuration_cmd *config =
 1342                 (struct mac_configuration_cmd *)(raw->rdata);
 1343         /* 57710 and 57711 do not support MOVE command,
 1344          * so it's either ADD or DEL
 1345          */
 1346         bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
 1347                 TRUE : FALSE;
 1348 
 1349         /* Reset the ramrod data buffer */
 1350         ECORE_MEMSET(config, 0, sizeof(*config));
 1351 
 1352         ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
 1353                                      cam_offset, add,
 1354                                      elem->cmd_data.vlan_mac.u.vlan_mac.mac,
 1355                                      elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
 1356                                      ETH_VLAN_FILTER_CLASSIFY, config);
 1357 }
 1358 
 1359 #define list_next_entry(pos, member) \
 1360         list_entry((pos)->member.next, typeof(*(pos)), member)
 1361 
1362 /**
1363  * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
1364  *
1365  * @sc:         device handle
1366  * @p:          command parameters
1367  * @ppos:       pointer to the iterator cookie
1368  *
1369  * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
1370  * previously configured elements.
1371  *
1372  * Of the command parameters, only the RAMROD_COMP_WAIT bit in
1373  * ramrod_flags is taken into account.
1374  *
1375  * The cookie must be handed back in the next call so that the function
1376  * handles the next element. Passing *ppos as NULL restarts the
1377  * iterator; if *ppos is NULL on return, the last element has been
1378  * handled.
1379  *
1380  */
 1381 static int ecore_vlan_mac_restore(struct bxe_softc *sc,
 1382                            struct ecore_vlan_mac_ramrod_params *p,
 1383                            struct ecore_vlan_mac_registry_elem **ppos)
 1384 {
 1385         struct ecore_vlan_mac_registry_elem *pos;
 1386         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
 1387 
 1388         /* If list is empty - there is nothing to do here */
 1389         if (ECORE_LIST_IS_EMPTY(&o->head)) {
 1390                 *ppos = NULL;
 1391                 return 0;
 1392         }
 1393 
 1394         /* make a step... */
 1395         if (*ppos == NULL)
 1396                 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
 1397                                             struct ecore_vlan_mac_registry_elem,
 1398                                                link);
 1399         else
 1400                 *ppos = ECORE_LIST_NEXT(*ppos, link,
 1401                                         struct ecore_vlan_mac_registry_elem);
 1402 
 1403         pos = *ppos;
 1404 
 1405         /* If it's the last step - return NULL */
 1406         if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
 1407                 *ppos = NULL;
 1408 
 1409         /* Prepare a 'user_req' */
 1410         ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
 1411 
 1412         /* Set the command */
 1413         p->user_req.cmd = ECORE_VLAN_MAC_ADD;
 1414 
 1415         /* Set vlan_mac_flags */
 1416         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
 1417 
 1418         /* Set a restore bit */
 1419         ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
 1420 
 1421         return ecore_config_vlan_mac(sc, p);
 1422 }
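/*
 * A caller typically drives the restore iterator in a loop until the
 * cookie comes back NULL. A minimal, hypothetical sketch (assuming 'p'
 * is a prepared ecore_vlan_mac_ramrod_params with RAMROD_COMP_WAIT set):
 *
 *      struct ecore_vlan_mac_registry_elem *pos = NULL;
 *      int rc;
 *
 *      do {
 *              rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *              if (rc < 0)
 *                      break;  /- restoring one element failed -/
 *      } while (pos != NULL);
 */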
 1423 
1424 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1425  * pointer to the pending element matching the given criteria, or NULL if
1426  * no such element has been found.
1427  */
 1428 static struct ecore_exeq_elem *ecore_exeq_get_mac(
 1429         struct ecore_exe_queue_obj *o,
 1430         struct ecore_exeq_elem *elem)
 1431 {
 1432         struct ecore_exeq_elem *pos;
 1433         struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
 1434 
 1435         /* Check pending for execution commands */
 1436         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
 1437                                   struct ecore_exeq_elem)
 1438                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
 1439                               sizeof(*data)) &&
 1440                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
 1441                         return pos;
 1442 
 1443         return NULL;
 1444 }
 1445 
 1446 static struct ecore_exeq_elem *ecore_exeq_get_vlan(
 1447         struct ecore_exe_queue_obj *o,
 1448         struct ecore_exeq_elem *elem)
 1449 {
 1450         struct ecore_exeq_elem *pos;
 1451         struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
 1452 
 1453         /* Check pending for execution commands */
 1454         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
 1455                                   struct ecore_exeq_elem)
 1456                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
 1457                               sizeof(*data)) &&
 1458                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
 1459                         return pos;
 1460 
 1461         return NULL;
 1462 }
 1463 
 1464 static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
 1465         struct ecore_exe_queue_obj *o,
 1466         struct ecore_exeq_elem *elem)
 1467 {
 1468         struct ecore_exeq_elem *pos;
 1469         struct ecore_vlan_mac_ramrod_data *data =
 1470                 &elem->cmd_data.vlan_mac.u.vlan_mac;
 1471 
 1472         /* Check pending for execution commands */
 1473         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
 1474                                   struct ecore_exeq_elem)
 1475                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
 1476                               sizeof(*data)) &&
 1477                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
 1478                         return pos;
 1479 
 1480         return NULL;
 1481 }
 1482 
1483 static struct ecore_exeq_elem *ecore_exeq_get_vxlan_fltr(
1484         struct ecore_exe_queue_obj *o,
1485         struct ecore_exeq_elem *elem)
 1486 {
 1487         struct ecore_exeq_elem *pos;
 1488         struct ecore_vxlan_fltr_ramrod_data *data =
 1489                 &elem->cmd_data.vlan_mac.u.vxlan_fltr;
 1490 
 1491         /* Check pending for execution commands */
 1492         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
 1493                                   struct ecore_exeq_elem)
 1494                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vxlan_fltr, data,
 1495                               sizeof(*data)) &&
 1496                               (pos->cmd_data.vlan_mac.cmd ==
 1497                               elem->cmd_data.vlan_mac.cmd))
 1498                         return pos;
 1499 
 1500         return NULL;
 1501 }
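/*
 * Note: each object type needs its own 'get' helper because the
 * ECORE_MEMCMP() above must span only the union member that is valid for
 * that type; comparing the whole union would read bytes the caller never
 * initialized.
 */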
 1502 
 1503 /**
 1504  * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 1505  *
 1506  * @sc:         device handle
 1507  * @qo:         ecore_qable_obj
 1508  * @elem:       ecore_exeq_elem
 1509  *
 1510  * Checks that the requested configuration can be added. If yes and if
 1511  * requested, consume CAM credit.
 1512  *
 1513  * The 'validate' is run after the 'optimize'.
 1514  *
 1515  */
 1516 static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
 1517                                               union ecore_qable_obj *qo,
 1518                                               struct ecore_exeq_elem *elem)
 1519 {
 1520         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
 1521         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
 1522         int rc;
 1523 
 1524         /* Check the registry */
 1525         rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
 1526         if (rc) {
 1527                 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
 1528                 return rc;
 1529         }
 1530 
 1531         /* Check if there is a pending ADD command for this
 1532          * MAC/VLAN/VLAN-MAC. Return an error if there is.
 1533          */
 1534         if (exeq->get(exeq, elem)) {
 1535                 ECORE_MSG(sc, "There is a pending ADD command already\n");
 1536                 return ECORE_EXISTS;
 1537         }
 1538 
 1539         /* TODO: Check the pending MOVE from other objects where this
 1540          * object is a destination object.
 1541          */
 1542 
1543         /* Consume the credit unless asked not to */
 1544         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
 1545                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
 1546             o->get_credit(o)))
 1547                 return ECORE_INVAL;
 1548 
 1549         return ECORE_SUCCESS;
 1550 }
 1551 
 1552 /**
 1553  * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 1554  *
 1555  * @sc:         device handle
1556  * @qo:         qable object to check
 1557  * @elem:       element that needs to be deleted
 1558  *
 1559  * Checks that the requested configuration can be deleted. If yes and if
 1560  * requested, returns a CAM credit.
 1561  *
 1562  * The 'validate' is run after the 'optimize'.
 1563  */
 1564 static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
 1565                                               union ecore_qable_obj *qo,
 1566                                               struct ecore_exeq_elem *elem)
 1567 {
 1568         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
 1569         struct ecore_vlan_mac_registry_elem *pos;
 1570         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
 1571         struct ecore_exeq_elem query_elem;
 1572 
1573         /* If this classification cannot be deleted (it doesn't exist)
1574          * - return ECORE_EXISTS.
1575          */
 1576         pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
 1577         if (!pos) {
 1578                 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
 1579                 return ECORE_EXISTS;
 1580         }
 1581 
 1582         /* Check if there are pending DEL or MOVE commands for this
 1583          * MAC/VLAN/VLAN-MAC. Return an error if so.
 1584          */
 1585         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
 1586 
 1587         /* Check for MOVE commands */
 1588         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
 1589         if (exeq->get(exeq, &query_elem)) {
 1590                 ECORE_ERR("There is a pending MOVE command already\n");
 1591                 return ECORE_INVAL;
 1592         }
 1593 
 1594         /* Check for DEL commands */
 1595         if (exeq->get(exeq, elem)) {
 1596                 ECORE_MSG(sc, "There is a pending DEL command already\n");
 1597                 return ECORE_EXISTS;
 1598         }
 1599 
1600         /* Return the credit to the credit pool unless asked not to */
 1601         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
 1602                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
 1603             o->put_credit(o))) {
 1604                 ECORE_ERR("Failed to return a credit\n");
 1605                 return ECORE_INVAL;
 1606         }
 1607 
 1608         return ECORE_SUCCESS;
 1609 }
 1610 
 1611 /**
 1612  * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 1613  *
 1614  * @sc:         device handle
1615  * @qo:         qable object to check (source)
 1616  * @elem:       element that needs to be moved
 1617  *
 1618  * Checks that the requested configuration can be moved. If yes and if
 1619  * requested, returns a CAM credit.
 1620  *
 1621  * The 'validate' is run after the 'optimize'.
 1622  */
 1623 static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
 1624                                                union ecore_qable_obj *qo,
 1625                                                struct ecore_exeq_elem *elem)
 1626 {
 1627         struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
 1628         struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
 1629         struct ecore_exeq_elem query_elem;
 1630         struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
 1631         struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
 1632 
 1633         /* Check if we can perform this operation based on the current registry
 1634          * state.
 1635          */
 1636         if (!src_o->check_move(sc, src_o, dest_o,
 1637                                &elem->cmd_data.vlan_mac.u)) {
 1638                 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
 1639                 return ECORE_INVAL;
 1640         }
 1641 
 1642         /* Check if there is an already pending DEL or MOVE command for the
 1643          * source object or ADD command for a destination object. Return an
 1644          * error if so.
 1645          */
 1646         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
 1647 
 1648         /* Check DEL on source */
 1649         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
 1650         if (src_exeq->get(src_exeq, &query_elem)) {
 1651                 ECORE_ERR("There is a pending DEL command on the source queue already\n");
 1652                 return ECORE_INVAL;
 1653         }
 1654 
 1655         /* Check MOVE on source */
 1656         if (src_exeq->get(src_exeq, elem)) {
 1657                 ECORE_MSG(sc, "There is a pending MOVE command already\n");
 1658                 return ECORE_EXISTS;
 1659         }
 1660 
 1661         /* Check ADD on destination */
 1662         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
 1663         if (dest_exeq->get(dest_exeq, &query_elem)) {
 1664                 ECORE_ERR("There is a pending ADD command on the destination queue already\n");
 1665                 return ECORE_INVAL;
 1666         }
 1667 
1668         /* Consume the destination's credit unless asked not to */
 1669         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
 1670                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
 1671             dest_o->get_credit(dest_o)))
 1672                 return ECORE_INVAL;
 1673 
 1674         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
 1675                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
 1676             src_o->put_credit(src_o))) {
 1677                 /* return the credit taken from dest... */
 1678                 dest_o->put_credit(dest_o);
 1679                 return ECORE_INVAL;
 1680         }
 1681 
 1682         return ECORE_SUCCESS;
 1683 }
 1684 
 1685 static int ecore_validate_vlan_mac(struct bxe_softc *sc,
 1686                                    union ecore_qable_obj *qo,
 1687                                    struct ecore_exeq_elem *elem)
 1688 {
 1689         switch (elem->cmd_data.vlan_mac.cmd) {
 1690         case ECORE_VLAN_MAC_ADD:
 1691                 return ecore_validate_vlan_mac_add(sc, qo, elem);
 1692         case ECORE_VLAN_MAC_DEL:
 1693                 return ecore_validate_vlan_mac_del(sc, qo, elem);
 1694         case ECORE_VLAN_MAC_MOVE:
 1695                 return ecore_validate_vlan_mac_move(sc, qo, elem);
 1696         default:
 1697                 return ECORE_INVAL;
 1698         }
 1699 }
 1700 
 1701 static int ecore_remove_vlan_mac(struct bxe_softc *sc,
 1702                                   union ecore_qable_obj *qo,
 1703                                   struct ecore_exeq_elem *elem)
 1704 {
 1705         int rc = 0;
 1706 
 1707         /* If consumption wasn't required, nothing to do */
 1708         if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
 1709                            &elem->cmd_data.vlan_mac.vlan_mac_flags))
 1710                 return ECORE_SUCCESS;
 1711 
 1712         switch (elem->cmd_data.vlan_mac.cmd) {
 1713         case ECORE_VLAN_MAC_ADD:
 1714         case ECORE_VLAN_MAC_MOVE:
 1715                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
 1716                 break;
 1717         case ECORE_VLAN_MAC_DEL:
 1718                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
 1719                 break;
 1720         default:
 1721                 return ECORE_INVAL;
 1722         }
 1723 
 1724         if (rc != TRUE)
 1725                 return ECORE_INVAL;
 1726 
 1727         return ECORE_SUCCESS;
 1728 }
 1729 
 1730 /**
 1731  * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 1732  *
 1733  * @sc:         device handle
 1734  * @o:          ecore_vlan_mac_obj
 1735  *
 1736  */
 1737 static int ecore_wait_vlan_mac(struct bxe_softc *sc,
 1738                                struct ecore_vlan_mac_obj *o)
 1739 {
 1740         int cnt = 5000, rc;
 1741         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
 1742         struct ecore_raw_obj *raw = &o->raw;
 1743 
 1744         while (cnt--) {
 1745                 /* Wait for the current command to complete */
 1746                 rc = raw->wait_comp(sc, raw);
 1747                 if (rc)
 1748                         return rc;
 1749 
 1750                 /* Wait until there are no pending commands */
 1751                 if (!ecore_exe_queue_empty(exeq))
 1752                         ECORE_WAIT(sc, 1000);
 1753                 else
 1754                         return ECORE_SUCCESS;
 1755         }
 1756 
 1757         return ECORE_TIMEOUT;
 1758 }
 1759 
 1760 static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
 1761                                          struct ecore_vlan_mac_obj *o,
 1762                                          unsigned long *ramrod_flags)
 1763 {
 1764         int rc = ECORE_SUCCESS;
 1765 
 1766         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
 1767 
 1768         ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
 1769         rc = __ecore_vlan_mac_h_write_trylock(sc, o);
 1770 
 1771         if (rc != ECORE_SUCCESS) {
 1772                 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
 1773 
1774                 /* The calling function should not differentiate between this
1775                  * case and the case in which there is already a pending ramrod
1776                  */
 1777                 rc = ECORE_PENDING;
 1778         } else {
 1779                 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
 1780         }
 1781         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
 1782 
 1783         return rc;
 1784 }
 1785 
1786 /**
1787  * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1788  *
1789  * @sc:           device handle
1790  * @o:            ecore_vlan_mac_obj
1791  * @cqe:          completion queue element for the ramrod
1792  * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1793  *
1794  */
 1795 static int ecore_complete_vlan_mac(struct bxe_softc *sc,
 1796                                    struct ecore_vlan_mac_obj *o,
 1797                                    union event_ring_elem *cqe,
 1798                                    unsigned long *ramrod_flags)
 1799 {
 1800         struct ecore_raw_obj *r = &o->raw;
 1801         int rc;
 1802 
1803         /* Clearing the pending list and the raw state must be done
1804          * atomically, as the execution flow assumes they represent the
1805          * same state. */
 1806         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
 1807 
 1808         /* Reset pending list */
 1809         __ecore_exe_queue_reset_pending(sc, &o->exe_queue);
 1810 
 1811         /* Clear pending */
 1812         r->clear_pending(r);
 1813 
 1814         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
 1815 
 1816         /* If ramrod failed this is most likely a SW bug */
 1817         if (cqe->message.error)
 1818                 return ECORE_INVAL;
 1819 
 1820         /* Run the next bulk of pending commands if requested */
 1821         if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
 1822                 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
 1823                 if (rc < 0)
 1824                         return rc;
 1825         }
 1826 
 1827         /* If there is more work to do return PENDING */
 1828         if (!ecore_exe_queue_empty(&o->exe_queue))
 1829                 return ECORE_PENDING;
 1830 
 1831         return ECORE_SUCCESS;
 1832 }
 1833 
1834 /**
1835  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1836  *
1837  * @sc:         device handle
1838  * @qo:         ecore_qable_obj
1839  * @elem:       ecore_exeq_elem
1840  */
 1841 static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
 1842                                    union ecore_qable_obj *qo,
 1843                                    struct ecore_exeq_elem *elem)
 1844 {
 1845         struct ecore_exeq_elem query, *pos;
 1846         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
 1847         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
 1848 
 1849         ECORE_MEMCPY(&query, elem, sizeof(query));
 1850 
 1851         switch (elem->cmd_data.vlan_mac.cmd) {
 1852         case ECORE_VLAN_MAC_ADD:
 1853                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
 1854                 break;
 1855         case ECORE_VLAN_MAC_DEL:
 1856                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
 1857                 break;
 1858         default:
 1859                 /* Don't handle anything other than ADD or DEL */
 1860                 return 0;
 1861         }
 1862 
 1863         /* If we found the appropriate element - delete it */
 1864         pos = exeq->get(exeq, &query);
 1865         if (pos) {
 1866 
 1867                 /* Return the credit of the optimized command */
 1868                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
 1869                                      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
 1870                         if ((query.cmd_data.vlan_mac.cmd ==
 1871                              ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
 1872                                 ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
 1873                                 return ECORE_INVAL;
 1874                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
 1875                                 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
 1876                                 return ECORE_INVAL;
 1877                         }
 1878                 }
 1879 
 1880                 ECORE_MSG(sc, "Optimizing %s command\n",
 1881                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
 1882                           "ADD" : "DEL");
 1883 
 1884                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
 1885                 ecore_exe_queue_free_elem(sc, pos);
 1886                 return 1;
 1887         }
 1888 
 1889         return 0;
 1890 }
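/*
 * Worked example of the optimization above: if an ADD for MAC A is still
 * pending in the execution queue and a DEL for MAC A arrives, the pending
 * ADD is removed from the queue and its CAM credit is returned, so neither
 * command reaches the firmware. The return value of 1 tells the caller
 * that the new command has been optimized away as well.
 */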
 1891 
1892 /**
1893  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1894  *
1895  * @sc:      device handle
1896  * @o:       vlan_mac object the element belongs to
1897  * @elem:    execution queue element describing the command
1898  * @restore: TRUE if this is a restore flow
1899  * @re:      output pointer for the prepared registry element
1900  *
1901  * Prepares a registry element according to the current command request.
1902  */
 1903 static inline int ecore_vlan_mac_get_registry_elem(
 1904         struct bxe_softc *sc,
 1905         struct ecore_vlan_mac_obj *o,
 1906         struct ecore_exeq_elem *elem,
 1907         bool restore,
 1908         struct ecore_vlan_mac_registry_elem **re)
 1909 {
 1910         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
 1911         struct ecore_vlan_mac_registry_elem *reg_elem;
 1912 
 1913         /* Allocate a new registry element if needed. */
 1914         if (!restore &&
 1915             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
 1916                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
 1917                 if (!reg_elem)
 1918                         return ECORE_NOMEM;
 1919 
 1920                 /* Get a new CAM offset */
 1921                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
 1922                         /* This shall never happen, because we have checked the
 1923                          * CAM availability in the 'validate'.
 1924                          */
 1925                         ECORE_DBG_BREAK_IF(1);
 1926                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
 1927                         return ECORE_INVAL;
 1928                 }
 1929 
 1930                 ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
 1931 
 1932                 /* Set a VLAN-MAC data */
 1933                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
 1934                           sizeof(reg_elem->u));
 1935 
 1936                 /* Copy the flags (needed for DEL and RESTORE flows) */
 1937                 reg_elem->vlan_mac_flags =
 1938                         elem->cmd_data.vlan_mac.vlan_mac_flags;
 1939         } else /* DEL, RESTORE */
 1940                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
 1941 
 1942         *re = reg_elem;
 1943         return ECORE_SUCCESS;
 1944 }
 1945 
1946 /**
1947  * ecore_execute_vlan_mac - execute a VLAN-MAC command
1948  *
1949  * @sc:                 device handle
1950  * @qo:                 qable object owning the execution queue
1951  * @exe_chunk:          list of commands to execute in this chunk
1952  * @ramrod_flags:       execution flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY)
1953  *
1954  * go and send a ramrod!
1955  */
 1956 static int ecore_execute_vlan_mac(struct bxe_softc *sc,
 1957                                   union ecore_qable_obj *qo,
 1958                                   ecore_list_t *exe_chunk,
 1959                                   unsigned long *ramrod_flags)
 1960 {
 1961         struct ecore_exeq_elem *elem;
 1962         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
 1963         struct ecore_raw_obj *r = &o->raw;
 1964         int rc, idx = 0;
 1965         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
 1966         bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
 1967         struct ecore_vlan_mac_registry_elem *reg_elem;
 1968         enum ecore_vlan_mac_cmd cmd;
 1969 
 1970         /* If DRIVER_ONLY execution is requested, cleanup a registry
 1971          * and exit. Otherwise send a ramrod to FW.
 1972          */
 1973         if (!drv_only) {
 1974                 ECORE_DBG_BREAK_IF(r->check_pending(r));
 1975 
 1976                 /* Set pending */
 1977                 r->set_pending(r);
 1978 
 1979                 /* Fill the ramrod data */
 1980                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
 1981                                           struct ecore_exeq_elem) {
 1982                         cmd = elem->cmd_data.vlan_mac.cmd;
 1983                         /* We will add to the target object in MOVE command, so
 1984                          * change the object for a CAM search.
 1985                          */
 1986                         if (cmd == ECORE_VLAN_MAC_MOVE)
 1987                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
 1988                         else
 1989                                 cam_obj = o;
 1990 
 1991                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
 1992                                                               elem, restore,
 1993                                                               &reg_elem);
 1994                         if (rc)
 1995                                 goto error_exit;
 1996 
 1997                         ECORE_DBG_BREAK_IF(!reg_elem);
 1998 
 1999                         /* Push a new entry into the registry */
 2000                         if (!restore &&
 2001                             ((cmd == ECORE_VLAN_MAC_ADD) ||
 2002                             (cmd == ECORE_VLAN_MAC_MOVE)))
 2003                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
 2004                                                      &cam_obj->head);
 2005 
 2006                         /* Configure a single command in a ramrod data buffer */
 2007                         o->set_one_rule(sc, o, elem, idx,
 2008                                         reg_elem->cam_offset);
 2009 
 2010                         /* MOVE command consumes 2 entries in the ramrod data */
 2011                         if (cmd == ECORE_VLAN_MAC_MOVE)
 2012                                 idx += 2;
 2013                         else
 2014                                 idx++;
 2015                 }
 2016 
2017                 /* No need for an explicit memory barrier here as long as we
2018                  * ensure the ordering of writing to the SPQ element and
2019                  * updating of the SPQ producer, which involves a memory
2020                  * read. If the memory read is removed we will have to put a
2021                  * full memory barrier there (inside ecore_sp_post()).
2022                  */
 2023                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
 2024                                    r->rdata_mapping,
 2025                                    ETH_CONNECTION_TYPE);
 2026                 if (rc)
 2027                         goto error_exit;
 2028         }
 2029 
 2030         /* Now, when we are done with the ramrod - clean up the registry */
 2031         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
 2032                                   struct ecore_exeq_elem) {
 2033                 cmd = elem->cmd_data.vlan_mac.cmd;
 2034                 if ((cmd == ECORE_VLAN_MAC_DEL) ||
 2035                     (cmd == ECORE_VLAN_MAC_MOVE)) {
 2036                         reg_elem = o->check_del(sc, o,
 2037                                                 &elem->cmd_data.vlan_mac.u);
 2038 
 2039                         ECORE_DBG_BREAK_IF(!reg_elem);
 2040 
 2041                         o->put_cam_offset(o, reg_elem->cam_offset);
 2042                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
 2043                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
 2044                 }
 2045         }
 2046 
 2047         if (!drv_only)
 2048                 return ECORE_PENDING;
 2049         else
 2050                 return ECORE_SUCCESS;
 2051 
 2052 error_exit:
 2053         r->clear_pending(r);
 2054 
 2055         /* Cleanup a registry in case of a failure */
 2056         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
 2057                                   struct ecore_exeq_elem) {
 2058                 cmd = elem->cmd_data.vlan_mac.cmd;
 2059 
 2060                 if (cmd == ECORE_VLAN_MAC_MOVE)
 2061                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
 2062                 else
 2063                         cam_obj = o;
 2064 
 2065                 /* Delete all newly added above entries */
 2066                 if (!restore &&
 2067                     ((cmd == ECORE_VLAN_MAC_ADD) ||
 2068                     (cmd == ECORE_VLAN_MAC_MOVE))) {
 2069                         reg_elem = o->check_del(sc, cam_obj,
 2070                                                 &elem->cmd_data.vlan_mac.u);
 2071                         if (reg_elem) {
 2072                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
 2073                                                         &cam_obj->head);
 2074                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
 2075                         }
 2076                 }
 2077         }
 2078 
 2079         return rc;
 2080 }
 2081 
 2082 static inline int ecore_vlan_mac_push_new_cmd(
 2083         struct bxe_softc *sc,
 2084         struct ecore_vlan_mac_ramrod_params *p)
 2085 {
 2086         struct ecore_exeq_elem *elem;
 2087         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
 2088         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
 2089 
 2090         /* Allocate the execution queue element */
 2091         elem = ecore_exe_queue_alloc_elem(sc);
 2092         if (!elem)
 2093                 return ECORE_NOMEM;
 2094 
 2095         /* Set the command 'length' */
 2096         switch (p->user_req.cmd) {
 2097         case ECORE_VLAN_MAC_MOVE:
 2098                 elem->cmd_len = 2;
 2099                 break;
 2100         default:
 2101                 elem->cmd_len = 1;
 2102         }
 2103 
 2104         /* Fill the object specific info */
 2105         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
 2106 
 2107         /* Try to add a new command to the pending list */
 2108         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
 2109 }
 2110 
2111 /**
2112  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
2113  *
2114  * @sc:   device handle
2115  * @p:    command parameters (object, user request and ramrod flags)
2116  *
2117  */
 2118 int ecore_config_vlan_mac(struct bxe_softc *sc,
 2119                            struct ecore_vlan_mac_ramrod_params *p)
 2120 {
 2121         int rc = ECORE_SUCCESS;
 2122         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
 2123         unsigned long *ramrod_flags = &p->ramrod_flags;
 2124         bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
 2125         struct ecore_raw_obj *raw = &o->raw;
 2126 
 2127         /*
 2128          * Add new elements to the execution list for commands that require it.
 2129          */
 2130         if (!cont) {
 2131                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
 2132                 if (rc)
 2133                         return rc;
 2134         }
 2135 
2136         /* If nothing more will be executed in this iteration, we want to
2137          * return PENDING if there are still pending commands
2138          */
 2139         if (!ecore_exe_queue_empty(&o->exe_queue))
 2140                 rc = ECORE_PENDING;
 2141 
 2142         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
 2143                 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
 2144                 raw->clear_pending(raw);
 2145         }
 2146 
 2147         /* Execute commands if required */
 2148         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
 2149             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
 2150                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
 2151                                                    &p->ramrod_flags);
 2152                 if (rc < 0)
 2153                         return rc;
 2154         }
 2155 
2156         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it is set,
2157          * the user wants to wait until the last command is done.
2158          */
 2159         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
 2160                 /* Wait maximum for the current exe_queue length iterations plus
 2161                  * one (for the current pending command).
 2162                  */
 2163                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
 2164 
 2165                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
 2166                        max_iterations--) {
 2167 
 2168                         /* Wait for the current command to complete */
 2169                         rc = raw->wait_comp(sc, raw);
 2170                         if (rc)
 2171                                 return rc;
 2172 
 2173                         /* Make a next step */
 2174                         rc = __ecore_vlan_mac_execute_step(sc,
 2175                                                            p->vlan_mac_obj,
 2176                                                            &p->ramrod_flags);
 2177                         if (rc < 0)
 2178                                 return rc;
 2179                 }
 2180 
 2181                 return ECORE_SUCCESS;
 2182         }
 2183 
 2184         return rc;
 2185 }
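/*
 * A minimal, hypothetical usage sketch: adding a single MAC classification
 * rule and waiting for its completion. Assumes 'mac_obj' was initialized
 * with ecore_init_mac_obj() and 'addr' holds the MAC address; field and
 * macro names follow this file's conventions:
 *
 *      struct ecore_vlan_mac_ramrod_params p;
 *      int rc;
 *
 *      ECORE_MEMSET(&p, 0, sizeof(p));
 *      p.vlan_mac_obj = mac_obj;
 *      p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *      ECORE_MEMCPY(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *      ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *      rc = ecore_config_vlan_mac(sc, &p);
 *      // ECORE_SUCCESS: completed; ECORE_PENDING: commands still queued
 */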
 2186 
2187 /**
2188  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2189  *
2190  * @sc:                 device handle
2191  * @o:                  vlan_mac object to delete elements from
2192  * @vlan_mac_flags:     flags selecting which elements to delete
2193  * @ramrod_flags:       execution flags to be used for this deletion
2194  *
2195  * Returns zero if the last operation completed successfully and there
2196  * are no more elements left, a positive value if the last operation
2197  * completed successfully and there are more previously configured
2198  * elements, and a negative value if the current operation has failed.
2199  */
 2200 static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
 2201                                   struct ecore_vlan_mac_obj *o,
 2202                                   unsigned long *vlan_mac_flags,
 2203                                   unsigned long *ramrod_flags)
 2204 {
 2205         struct ecore_vlan_mac_registry_elem *pos = NULL;
 2206         struct ecore_vlan_mac_ramrod_params p;
 2207         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
 2208         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
 2209         unsigned long flags;
 2210         int read_lock;
 2211         int rc = 0;
 2212 
 2213         /* Clear pending commands first */
 2214 
 2215         ECORE_SPIN_LOCK_BH(&exeq->lock);
 2216 
 2217         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
 2218                                        &exeq->exe_queue, link,
 2219                                        struct ecore_exeq_elem) {
 2220                 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
 2221                 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
 2222                     ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 2223                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
 2224                         if (rc) {
 2225                                 ECORE_ERR("Failed to remove command\n");
 2226                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
 2227                                 return rc;
 2228                         }
 2229                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
 2230                                                 &exeq->exe_queue);
 2231                         ecore_exe_queue_free_elem(sc, exeq_pos);
 2232                 }
 2233         }
 2234 
 2235         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
 2236 
 2237         /* Prepare a command request */
 2238         ECORE_MEMSET(&p, 0, sizeof(p));
 2239         p.vlan_mac_obj = o;
 2240         p.ramrod_flags = *ramrod_flags;
 2241         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
 2242 
2243         /* Add all but the last VLAN-MAC to the execution queue without
2244          * actually executing anything.
2245          */
 2246         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 2247         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
 2248         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
 2249 
 2250         ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
 2251         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
 2252         if (read_lock != ECORE_SUCCESS)
 2253                 return read_lock;
 2254 
 2255         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 2256                                   struct ecore_vlan_mac_registry_elem) {
 2257                 flags = pos->vlan_mac_flags;
 2258                 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
 2259                     ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 2260                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
 2261                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
 2262                         rc = ecore_config_vlan_mac(sc, &p);
 2263                         if (rc < 0) {
 2264                                 ECORE_ERR("Failed to add a new DEL command\n");
 2265                                 ecore_vlan_mac_h_read_unlock(sc, o);
 2266                                 return rc;
 2267                         }
 2268                 }
 2269         }
 2270 
 2271         ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
 2272         ecore_vlan_mac_h_read_unlock(sc, o);
 2273 
 2274         p.ramrod_flags = *ramrod_flags;
 2275         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
 2276 
 2277         return ecore_config_vlan_mac(sc, &p);
 2278 }
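/*
 * A hypothetical sketch of the delete-all flow via the object's method
 * pointer, e.g. flushing every element registered with a given flag set
 * (the flag name is illustrative):
 *
 *      unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *      int rc;
 *
 *      ECORE_SET_BIT_NA(ECORE_ETH_MAC, &vlan_mac_flags);
 *      ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &ramrod_flags);
 *      rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
 *                               &ramrod_flags);
 */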
 2279 
 2280 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
 2281         uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
 2282         unsigned long *pstate, ecore_obj_type type)
 2283 {
 2284         raw->func_id = func_id;
 2285         raw->cid = cid;
 2286         raw->cl_id = cl_id;
 2287         raw->rdata = rdata;
 2288         raw->rdata_mapping = rdata_mapping;
 2289         raw->state = state;
 2290         raw->pstate = pstate;
 2291         raw->obj_type = type;
 2292         raw->check_pending = ecore_raw_check_pending;
 2293         raw->clear_pending = ecore_raw_clear_pending;
 2294         raw->set_pending = ecore_raw_set_pending;
 2295         raw->wait_comp = ecore_raw_wait;
 2296 }
 2297 
 2298 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
 2299         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
 2300         int state, unsigned long *pstate, ecore_obj_type type,
 2301         struct ecore_credit_pool_obj *macs_pool,
 2302         struct ecore_credit_pool_obj *vlans_pool)
 2303 {
 2304         ECORE_LIST_INIT(&o->head);
 2305         o->head_reader = 0;
 2306         o->head_exe_request = FALSE;
 2307         o->saved_ramrod_flags = 0;
 2308 
 2309         o->macs_pool = macs_pool;
 2310         o->vlans_pool = vlans_pool;
 2311 
 2312         o->delete_all = ecore_vlan_mac_del_all;
 2313         o->restore = ecore_vlan_mac_restore;
 2314         o->complete = ecore_complete_vlan_mac;
 2315         o->wait = ecore_wait_vlan_mac;
 2316 
 2317         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
 2318                            state, pstate, type);
 2319 }
 2320 
 2321 void ecore_init_mac_obj(struct bxe_softc *sc,
 2322                         struct ecore_vlan_mac_obj *mac_obj,
 2323                         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 2324                         ecore_dma_addr_t rdata_mapping, int state,
 2325                         unsigned long *pstate, ecore_obj_type type,
 2326                         struct ecore_credit_pool_obj *macs_pool)
 2327 {
 2328         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
 2329 
 2330         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
 2331                                    rdata_mapping, state, pstate, type,
 2332                                    macs_pool, NULL);
 2333 
 2334         /* CAM credit pool handling */
 2335         mac_obj->get_credit = ecore_get_credit_mac;
 2336         mac_obj->put_credit = ecore_put_credit_mac;
 2337         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
 2338         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
 2339 
 2340         if (CHIP_IS_E1x(sc)) {
 2341                 mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
 2342                 mac_obj->check_del         = ecore_check_mac_del;
 2343                 mac_obj->check_add         = ecore_check_mac_add;
 2344                 mac_obj->check_move        = ecore_check_move_always_err;
 2345                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
 2346 
 2347                 /* Exe Queue */
 2348                 ecore_exe_queue_init(sc,
 2349                                      &mac_obj->exe_queue, 1, qable_obj,
 2350                                      ecore_validate_vlan_mac,
 2351                                      ecore_remove_vlan_mac,
 2352                                      ecore_optimize_vlan_mac,
 2353                                      ecore_execute_vlan_mac,
 2354                                      ecore_exeq_get_mac);
 2355         } else {
 2356                 mac_obj->set_one_rule      = ecore_set_one_mac_e2;
 2357                 mac_obj->check_del         = ecore_check_mac_del;
 2358                 mac_obj->check_add         = ecore_check_mac_add;
 2359                 mac_obj->check_move        = ecore_check_move;
 2360                 mac_obj->ramrod_cmd        =
 2361                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
 2362                 mac_obj->get_n_elements    = ecore_get_n_elements;
 2363 
 2364                 /* Exe Queue */
 2365                 ecore_exe_queue_init(sc,
 2366                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
 2367                                      qable_obj, ecore_validate_vlan_mac,
 2368                                      ecore_remove_vlan_mac,
 2369                                      ecore_optimize_vlan_mac,
 2370                                      ecore_execute_vlan_mac,
 2371                                      ecore_exeq_get_mac);
 2372         }
 2373 }
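/*
 * A hypothetical initialization sketch, as a driver might perform at
 * queue-setup time ('fp' and its fields are illustrative placeholders
 * for the caller's per-queue context and DMA-able ramrod data buffer):
 *
 *      ecore_init_mac_obj(sc, &fp->mac_obj, fp->cl_id, fp->cid,
 *                         SC_FUNC(sc), fp->rdata, fp->rdata_mapping,
 *                         ECORE_FILTER_MAC_PENDING, &fp->sp_state,
 *                         ECORE_OBJ_TYPE_RX, &sc->macs_pool);
 */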
 2374 
 2375 void ecore_init_vlan_obj(struct bxe_softc *sc,
 2376                          struct ecore_vlan_mac_obj *vlan_obj,
 2377                          uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 2378                          ecore_dma_addr_t rdata_mapping, int state,
 2379                          unsigned long *pstate, ecore_obj_type type,
 2380                          struct ecore_credit_pool_obj *vlans_pool)
 2381 {
 2382         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
 2383 
 2384         ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
 2385                                    rdata_mapping, state, pstate, type, NULL,
 2386                                    vlans_pool);
 2387 
 2388         vlan_obj->get_credit = ecore_get_credit_vlan;
 2389         vlan_obj->put_credit = ecore_put_credit_vlan;
 2390         vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
 2391         vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
 2392 
 2393         if (CHIP_IS_E1x(sc)) {
2394                 ECORE_ERR("Only E2 and newer chips are supported\n");
 2395                 ECORE_BUG();
 2396         } else {
 2397                 vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
 2398                 vlan_obj->check_del         = ecore_check_vlan_del;
 2399                 vlan_obj->check_add         = ecore_check_vlan_add;
 2400                 vlan_obj->check_move        = ecore_check_move;
 2401                 vlan_obj->ramrod_cmd        =
 2402                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
 2403                 vlan_obj->get_n_elements    = ecore_get_n_elements;
 2404 
 2405                 /* Exe Queue */
 2406                 ecore_exe_queue_init(sc,
 2407                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
 2408                                      qable_obj, ecore_validate_vlan_mac,
 2409                                      ecore_remove_vlan_mac,
 2410                                      ecore_optimize_vlan_mac,
 2411                                      ecore_execute_vlan_mac,
 2412                                      ecore_exeq_get_vlan);
 2413         }
 2414 }
 2415 
 2416 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
 2417                              struct ecore_vlan_mac_obj *vlan_mac_obj,
 2418                              uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 2419                              ecore_dma_addr_t rdata_mapping, int state,
 2420                              unsigned long *pstate, ecore_obj_type type,
 2421                              struct ecore_credit_pool_obj *macs_pool,
 2422                              struct ecore_credit_pool_obj *vlans_pool)
 2423 {
 2424         union ecore_qable_obj *qable_obj =
 2425                 (union ecore_qable_obj *)vlan_mac_obj;
 2426 
 2427         ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
 2428                                    rdata_mapping, state, pstate, type,
 2429                                    macs_pool, vlans_pool);
 2430 
 2431         /* CAM pool handling */
 2432         vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
 2433         vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2434         /* The CAM offset is relevant only for the 57710 and 57711 chips,
2435          * which have a single CAM for both MACs and VLAN-MAC pairs. So the
2436          * offset will be taken from the MACs' pool object only.
2437          */
 2438         vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
 2439         vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
 2440 
 2441         if (CHIP_IS_E1(sc)) {
2442                 ECORE_ERR("Only E1H and newer chips are supported\n");
 2443                 ECORE_BUG();
 2444         } else if (CHIP_IS_E1H(sc)) {
 2445                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
 2446                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
 2447                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
 2448                 vlan_mac_obj->check_move        = ecore_check_move_always_err;
 2449                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
 2450 
 2451                 /* Exe Queue */
 2452                 ecore_exe_queue_init(sc,
 2453                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
 2454                                      ecore_validate_vlan_mac,
 2455                                      ecore_remove_vlan_mac,
 2456                                      ecore_optimize_vlan_mac,
 2457                                      ecore_execute_vlan_mac,
 2458                                      ecore_exeq_get_vlan_mac);
 2459         } else {
 2460                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
 2461                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
 2462                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
 2463                 vlan_mac_obj->check_move        = ecore_check_move;
 2464                 vlan_mac_obj->ramrod_cmd        =
 2465                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
 2466 
 2467                 /* Exe Queue */
 2468                 ecore_exe_queue_init(sc,
 2469                                      &vlan_mac_obj->exe_queue,
 2470                                      CLASSIFY_RULES_COUNT,
 2471                                      qable_obj, ecore_validate_vlan_mac,
 2472                                      ecore_remove_vlan_mac,
 2473                                      ecore_optimize_vlan_mac,
 2474                                      ecore_execute_vlan_mac,
 2475                                      ecore_exeq_get_vlan_mac);
 2476         }
 2477 }
 2478 
 2479 void ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
 2480                                 struct ecore_vlan_mac_obj *vlan_mac_obj,
 2481                                 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
 2482                                 ecore_dma_addr_t rdata_mapping, int state,
 2483                                 unsigned long *pstate, ecore_obj_type type,
 2484                                 struct ecore_credit_pool_obj *macs_pool,
 2485                                 struct ecore_credit_pool_obj *vlans_pool)
 2486 {
 2487         union ecore_qable_obj *qable_obj =
 2488                 (union ecore_qable_obj *)vlan_mac_obj;
 2489 
 2490         ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id,
 2491                                    rdata, rdata_mapping, state, pstate,
 2492                                    type, macs_pool, vlans_pool);
 2493 
 2494         /* CAM pool handling */
 2495         vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
 2496         vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2497         /* The CAM offset is relevant only for the 57710 and 57711 chips,
2498          * which have a single CAM for both MACs and VLAN-MAC pairs. So the
2499          * offset will be taken from the MACs' pool object only.
2500          */
 2501         vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
 2502         vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
 2503 
 2504         if (CHIP_IS_E1x(sc)) {
2505                 ECORE_ERR("Only E2/E3 chips are supported\n");
 2506                 ECORE_BUG();
 2507         } else {
 2508                 vlan_mac_obj->set_one_rule      = ecore_set_one_vxlan_fltr_e2;
 2509                 vlan_mac_obj->check_del         = ecore_check_vxlan_fltr_del;
 2510                 vlan_mac_obj->check_add         = ecore_check_vxlan_fltr_add;
 2511                 vlan_mac_obj->check_move        = ecore_check_move;
 2512                 vlan_mac_obj->ramrod_cmd        =
 2513                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
 2514 
 2515                 /* Exe Queue */
 2516                 ecore_exe_queue_init(sc,
 2517                                      &vlan_mac_obj->exe_queue,
 2518                                      CLASSIFY_RULES_COUNT,
 2519                                      qable_obj, ecore_validate_vlan_mac,
 2520                                      ecore_remove_vlan_mac,
 2521                                      ecore_optimize_vlan_mac,
 2522                                      ecore_execute_vlan_mac,
 2523                                      ecore_exeq_get_vxlan_fltr);
 2524         }
 2525 }
 2526 
 2527 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 2528 static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
 2529                         struct tstorm_eth_mac_filter_config *mac_filters,
 2530                         uint16_t pf_id)
 2531 {
 2532         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
 2533 
 2534         uint32_t addr = BAR_TSTRORM_INTMEM +
 2535                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
 2536 
 2537         ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
 2538 }
 2539 
 2540 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
 2541                                  struct ecore_rx_mode_ramrod_params *p)
 2542 {
 2543         /* update the sc MAC filter structure */
 2544         uint32_t mask = (1 << p->cl_id);
 2545 
 2546         struct tstorm_eth_mac_filter_config *mac_filters =
 2547                 (struct tstorm_eth_mac_filter_config *)p->rdata;
 2548 
 2549         /* initial setting is drop-all */
 2550         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
 2551         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
 2552         uint8_t unmatched_unicast = 0;
 2553 
2554         /* On E1x we only take the RX accept flags into account, since TX
2555          * switching isn't enabled. */
 2556         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
 2557                 /* accept matched ucast */
 2558                 drop_all_ucast = 0;
 2559 
 2560         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
 2561                 /* accept matched mcast */
 2562                 drop_all_mcast = 0;
 2563 
 2564         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
 2565                 /* accept all ucast */
 2566                 drop_all_ucast = 0;
 2567                 accp_all_ucast = 1;
 2568         }
 2569         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
 2570                 /* accept all mcast */
 2571                 drop_all_mcast = 0;
 2572                 accp_all_mcast = 1;
 2573         }
 2574         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
 2575                 /* accept (all) bcast */
 2576                 accp_all_bcast = 1;
 2577         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
 2578                 /* accept unmatched unicasts */
 2579                 unmatched_unicast = 1;
 2580 
 2581         mac_filters->ucast_drop_all = drop_all_ucast ?
 2582                 mac_filters->ucast_drop_all | mask :
 2583                 mac_filters->ucast_drop_all & ~mask;
 2584 
 2585         mac_filters->mcast_drop_all = drop_all_mcast ?
 2586                 mac_filters->mcast_drop_all | mask :
 2587                 mac_filters->mcast_drop_all & ~mask;
 2588 
 2589         mac_filters->ucast_accept_all = accp_all_ucast ?
 2590                 mac_filters->ucast_accept_all | mask :
 2591                 mac_filters->ucast_accept_all & ~mask;
 2592 
 2593         mac_filters->mcast_accept_all = accp_all_mcast ?
 2594                 mac_filters->mcast_accept_all | mask :
 2595                 mac_filters->mcast_accept_all & ~mask;
 2596 
 2597         mac_filters->bcast_accept_all = accp_all_bcast ?
 2598                 mac_filters->bcast_accept_all | mask :
 2599                 mac_filters->bcast_accept_all & ~mask;
 2600 
 2601         mac_filters->unmatched_unicast = unmatched_unicast ?
 2602                 mac_filters->unmatched_unicast | mask :
 2603                 mac_filters->unmatched_unicast & ~mask;
 2604 
 2605         ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
 2606                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
 2607            mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
 2608            mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
 2609            mac_filters->bcast_accept_all);
 2610 
 2611         /* write the MAC filter structure */
 2612         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
 2613 
 2614         /* The operation is completed */
 2615         ECORE_CLEAR_BIT(p->state, p->pstate);
 2616         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 2617 
 2618         return ECORE_SUCCESS;
 2619 }
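
/*
 * A minimal sketch of the per-client bit update idiom used throughout
 * ecore_set_rx_mode_e1x() above: each filter field keeps one bit per
 * client id, and a flag decides whether that client's bit is set or
 * cleared without disturbing the other clients' bits.
 * update_client_bit() is a hypothetical helper, not part of the driver.
 */
static inline uint32_t update_client_bit(uint32_t field, uint32_t mask,
                                         int enable)
{
        return enable ? (field | mask) : (field & ~mask);
}

/* e.g. (hypothetical use):
 * mac_filters->ucast_drop_all = update_client_bit(
 *         mac_filters->ucast_drop_all, 1 << p->cl_id, drop_all_ucast);
 */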
 2620 
 2621 /* Setup ramrod data */
 2622 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
 2623                                 struct eth_classify_header *hdr,
 2624                                 uint8_t rule_cnt)
 2625 {
 2626         hdr->echo = ECORE_CPU_TO_LE32(cid);
 2627         hdr->rule_cnt = rule_cnt;
 2628 }
 2629 
 2630 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
 2631                                 unsigned long *accept_flags,
 2632                                 struct eth_filter_rules_cmd *cmd,
 2633                                 bool clear_accept_all)
 2634 {
 2635         uint16_t state;
 2636 
 2637         /* start with 'drop-all' */
 2638         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
 2639                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
 2640 
 2641         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
 2642                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
 2643 
 2644         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
 2645                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
 2646 
 2647         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
 2648                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
 2649                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
 2650         }
 2651 
 2652         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
 2653                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
 2654                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
 2655         }
 2656         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
 2657                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
 2658 
 2659         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
 2660                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
 2661                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
 2662         }
 2663         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
 2664                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
 2665 
 2666         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
 2667         if (clear_accept_all) {
 2668                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
 2669                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
 2670                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
 2671                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
 2672         }
 2673 
 2674         cmd->state = ECORE_CPU_TO_LE16(state);
 2675 }
 2676 
 2677 static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
 2678                                 struct ecore_rx_mode_ramrod_params *p)
 2679 {
 2680         struct eth_filter_rules_ramrod_data *data = p->rdata;
 2681         int rc;
 2682         uint8_t rule_idx = 0;
 2683 
 2684         /* Reset the ramrod data buffer */
 2685         ECORE_MEMSET(data, 0, sizeof(*data));
 2686 
 2687         /* Setup ramrod data */
 2688 
 2689         /* Tx (internal switching) */
 2690         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
 2691                 data->rules[rule_idx].client_id = p->cl_id;
 2692                 data->rules[rule_idx].func_id = p->func_id;
 2693 
 2694                 data->rules[rule_idx].cmd_general_data =
 2695                         ETH_FILTER_RULES_CMD_TX_CMD;
 2696 
 2697                 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
 2698                                                &(data->rules[rule_idx++]),
 2699                                                FALSE);
 2700         }
 2701 
 2702         /* Rx */
 2703         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
 2704                 data->rules[rule_idx].client_id = p->cl_id;
 2705                 data->rules[rule_idx].func_id = p->func_id;
 2706 
 2707                 data->rules[rule_idx].cmd_general_data =
 2708                         ETH_FILTER_RULES_CMD_RX_CMD;
 2709 
 2710                 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
 2711                                                &(data->rules[rule_idx++]),
 2712                                                FALSE);
 2713         }
 2714 
 2715         /* If FCoE Queue configuration has been requested, configure the Rx and
 2716          * internal switching modes for this queue in separate rules.
 2717          *
 2718          * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
 2719          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
 2720          */
 2721         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
 2722                 /*  Tx (internal switching) */
 2723                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
 2724                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
 2725                         data->rules[rule_idx].func_id = p->func_id;
 2726 
 2727                         data->rules[rule_idx].cmd_general_data =
 2728                                                 ETH_FILTER_RULES_CMD_TX_CMD;
 2729 
 2730                         ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
 2731                                                        &(data->rules[rule_idx]),
 2732                                                        TRUE);
 2733                         rule_idx++;
 2734                 }
 2735 
 2736                 /* Rx */
 2737                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
 2738                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
 2739                         data->rules[rule_idx].func_id = p->func_id;
 2740 
 2741                         data->rules[rule_idx].cmd_general_data =
 2742                                                 ETH_FILTER_RULES_CMD_RX_CMD;
 2743 
 2744                         ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
 2745                                                        &(data->rules[rule_idx]),
 2746                                                        TRUE);
 2747                         rule_idx++;
 2748                 }
 2749         }
 2750 
 2751         /* Set the ramrod header (most importantly - number of rules to
 2752          * configure).
 2753          */
 2754         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
 2755 
 2756         ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
 2757                   data->header.rule_cnt, p->rx_accept_flags,
 2758                   p->tx_accept_flags);
 2759 
 2760         /* No need for an explicit memory barrier here as long as we
 2761          * ensure the ordering of writing to the SPQ element
 2762          * and updating of the SPQ producer which involves a memory
 2763          * read. If the memory read is removed we will have to put a
 2764          * full memory barrier there (inside ecore_sp_post()).
 2765          */
 2766 
 2767         /* Send a ramrod */
 2768         rc = ecore_sp_post(sc,
 2769                            RAMROD_CMD_ID_ETH_FILTER_RULES,
 2770                            p->cid,
 2771                            p->rdata_mapping,
 2772                            ETH_CONNECTION_TYPE);
 2773         if (rc)
 2774                 return rc;
 2775 
 2776         /* Ramrod completion is pending */
 2777         return ECORE_PENDING;
 2778 }
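
/*
 * A quick illustration of the rule counting in ecore_set_rx_mode_e2()
 * above: each requested direction contributes one rule and the FCoE
 * case doubles it, so rule_idx (and hence header.rule_cnt) ends up
 * between 0 and 4. demo_rule_cnt() is illustrative only.
 */
static int demo_rule_cnt(bool tx, bool rx, bool fcoe)
{
        int cnt = (tx ? 1 : 0) + (rx ? 1 : 0);

        if (fcoe)
                cnt *= 2;       /* separate FCoE Tx/Rx rules */
        return cnt;
}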
 2779 
 2780 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
 2781                                       struct ecore_rx_mode_ramrod_params *p)
 2782 {
 2783         return ecore_state_wait(sc, p->state, p->pstate);
 2784 }
 2785 
 2786 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
 2787                                     struct ecore_rx_mode_ramrod_params *p)
 2788 {
 2789         /* Do nothing */
 2790         return ECORE_SUCCESS;
 2791 }
 2792 
 2793 int ecore_config_rx_mode(struct bxe_softc *sc,
 2794                          struct ecore_rx_mode_ramrod_params *p)
 2795 {
 2796         int rc;
 2797 
 2798         /* Configure the new classification in the chip */
 2799         rc = p->rx_mode_obj->config_rx_mode(sc, p);
 2800         if (rc < 0)
 2801                 return rc;
 2802 
 2803         /* Wait for a ramrod completion if was requested */
 2804         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
 2805                 rc = p->rx_mode_obj->wait_comp(sc, p);
 2806                 if (rc)
 2807                         return rc;
 2808         }
 2809 
 2810         return rc;
 2811 }
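
/*
 * A hedged usage sketch for ecore_config_rx_mode(): how a caller might
 * request a synchronous rx-mode change. ECORE_SET_BIT is assumed to be
 * the setter counterpart of the ECORE_TEST_BIT/ECORE_CLEAR_BIT macros
 * used in this file, and the parameter setup is deliberately partial
 * (cl_id, cid, rdata and friends still need to be filled in).
 */
static int demo_set_normal_rx_mode(struct bxe_softc *sc,
                                   struct ecore_rx_mode_ramrod_params *p)
{
        ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags);
        ECORE_SET_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags);
        ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags);

        /* block until the ramrod completes (or times out) */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags);

        return ecore_config_rx_mode(sc, p);
}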
 2812 
 2813 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
 2814                             struct ecore_rx_mode_obj *o)
 2815 {
 2816         if (CHIP_IS_E1x(sc)) {
 2817                 o->wait_comp      = ecore_empty_rx_mode_wait;
 2818                 o->config_rx_mode = ecore_set_rx_mode_e1x;
 2819         } else {
 2820                 o->wait_comp      = ecore_wait_rx_mode_comp_e2;
 2821                 o->config_rx_mode = ecore_set_rx_mode_e2;
 2822         }
 2823 }
 2824 
 2825 /********************* Multicast verbs: SET, CLEAR ****************************/
 2826 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
 2827 {
 2828         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
 2829 }
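
/*
 * A self-contained sketch of the bin selection above. It assumes
 * ECORE_CRC32_LE(seed, buf, len) computes a reflected CRC-32 seeded
 * with `seed`; crc32_le_ref() below uses the classic 0xEDB88320
 * polynomial for illustration (the Linux bnx2x equivalent uses
 * crc32c_le(), i.e. the Castagnoli polynomial, so treat the exact
 * polynomial as an assumption).
 */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *buf, int len)
{
        int i, j;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (j = 0; j < 8; j++)
                        crc = (crc >> 1) ^ (0xedb88320U & -(crc & 1U));
        }
        return crc;
}

/* same shape as ecore_mcast_bin_from_mac(): top CRC byte -> 1 of 256 bins */
static uint8_t demo_bin_from_mac(const uint8_t mac[6])
{
        return (crc32_le_ref(0, mac, 6) >> 24) & 0xff;
}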
 2830 
 2831 struct ecore_mcast_mac_elem {
 2832         ecore_list_entry_t link;
 2833         uint8_t mac[ETH_ALEN];
 2834         uint8_t pad[2]; /* For a natural alignment of the following buffer */
 2835 };
 2836 
 2837 struct ecore_pending_mcast_cmd {
 2838         ecore_list_entry_t link;
 2839         int type; /* ECORE_MCAST_CMD_X */
 2840         union {
 2841                 ecore_list_t macs_head;
 2842                 uint32_t macs_num; /* Needed for DEL command */
 2843                 int next_bin; /* Needed for RESTORE flow with aprox match */
 2844         } data;
 2845 
              int alloc_len; /* total size of the ECORE_ZALLOC'ed command;
                              * passed back to ECORE_FREE() when the command
                              * is retired (see cmd_pos->alloc_len below)
                              */

 2846         bool done; /* set to TRUE, when the command has been handled,
 2847                     * practically used in 57712 handling only, where one pending
 2848                     * command may be handled in a few operations. As long as for
 2849                     * other chips every operation handling is completed in a
 2850                     * single ramrod, there is no need to utilize this field.
 2851                     */
 2852 };
 2853 
 2854 static int ecore_mcast_wait(struct bxe_softc *sc,
 2855                             struct ecore_mcast_obj *o)
 2856 {
 2857         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
 2858                         o->raw.wait_comp(sc, &o->raw))
 2859                 return ECORE_TIMEOUT;
 2860 
 2861         return ECORE_SUCCESS;
 2862 }
 2863 
 2864 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
 2865                                    struct ecore_mcast_obj *o,
 2866                                    struct ecore_mcast_ramrod_params *p,
 2867                                    enum ecore_mcast_cmd cmd)
 2868 {
 2869         int total_sz;
 2870         struct ecore_pending_mcast_cmd *new_cmd;
 2871         struct ecore_mcast_mac_elem *cur_mac = NULL;
 2872         struct ecore_mcast_list_elem *pos;
 2873         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
 2874                              p->mcast_list_len : 0);
 2875 
 2876         /* If the command is empty ("handle pending commands only"), return */
 2877         if (!p->mcast_list_len)
 2878                 return ECORE_SUCCESS;
 2879 
 2880         total_sz = sizeof(*new_cmd) +
 2881                 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
 2882 
 2883         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
 2884         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
 2885 
 2886         if (!new_cmd)
 2887                 return ECORE_NOMEM;
 2888 
 2889         ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
 2890                   cmd, macs_list_len);
 2891 
 2892         ECORE_LIST_INIT(&new_cmd->data.macs_head);
 2893 
 2894         new_cmd->type = cmd;
 2895         new_cmd->done = FALSE;
              new_cmd->alloc_len = total_sz; /* recorded for ECORE_FREE() */
 2896 
 2897         switch (cmd) {
 2898         case ECORE_MCAST_CMD_ADD:
 2899                 cur_mac = (struct ecore_mcast_mac_elem *)
 2900                           ((uint8_t *)new_cmd + sizeof(*new_cmd));
 2901 
 2902                 /* Push the MACs of the current command into the pending command
 2903                  * MACs list: FIFO
 2904                  */
 2905                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
 2906                                           struct ecore_mcast_list_elem) {
 2907                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
 2908                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
 2909                                              &new_cmd->data.macs_head);
 2910                         cur_mac++;
 2911                 }
 2912 
 2913                 break;
 2914 
 2915         case ECORE_MCAST_CMD_DEL:
 2916                 new_cmd->data.macs_num = p->mcast_list_len;
 2917                 break;
 2918 
 2919         case ECORE_MCAST_CMD_RESTORE:
 2920                 new_cmd->data.next_bin = 0;
 2921                 break;
 2922 
 2923         default:
 2924                 ECORE_FREE(sc, new_cmd, total_sz);
 2925                 ECORE_ERR("Unknown command: %d\n", cmd);
 2926                 return ECORE_INVAL;
 2927         }
 2928 
 2929         /* Push the new pending command to the tail of the pending list: FIFO */
 2930         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
 2931 
 2932         o->set_sched(o);
 2933 
 2934         return ECORE_PENDING;
 2935 }
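
/*
 * A user-space sketch of the single-allocation layout built above: the
 * command header and its trailing MAC element array come from one
 * ECORE_ZALLOC, so a single ECORE_FREE of alloc_len bytes releases
 * both. malloc()/memset() stand in for the ECORE wrappers and all
 * names are illustrative.
 */
#include <stdlib.h>     /* user-space stand-ins, for the sketch only */
#include <string.h>

struct demo_cmd {
        int type;
        int alloc_len;          /* remembered for the free, as above */
        /* the ecore_mcast_mac_elem array follows in the same block */
};

static struct demo_cmd *demo_alloc_cmd(int type, int n_macs)
{
        int total = sizeof(struct demo_cmd) +
            n_macs * (int)sizeof(struct ecore_mcast_mac_elem);
        struct demo_cmd *cmd = malloc(total);

        if (cmd == NULL)
                return NULL;
        memset(cmd, 0, total);
        cmd->type = type;
        cmd->alloc_len = total;
        /* the first element sits right past the header, as in the driver:
         * (struct ecore_mcast_mac_elem *)((uint8_t *)cmd + sizeof(*cmd))
         */
        return cmd;
}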
 2936 
 2937 /**
 2938  * ecore_mcast_get_next_bin - get the next set bin (index)
 2939  *
 2940  * @o:
 2941  * @last:       index to start looking from (inclusive)
 2942  *
 2943  * returns the next found (set) bin or a negative value if none is found.
 2944  */
 2945 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
 2946 {
 2947         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
 2948 
 2949         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
 2950                 if (o->registry.aprox_match.vec[i])
 2951                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
 2952                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
 2953                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
 2954                                                        vec, cur_bit)) {
 2955                                         return cur_bit;
 2956                                 }
 2957                         }
 2958                 inner_start = 0;
 2959         }
 2960 
 2961         /* None found */
 2962         return -1;
 2963 }
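
/*
 * A sketch of the scan above with illustrative constants standing in
 * for BIT_VEC64_ELEM_SZ and ECORE_MCAST_VEC_SZ: the registry is an
 * array of 64-bit words, the walk resumes from `last` inclusive, and
 * all-zero words are skipped without testing individual bits.
 */
#define DEMO_ELEM_SZ 64
#define DEMO_VEC_SZ  4                  /* 4 x 64 = 256 bins */

static int demo_next_set_bin(const uint64_t *vec, int last)
{
        int i, j, inner = last % DEMO_ELEM_SZ;

        for (i = last / DEMO_ELEM_SZ; i < DEMO_VEC_SZ; i++) {
                if (vec[i] != 0)
                        for (j = inner; j < DEMO_ELEM_SZ; j++)
                                if (vec[i] & (1ULL << j))
                                        return i * DEMO_ELEM_SZ + j;
                inner = 0;      /* later words are scanned from bit 0 */
        }
        return -1;              /* none found */
}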
 2964 
 2965 /**
 2966  * ecore_mcast_clear_first_bin - find the first set bin and clear it
 2967  *
 2968  * @o:
 2969  *
 2970  * returns the index of the found bin or -1 if none is found
 2971  */
 2972 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
 2973 {
 2974         int cur_bit = ecore_mcast_get_next_bin(o, 0);
 2975 
 2976         if (cur_bit >= 0)
 2977                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
 2978 
 2979         return cur_bit;
 2980 }
 2981 
 2982 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
 2983 {
 2984         struct ecore_raw_obj *raw = &o->raw;
 2985         uint8_t rx_tx_flag = 0;
 2986 
 2987         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
 2988             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
 2989                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
 2990 
 2991         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
 2992             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
 2993                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
 2994 
 2995         return rx_tx_flag;
 2996 }
 2997 
 2998 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
 2999                                         struct ecore_mcast_obj *o, int idx,
 3000                                         union ecore_mcast_config_data *cfg_data,
 3001                                         enum ecore_mcast_cmd cmd)
 3002 {
 3003         struct ecore_raw_obj *r = &o->raw;
 3004         struct eth_multicast_rules_ramrod_data *data =
 3005                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
 3006         uint8_t func_id = r->func_id;
 3007         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
 3008         int bin;
 3009 
 3010         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
 3011                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
 3012 
 3013         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
 3014 
 3015         /* Get a bin and update a bins' vector */
 3016         switch (cmd) {
 3017         case ECORE_MCAST_CMD_ADD:
 3018                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
 3019                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
 3020                 break;
 3021 
 3022         case ECORE_MCAST_CMD_DEL:
 3023                 /* If there are no more bins to clear
 3024                  * (ecore_mcast_clear_first_bin() returns -1), the cast
 3025                  * below truncates the -1 into a dummy (0xff) bin.
 3026                  * See ecore_mcast_validate_e2() for an explanation of
 3027                  * when this may happen.
 3028                  */
 3029                 bin = ecore_mcast_clear_first_bin(o);
 3030                 break;
 3031 
 3032         case ECORE_MCAST_CMD_RESTORE:
 3033                 bin = cfg_data->bin;
 3034                 break;
 3035 
 3036         default:
 3037                 ECORE_ERR("Unknown command: %d\n", cmd);
 3038                 return;
 3039         }
 3040 
 3041         ECORE_MSG(sc, "%s bin %d\n",
 3042                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
 3043                    "Setting"  : "Clearing"), bin);
 3044 
 3045         data->rules[idx].bin_id    = (uint8_t)bin;
 3046         data->rules[idx].func_id   = func_id;
 3047         data->rules[idx].engine_id = o->engine_id;
 3048 }
 3049 
 3050 /**
 3051  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 3052  *
 3053  * @sc:         device handle
 3054  * @o:
 3055  * @start_bin:  index in the registry to start from (inclusive)
 3056  * @rdata_idx:  index in the ramrod data to start from
 3057  *
 3058  * returns last handled bin index or -1 if all bins have been handled
 3059  */
 3060 static inline int ecore_mcast_handle_restore_cmd_e2(
 3061         struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_bin,
 3062         int *rdata_idx)
 3063 {
 3064         int cur_bin, cnt = *rdata_idx;
 3065         union ecore_mcast_config_data cfg_data = {NULL};
 3066 
 3067         /* go through the registry and configure the bins from it */
 3068         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
 3069             cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
 3070 
 3071                 cfg_data.bin = (uint8_t)cur_bin;
 3072                 o->set_one_rule(sc, o, cnt, &cfg_data,
 3073                                 ECORE_MCAST_CMD_RESTORE);
 3074 
 3075                 cnt++;
 3076 
 3077                 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
 3078 
 3079                 /* Break if we reached the maximum number
 3080                  * of rules.
 3081                  */
 3082                 if (cnt >= o->max_cmd_len)
 3083                         break;
 3084         }
 3085 
 3086         *rdata_idx = cnt;
 3087 
 3088         return cur_bin;
 3089 }
 3090 
 3091 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
 3092         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
 3093         int *line_idx)
 3094 {
 3095         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
 3096         int cnt = *line_idx;
 3097         union ecore_mcast_config_data cfg_data = {NULL};
 3098 
 3099         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
 3100                 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
 3101 
 3102                 cfg_data.mac = &pmac_pos->mac[0];
 3103                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
 3104 
 3105                 cnt++;
 3106 
 3107                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
 3108                           pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
 3109 
 3110                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
 3111                                         &cmd_pos->data.macs_head);
 3112 
 3113                 /* Break if we reached the maximum number
 3114                  * of rules.
 3115                  */
 3116                 if (cnt >= o->max_cmd_len)
 3117                         break;
 3118         }
 3119 
 3120         *line_idx = cnt;
 3121 
 3122         /* if no more MACs to configure - we are done */
 3123         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
 3124                 cmd_pos->done = TRUE;
 3125 }
 3126 
 3127 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
 3128         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
 3129         int *line_idx)
 3130 {
 3131         int cnt = *line_idx;
 3132 
 3133         while (cmd_pos->data.macs_num) {
 3134                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
 3135 
 3136                 cnt++;
 3137 
 3138                 cmd_pos->data.macs_num--;
 3139 
 3140                 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d\n",
 3141                           cmd_pos->data.macs_num, cnt);
 3142 
 3143                 /* Break if we reached the maximum
 3144                  * number of rules.
 3145                  */
 3146                 if (cnt >= o->max_cmd_len)
 3147                         break;
 3148         }
 3149 
 3150         *line_idx = cnt;
 3151 
 3152         /* If we cleared all bins - we are done */
 3153         if (!cmd_pos->data.macs_num)
 3154                 cmd_pos->done = TRUE;
 3155 }
 3156 
 3157 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
 3158         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
 3159         int *line_idx)
 3160 {
 3161         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
 3162                                                 line_idx);
 3163 
 3164         if (cmd_pos->data.next_bin < 0)
 3165                 /* If o->hdl_restore returned -1 we are done */
 3166                 cmd_pos->done = TRUE;
 3167         else
 3168                 /* Start from the next bin next time */
 3169                 cmd_pos->data.next_bin++;
 3170 }
 3171 
 3172 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
 3173                                 struct ecore_mcast_ramrod_params *p)
 3174 {
 3175         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
 3176         int cnt = 0;
 3177         struct ecore_mcast_obj *o = p->mcast_obj;
 3178 
 3179         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
 3180                 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
 3181                 switch (cmd_pos->type) {
 3182                 case ECORE_MCAST_CMD_ADD:
 3183                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
 3184                         break;
 3185 
 3186                 case ECORE_MCAST_CMD_DEL:
 3187                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
 3188                         break;
 3189 
 3190                 case ECORE_MCAST_CMD_RESTORE:
 3191                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
 3192                                                            &cnt);
 3193                         break;
 3194 
 3195                 default:
 3196                         ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
 3197                         return ECORE_INVAL;
 3198                 }
 3199 
 3200                 /* If the command has been completed - remove it from the list
 3201                  * and free the memory
 3202                  */
 3203                 if (cmd_pos->done) {
 3204                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
 3205                                                 &o->pending_cmds_head);
 3206                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
 3207                 }
 3208 
 3209                 /* Break if we reached the maximum number of rules */
 3210                 if (cnt >= o->max_cmd_len)
 3211                         break;
 3212         }
 3213 
 3214         return cnt;
 3215 }
 3216 
 3217 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
 3218         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
 3219         int *line_idx)
 3220 {
 3221         struct ecore_mcast_list_elem *mlist_pos;
 3222         union ecore_mcast_config_data cfg_data = {NULL};
 3223         int cnt = *line_idx;
 3224 
 3225         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
 3226                                   struct ecore_mcast_list_elem) {
 3227                 cfg_data.mac = mlist_pos->mac;
 3228                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
 3229 
 3230                 cnt++;
 3231 
 3232                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
 3233                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
 3234         }
 3235 
 3236         *line_idx = cnt;
 3237 }
 3238 
 3239 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
 3240         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
 3241         int *line_idx)
 3242 {
 3243         int cnt = *line_idx, i;
 3244 
 3245         for (i = 0; i < p->mcast_list_len; i++) {
 3246                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
 3247 
 3248                 cnt++;
 3249 
 3250                 ECORE_MSG(sc, "Deleting MAC. %d left\n",
 3251                           p->mcast_list_len - i - 1);
 3252         }
 3253 
 3254         *line_idx = cnt;
 3255 }
 3256 
 3257 /**
 3258  * ecore_mcast_handle_current_cmd -
 3259  *
 3260  * @sc:         device handle
 3261  * @p:
 3262  * @cmd:
 3263  * @start_cnt:  first line in the ramrod data that may be used
 3264  *
 3265  * This function is called iff there is enough room for the current command in
 3266  * the ramrod data.
 3267  * Returns number of lines filled in the ramrod data in total.
 3268  */
 3269 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
 3270                         struct ecore_mcast_ramrod_params *p,
 3271                         enum ecore_mcast_cmd cmd,
 3272                         int start_cnt)
 3273 {
 3274         struct ecore_mcast_obj *o = p->mcast_obj;
 3275         int cnt = start_cnt;
 3276 
 3277         ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
 3278 
 3279         switch (cmd) {
 3280         case ECORE_MCAST_CMD_ADD:
 3281                 ecore_mcast_hdl_add(sc, o, p, &cnt);
 3282                 break;
 3283 
 3284         case ECORE_MCAST_CMD_DEL:
 3285                 ecore_mcast_hdl_del(sc, o, p, &cnt);
 3286                 break;
 3287 
 3288         case ECORE_MCAST_CMD_RESTORE:
 3289                 o->hdl_restore(sc, o, 0, &cnt);
 3290                 break;
 3291 
 3292         default:
 3293                 ECORE_ERR("Unknown command: %d\n", cmd);
 3294                 return ECORE_INVAL;
 3295         }
 3296 
 3297         /* The current command has been handled */
 3298         p->mcast_list_len = 0;
 3299 
 3300         return cnt;
 3301 }
 3302 
 3303 static int ecore_mcast_validate_e2(struct bxe_softc *sc,
 3304                                    struct ecore_mcast_ramrod_params *p,
 3305                                    enum ecore_mcast_cmd cmd)
 3306 {
 3307         struct ecore_mcast_obj *o = p->mcast_obj;
 3308         int reg_sz = o->get_registry_size(o);
 3309 
 3310         switch (cmd) {
 3311         /* DEL command deletes all currently configured MACs */
 3312         case ECORE_MCAST_CMD_DEL:
 3313                 o->set_registry_size(o, 0);
 3314                 /* Don't break */
 3315 
 3316         /* RESTORE command will restore the entire multicast configuration */
 3317         case ECORE_MCAST_CMD_RESTORE:
 3318                 /* Here we set the approximate amount of work to do,
 3319                  * which may in fact be less: some MACs in postponed
 3320                  * ADD command(s) scheduled before this command may
 3321                  * fall into the same bin, so the actual number of set
 3322                  * bins in the registry may be smaller than estimated.
 3323                  * See ecore_mcast_set_one_rule_e2() for details.
 3324                  */
 3325                 p->mcast_list_len = reg_sz;
 3326                 break;
 3327 
 3328         case ECORE_MCAST_CMD_ADD:
 3329         case ECORE_MCAST_CMD_CONT:
 3330                 /* Here we assume that all new MACs will fall into new bins.
 3331                  * However we will correct the real registry size after we
 3332                  * handle all pending commands.
 3333                  */
 3334                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
 3335                 break;
 3336 
 3337         default:
 3338                 ECORE_ERR("Unknown command: %d\n", cmd);
 3339                 return ECORE_INVAL;
 3340         }
 3341 
 3342         /* Increase the total number of MACs pending to be configured */
 3343         o->total_pending_num += p->mcast_list_len;
 3344 
 3345         return ECORE_SUCCESS;
 3346 }
 3347 
 3348 static void ecore_mcast_revert_e2(struct bxe_softc *sc,
 3349                                       struct ecore_mcast_ramrod_params *p,
 3350                                       int old_num_bins)
 3351 {
 3352         struct ecore_mcast_obj *o = p->mcast_obj;
 3353 
 3354         o->set_registry_size(o, old_num_bins);
 3355         o->total_pending_num -= p->mcast_list_len;
 3356 }
 3357 
 3358 /**
 3359  * ecore_mcast_set_rdata_hdr_e2 - sets the header values
 3360  *
 3361  * @sc:         device handle
 3362  * @p:
 3363  * @len:        number of rules to handle
 3364  */
 3365 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
 3366                                         struct ecore_mcast_ramrod_params *p,
 3367                                         uint8_t len)
 3368 {
 3369         struct ecore_raw_obj *r = &p->mcast_obj->raw;
 3370         struct eth_multicast_rules_ramrod_data *data =
 3371                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
 3372 
 3373         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
 3374                                         (ECORE_FILTER_MCAST_PENDING <<
 3375                                          ECORE_SWCID_SHIFT));
 3376         data->header.rule_cnt = len;
 3377 }
 3378 
 3379 /**
 3380  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 3381  *
 3382  * @sc:         device handle
 3383  * @o:
 3384  *
 3385  * Recalculate the actual number of set bins in the registry using Brian
 3386  * Kernighan's algorithm: its running time is proportional to the number of set bins.
 3387  *
 3388  * returns 0 to comply with ecore_mcast_refresh_registry_e1().
 3389  */
 3390 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
 3391                                                   struct ecore_mcast_obj *o)
 3392 {
 3393         int i, cnt = 0;
 3394         uint64_t elem;
 3395 
 3396         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
 3397                 elem = o->registry.aprox_match.vec[i];
 3398                 for (; elem; cnt++)
 3399                         elem &= elem - 1;
 3400         }
 3401 
 3402         o->set_registry_size(o, cnt);
 3403 
 3404         return ECORE_SUCCESS;
 3405 }
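
/*
 * The Kernighan step above in isolation: `x &= x - 1` clears the
 * lowest set bit, so the loop runs once per set bit rather than once
 * per bit position. A stand-alone sketch:
 */
static int demo_popcount64(uint64_t x)
{
        int cnt = 0;

        while (x != 0) {
                x &= x - 1;     /* drop the lowest set bit */
                cnt++;
        }
        return cnt;
}

/* e.g. demo_popcount64(0xb0) == 3: bits 4, 5 and 7 are set */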
 3406 
 3407 static int ecore_mcast_setup_e2(struct bxe_softc *sc,
 3408                                 struct ecore_mcast_ramrod_params *p,
 3409                                 enum ecore_mcast_cmd cmd)
 3410 {
 3411         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
 3412         struct ecore_mcast_obj *o = p->mcast_obj;
 3413         struct eth_multicast_rules_ramrod_data *data =
 3414                 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
 3415         int cnt = 0, rc;
 3416 
 3417         /* Reset the ramrod data buffer */
 3418         ECORE_MEMSET(data, 0, sizeof(*data));
 3419 
 3420         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
 3421 
 3422         /* If there are no more pending commands - clear SCHEDULED state */
 3423         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
 3424                 o->clear_sched(o);
 3425 
 3426         /* The below may be TRUE iff there was enough room in ramrod
 3427          * data for all pending commands and for the current
 3428          * command. Otherwise the current command would have been added
 3429          * to the pending commands and p->mcast_list_len would have been
 3430          * zeroed.
 3431          */
 3432         if (p->mcast_list_len > 0)
 3433                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
 3434 
 3435         /* We've pulled out some MACs - update the total number of
 3436          * outstanding.
 3437          */
 3438         o->total_pending_num -= cnt;
 3439 
 3440         /* send a ramrod */
 3441         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
 3442         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
 3443 
 3444         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
 3445 
 3446         /* Update a registry size if there are no more pending operations.
 3447          *
 3448          * We don't want to change the value of the registry size if there are
 3449          * pending operations because we want it to always be equal to the
 3450          * exact or the approximate number (see ecore_mcast_validate_e2()) of
 3451          * set bins after the last requested operation in order to properly
 3452          * evaluate the size of the next DEL/RESTORE operation.
 3453          *
 3454          * Note that we update the registry itself during command(s) handling
 3455          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
 3456          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
 3457          * with a limited amount of update commands (per MAC/bin) and we don't
 3458          * know in this scope what the actual state of bins configuration is
 3459          * going to be after this ramrod.
 3460          */
 3461         if (!o->total_pending_num)
 3462                 ecore_mcast_refresh_registry_e2(sc, o);
 3463 
 3464         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
 3465          * RAMROD_PENDING status immediately.
 3466          */
 3467         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
 3468                 raw->clear_pending(raw);
 3469                 return ECORE_SUCCESS;
 3470         } else {
 3471                 /* No need for an explicit memory barrier here as long as we
 3472                  * ensure the ordering of writing to the SPQ element
 3473                  * and updating of the SPQ producer which involves a memory
 3474                  * read. If the memory read is removed we will have to put a
 3475                  * full memory barrier there (inside ecore_sp_post()).
 3476                  */
 3477 
 3478                 /* Send a ramrod */
 3479                 rc = ecore_sp_post(sc,
 3480                                     RAMROD_CMD_ID_ETH_MULTICAST_RULES,
 3481                                     raw->cid,
 3482                                     raw->rdata_mapping,
 3483                                     ETH_CONNECTION_TYPE);
 3484                 if (rc)
 3485                         return rc;
 3486 
 3487                 /* Ramrod completion is pending */
 3488                 return ECORE_PENDING;
 3489         }
 3490 }
 3491 
 3492 static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
 3493                                     struct ecore_mcast_ramrod_params *p,
 3494                                     enum ecore_mcast_cmd cmd)
 3495 {
 3496         /* Mark that there is work to do */
 3497         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
 3498                 p->mcast_list_len = 1;
 3499 
 3500         return ECORE_SUCCESS;
 3501 }
 3502 
 3503 static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
 3504                                        struct ecore_mcast_ramrod_params *p,
 3505                                        int old_num_bins)
 3506 {
 3507         /* Do nothing */
 3508 }
 3509 
 3510 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
 3511 do { \
 3512         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
 3513 } while (0)
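
/*
 * What the macro above does, spelled out: the 256-bin hash table is
 * stored as uint32_t words, `bit >> 5` (bit / 32) picks the word and
 * `bit & 0x1f` (bit % 32) the position inside it. The helper name is
 * illustrative.
 */
static void demo_set_filter_bit(uint32_t *filter, int bit)
{
        filter[bit >> 5] |= 1U << (bit & 0x1f);
}

/* e.g. bit 77 lands in filter[2] as mask (1 << 13) */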
 3514 
 3515 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
 3516                                            struct ecore_mcast_obj *o,
 3517                                            struct ecore_mcast_ramrod_params *p,
 3518                                            uint32_t *mc_filter)
 3519 {
 3520         struct ecore_mcast_list_elem *mlist_pos;
 3521         int bit;
 3522 
 3523         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
 3524                                   struct ecore_mcast_list_elem) {
 3525                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
 3526                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
 3527 
 3528                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
 3529                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
 3530 
 3531                 /* bookkeeping... */
 3532                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
 3533                                   bit);
 3534         }
 3535 }
 3536 
 3537 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
 3538         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
 3539         uint32_t *mc_filter)
 3540 {
 3541         int bit;
 3542 
 3543         for (bit = ecore_mcast_get_next_bin(o, 0);
 3544              bit >= 0;
 3545              bit = ecore_mcast_get_next_bin(o, bit + 1)) {
 3546                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
 3547                 ECORE_MSG(sc, "About to set bin %d\n", bit);
 3548         }
 3549 }
 3550 
 3551 /* On 57711 we write the multicast MACs' approximate match
 3552  * table directly into the TSTORM's internal RAM, so no extra
 3553  * tricks are needed to make it work.
 3554  */
 3555 static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
 3556                                  struct ecore_mcast_ramrod_params *p,
 3557                                  enum ecore_mcast_cmd cmd)
 3558 {
 3559         int i;
 3560         struct ecore_mcast_obj *o = p->mcast_obj;
 3561         struct ecore_raw_obj *r = &o->raw;
 3562 
 3563         /* If CLEAR_ONLY has been requested, only clear the registry
 3564          * and the pending bit; otherwise build and write the filter.
 3565          */
 3566         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
 3567                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
 3568 
 3569                 /* Set the multicast filter bits before writing it into
 3570                  * the internal memory.
 3571                  */
 3572                 switch (cmd) {
 3573                 case ECORE_MCAST_CMD_ADD:
 3574                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
 3575                         break;
 3576 
 3577                 case ECORE_MCAST_CMD_DEL:
 3578                         ECORE_MSG(sc,
 3579                                   "Invalidating multicast MACs configuration\n");
 3580 
 3581                         /* clear the registry */
 3582                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
 3583                                sizeof(o->registry.aprox_match.vec));
 3584                         break;
 3585 
 3586                 case ECORE_MCAST_CMD_RESTORE:
 3587                         ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
 3588                         break;
 3589 
 3590                 default:
 3591                         ECORE_ERR("Unknown command: %d\n", cmd);
 3592                         return ECORE_INVAL;
 3593                 }
 3594 
 3595                 /* Set the mcast filter in the internal memory */
 3596                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
 3597                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
 3598         } else
 3599                 /* clear the registry */
 3600                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
 3601                        sizeof(o->registry.aprox_match.vec));
 3602 
 3603         /* We are done */
 3604         r->clear_pending(r);
 3605 
 3606         return ECORE_SUCCESS;
 3607 }
 3608 
 3609 static int ecore_mcast_validate_e1(struct bxe_softc *sc,
 3610                                    struct ecore_mcast_ramrod_params *p,
 3611                                    enum ecore_mcast_cmd cmd)
 3612 {
 3613         struct ecore_mcast_obj *o = p->mcast_obj;
 3614         int reg_sz = o->get_registry_size(o);
 3615 
 3616         switch (cmd) {
 3617         /* DEL command deletes all currently configured MACs */
 3618         case ECORE_MCAST_CMD_DEL:
 3619                 o->set_registry_size(o, 0);
 3620                 /* Don't break */
 3621 
 3622         /* RESTORE command will restore the entire multicast configuration */
 3623         case ECORE_MCAST_CMD_RESTORE:
 3624                 p->mcast_list_len = reg_sz;
 3625                 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
 3626                           cmd, p->mcast_list_len);
 3627                 break;
 3628 
 3629         case ECORE_MCAST_CMD_ADD:
 3630         case ECORE_MCAST_CMD_CONT:
 3631                 /* Multicast MACs on 57710 are configured as unicast MACs and
 3632                  * there is only a limited number of CAM entries for that
 3633                  * matter.
 3634                  */
 3635                 if (p->mcast_list_len > o->max_cmd_len) {
 3636                         ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
 3637                                   o->max_cmd_len);
 3638                         return ECORE_INVAL;
 3639                 }
 3640                 /* Every configured MAC should be cleared if the DEL
 3641                  * command is called. Only the last ADD command matters,
 3642                  * since every ADD overrides the previous configuration.
 3643                  */
 3644                 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
 3645                 if (p->mcast_list_len > 0)
 3646                         o->set_registry_size(o, p->mcast_list_len);
 3647 
 3648                 break;
 3649 
 3650         default:
 3651                 ECORE_ERR("Unknown command: %d\n", cmd);
 3652                 return ECORE_INVAL;
 3653         }
 3654 
 3655         /* We want to ensure that commands are executed one by one for 57710.
 3656          * Therefore each non-empty command will consume o->max_cmd_len.
 3657          */
 3658         if (p->mcast_list_len)
 3659                 o->total_pending_num += o->max_cmd_len;
 3660 
 3661         return ECORE_SUCCESS;
 3662 }
 3663 
 3664 static void ecore_mcast_revert_e1(struct bxe_softc *sc,
 3665                                       struct ecore_mcast_ramrod_params *p,
 3666                                       int old_num_macs)
 3667 {
 3668         struct ecore_mcast_obj *o = p->mcast_obj;
 3669 
 3670         o->set_registry_size(o, old_num_macs);
 3671 
 3672         /* If the current command hasn't been handled yet and we are
 3673          * here, it is meant to be dropped, so we have to update the
 3674          * number of outstanding MACs accordingly.
 3675          */
 3676         if (p->mcast_list_len)
 3677                 o->total_pending_num -= o->max_cmd_len;
 3678 }
 3679 
 3680 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
 3681                                         struct ecore_mcast_obj *o, int idx,
 3682                                         union ecore_mcast_config_data *cfg_data,
 3683                                         enum ecore_mcast_cmd cmd)
 3684 {
 3685         struct ecore_raw_obj *r = &o->raw;
 3686         struct mac_configuration_cmd *data =
 3687                 (struct mac_configuration_cmd *)(r->rdata);
 3688 
 3689         /* copy mac */
 3690         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
 3691                 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
 3692                                       &data->config_table[idx].middle_mac_addr,
 3693                                       &data->config_table[idx].lsb_mac_addr,
 3694                                       cfg_data->mac);
 3695 
 3696                 data->config_table[idx].vlan_id = 0;
 3697                 data->config_table[idx].pf_id = r->func_id;
 3698                 data->config_table[idx].clients_bit_vector =
 3699                         ECORE_CPU_TO_LE32(1 << r->cl_id);
 3700 
 3701                 ECORE_SET_FLAG(data->config_table[idx].flags,
 3702                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 3703                                T_ETH_MAC_COMMAND_SET);
 3704         }
 3705 }
 3706 
 3707 /**
 3708  * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
 3709  *
 3710  * @sc:         device handle
 3711  * @p:
 3712  * @len:        number of rules to handle
 3713  */
 3714 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
 3715                                         struct ecore_mcast_ramrod_params *p,
 3716                                         uint8_t len)
 3717 {
 3718         struct ecore_raw_obj *r = &p->mcast_obj->raw;
 3719         struct mac_configuration_cmd *data =
 3720                 (struct mac_configuration_cmd *)(r->rdata);
 3721 
 3722         uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
 3723                      ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
 3724                      ECORE_MAX_MULTICAST*(1 + r->func_id));
 3725 
 3726         data->hdr.offset = offset;
 3727         data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
 3728         data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
 3729                                      (ECORE_FILTER_MCAST_PENDING <<
 3730                                       ECORE_SWCID_SHIFT));
 3731         data->hdr.length = len;
 3732 }
 3733 
 3734 /**
 3735  * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
 3736  *
 3737  * @sc:         device handle
 3738  * @o:
 3739  * @start_idx:  index in the registry to start from
 3740  * @rdata_idx:  index in the ramrod data to start from
 3741  *
 3742  * The restore command for 57710 is, like all other commands, always a
 3743  * standalone command, so start_idx and rdata_idx will always be 0. This
 3744  * function always succeeds.
 3745  * Returns -1 to comply with the 57712 variant.
 3746  */
 3747 static inline int ecore_mcast_handle_restore_cmd_e1(
 3748         struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_idx,
 3749         int *rdata_idx)
 3750 {
 3751         struct ecore_mcast_mac_elem *elem;
 3752         int i = 0;
 3753         union ecore_mcast_config_data cfg_data = {NULL};
 3754 
 3755         /* go through the registry and configure the MACs from it. */
 3756         ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
 3757                                   struct ecore_mcast_mac_elem) {
 3758                 cfg_data.mac = &elem->mac[0];
 3759                 o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
 3760 
 3761                 i++;
 3762 
 3763                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
 3764                           cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
 3765         }
 3766 
 3767         *rdata_idx = i;
 3768 
 3769         return -1;
 3770 }
 3771 
 3772 static inline int ecore_mcast_handle_pending_cmds_e1(
 3773         struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
 3774 {
 3775         struct ecore_pending_mcast_cmd *cmd_pos;
 3776         struct ecore_mcast_mac_elem *pmac_pos;
 3777         struct ecore_mcast_obj *o = p->mcast_obj;
 3778         union ecore_mcast_config_data cfg_data = {NULL};
 3779         int cnt = 0;
 3780 
 3781         /* If nothing to be done - return */
 3782         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
 3783                 return 0;
 3784 
 3785         /* Handle the first command */
 3786         cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
 3787                                          struct ecore_pending_mcast_cmd, link);
 3788 
 3789         switch (cmd_pos->type) {
 3790         case ECORE_MCAST_CMD_ADD:
 3791                 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
 3792                                           link, struct ecore_mcast_mac_elem) {
 3793                         cfg_data.mac = &pmac_pos->mac[0];
 3794                         o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
 3795 
 3796                         cnt++;
 3797 
 3798                         ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
 3799                                   pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
 3800                 }
 3801                 break;
 3802 
 3803         case ECORE_MCAST_CMD_DEL:
 3804                 cnt = cmd_pos->data.macs_num;
 3805                 ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
 3806                 break;
 3807 
 3808         case ECORE_MCAST_CMD_RESTORE:
 3809                 o->hdl_restore(sc, o, 0, &cnt);
 3810                 break;
 3811 
 3812         default:
 3813                 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
 3814                 return ECORE_INVAL;
 3815         }
 3816 
 3817         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
 3818         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
 3819 
 3820         return cnt;
 3821 }
 3822 
 3823 /**
 3824  * ecore_get_fw_mac_addr - reverse ecore_set_fw_mac_addr().
 3825  *
 3826  * @fw_hi:
 3827  * @fw_mid:
 3828  * @fw_lo:
 3829  * @mac:
 3830  */
 3831 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
 3832                                          uint16_t *fw_lo, uint8_t *mac)
 3833 {
 3834         mac[1] = ((uint8_t *)fw_hi)[0];
 3835         mac[0] = ((uint8_t *)fw_hi)[1];
 3836         mac[3] = ((uint8_t *)fw_mid)[0];
 3837         mac[2] = ((uint8_t *)fw_mid)[1];
 3838         mac[5] = ((uint8_t *)fw_lo)[0];
 3839         mac[4] = ((uint8_t *)fw_lo)[1];
 3840 }
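
/*
 * A hedged round-trip sketch: judging from the getter above, each
 * 16-bit half of the MAC is stored byte-swapped, so the matching
 * setter (demo_set_fw_mac_addr(), an illustrative stand-in for
 * ecore_set_fw_mac_addr()) performs the same swap in the opposite
 * direction; set followed by get returns the original six bytes.
 */
static inline void demo_set_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
                                        uint16_t *fw_lo, const uint8_t *mac)
{
        ((uint8_t *)fw_hi)[0]  = mac[1];
        ((uint8_t *)fw_hi)[1]  = mac[0];
        ((uint8_t *)fw_mid)[0] = mac[3];
        ((uint8_t *)fw_mid)[1] = mac[2];
        ((uint8_t *)fw_lo)[0]  = mac[5];
        ((uint8_t *)fw_lo)[1]  = mac[4];
}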
 3841 
 3842 /**
 3843  * ecore_mcast_refresh_registry_e1 -
 3844  *
 3845  * @sc:         device handle
 3846  * @o:
 3847  *
 3848  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 3849  * and update the registry correspondingly: if ADD - allocate memory and add
 3850  * the entries to the registry (list), if DELETE - clear the registry and free
 3851  * the memory.
 3852  */
 3853 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
 3854                                                   struct ecore_mcast_obj *o)
 3855 {
 3856         struct ecore_raw_obj *raw = &o->raw;
 3857         struct ecore_mcast_mac_elem *elem;
 3858         struct mac_configuration_cmd *data =
 3859                         (struct mac_configuration_cmd *)(raw->rdata);
 3860 
 3861         /* If first entry contains a SET bit - the command was ADD,
 3862          * otherwise - DEL_ALL
 3863          */
 3864         if (ECORE_GET_FLAG(data->config_table[0].flags,
 3865                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
 3866                 int i, len = data->hdr.length;
 3867 
 3868                 /* Break if it was a RESTORE command */
 3869                 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
 3870                         return ECORE_SUCCESS;
 3871 
 3872                 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
 3873                 if (!elem) {
 3874                         ECORE_ERR("Failed to allocate registry memory\n");
 3875                         return ECORE_NOMEM;
 3876                 }
 3877 
 3878                 for (i = 0; i < len; i++, elem++) {
 3879                         ecore_get_fw_mac_addr(
 3880                                 &data->config_table[i].msb_mac_addr,
 3881                                 &data->config_table[i].middle_mac_addr,
 3882                                 &data->config_table[i].lsb_mac_addr,
 3883                                 elem->mac);
 3884                         ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
 3885                                   elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
 3886                         ECORE_LIST_PUSH_TAIL(&elem->link,
 3887                                              &o->registry.exact_match.macs);
 3888                 }
 3889         } else {
 3890                 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
 3891                                               struct ecore_mcast_mac_elem,
 3892                                               link);
 3893                 ECORE_MSG(sc, "Deleting a registry\n");
 3894                 ECORE_FREE(sc, elem, sizeof(*elem));
 3895                 ECORE_LIST_INIT(&o->registry.exact_match.macs);
 3896         }
 3897 
 3898         return ECORE_SUCCESS;
 3899 }
 3900 
 3901 static int ecore_mcast_setup_e1(struct bxe_softc *sc,
 3902                                 struct ecore_mcast_ramrod_params *p,
 3903                                 enum ecore_mcast_cmd cmd)
 3904 {
 3905         struct ecore_mcast_obj *o = p->mcast_obj;
 3906         struct ecore_raw_obj *raw = &o->raw;
 3907         struct mac_configuration_cmd *data =
 3908                 (struct mac_configuration_cmd *)(raw->rdata);
 3909         int cnt = 0, i, rc;
 3910 
 3911         /* Reset the ramrod data buffer */
 3912         ECORE_MEMSET(data, 0, sizeof(*data));
 3913 
 3914         /* First set all entries as invalid */
 3915         for (i = 0; i < o->max_cmd_len ; i++)
 3916                 ECORE_SET_FLAG(data->config_table[i].flags,
 3917                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 3918                         T_ETH_MAC_COMMAND_INVALIDATE);
 3919 
 3920         /* Handle pending commands first */
 3921         cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
 3922 
 3923         /* If there are no more pending commands - clear SCHEDULED state */
 3924         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
 3925                 o->clear_sched(o);
 3926 
 3927         /* cnt can be zero here only if there were no pending commands */
 3928         if (!cnt)
 3929                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
 3930 
 3931         /* For 57710 every command has o->max_cmd_len length to ensure that
 3932          * commands are done one at a time.
 3933          */
 3934         o->total_pending_num -= o->max_cmd_len;
 3935 
 3936         /* send a ramrod */
 3937 
 3938         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
 3939 
 3940         /* Set ramrod header (in particular, a number of entries to update) */
 3941         ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
 3942 
 3943         /* Update the registry: the registry contents must always be up
 3944          * to date in order to be able to execute a RESTORE opcode. Here
 3945          * we use the fact that for 57710 we send one command at a time,
 3946          * hence we may take the registry update out of the command handling
 3947          * and do it in a simpler way here.
 3948          */
 3949         rc = ecore_mcast_refresh_registry_e1(sc, o);
 3950         if (rc)
 3951                 return rc;
 3952 
 3953         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
 3954          * RAMROD_PENDING status immediately.
 3955          */
 3956         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
 3957                 raw->clear_pending(raw);
 3958                 return ECORE_SUCCESS;
 3959         } else {
 3960                 /* No need for an explicit memory barrier here as long as we
 3961                  * ensure the ordering of writing to the SPQ element
 3962                  * and updating of the SPQ producer which involves a memory
 3963                  * read. If the memory read is removed we will have to put a
 3964                  * full memory barrier there (inside ecore_sp_post()).
 3965                  */
 3966 
 3967                 /* Send a ramrod */
 3968                 rc = ecore_sp_post( sc,
 3969                                     RAMROD_CMD_ID_ETH_SET_MAC,
 3970                                     raw->cid,
 3971                                     raw->rdata_mapping,
 3972                                     ETH_CONNECTION_TYPE);
 3973                 if (rc)
 3974                         return rc;
 3975 
 3976                 /* Ramrod completion is pending */
 3977                 return ECORE_PENDING;
 3978         }
 3979 }
 3980 
 3981 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
 3982 {
 3983         return o->registry.exact_match.num_macs_set;
 3984 }
 3985 
 3986 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
 3987 {
 3988         return o->registry.aprox_match.num_bins_set;
 3989 }
 3990 
 3991 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
 3992                                                 int n)
 3993 {
 3994         o->registry.exact_match.num_macs_set = n;
 3995 }
 3996 
 3997 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
 3998                                                 int n)
 3999 {
 4000         o->registry.aprox_match.num_bins_set = n;
 4001 }
 4002 
 4003 int ecore_config_mcast(struct bxe_softc *sc,
 4004                        struct ecore_mcast_ramrod_params *p,
 4005                        enum ecore_mcast_cmd cmd)
 4006 {
 4007         struct ecore_mcast_obj *o = p->mcast_obj;
 4008         struct ecore_raw_obj *r = &o->raw;
 4009         int rc = 0, old_reg_size;
 4010 
 4011         /* This is needed to recover number of currently configured mcast macs
 4012          * in case of failure.
 4013          */
 4014         old_reg_size = o->get_registry_size(o);
 4015 
 4016         /* Do some calculations and checks */
 4017         rc = o->validate(sc, p, cmd);
 4018         if (rc)
 4019                 return rc;
 4020 
 4021         /* Return if there is no work to do */
 4022         if ((!p->mcast_list_len) && (!o->check_sched(o)))
 4023                 return ECORE_SUCCESS;
 4024 
 4025         ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
 4026                   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
 4027 
 4028         /* Enqueue the current command to the pending list if we can't complete
 4029          * it in the current iteration
 4030          */
 4031         if (r->check_pending(r) ||
 4032             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
 4033                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
 4034                 if (rc < 0)
 4035                         goto error_exit1;
 4036 
 4037                 /* As long as the current command is in a command list we
 4038                  * don't need to handle it separately.
 4039                  */
 4040                 p->mcast_list_len = 0;
 4041         }
 4042 
 4043         if (!r->check_pending(r)) {
 4044 
 4045                 /* Set 'pending' state */
 4046                 r->set_pending(r);
 4047 
 4048                 /* Configure the new classification in the chip */
 4049                 rc = o->config_mcast(sc, p, cmd);
 4050                 if (rc < 0)
 4051                         goto error_exit2;
 4052 
 4053                 /* Wait for a ramrod completion if was requested */
 4054                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
 4055                         rc = o->wait_comp(sc, o);
 4056         }
 4057 
 4058         return rc;
 4059 
 4060 error_exit2:
 4061         r->clear_pending(r);
 4062 
 4063 error_exit1:
 4064         o->revert(sc, p, old_reg_size);
 4065 
 4066         return rc;
 4067 }
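/* A minimal caller sketch (hedged: the softc member is hypothetical and the
 * setup of p->mcast_list is elided): add multicast MACs and wait for the
 * ramrod to complete:
 *
 *      struct ecore_mcast_ramrod_params rparams = { 0 };
 *
 *      rparams.mcast_obj = &sc->mcast_obj;   // hypothetical softc field
 *      rparams.mcast_list_len = 1;           // mcast_list setup elided
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparams.ramrod_flags);
 *      rc = ecore_config_mcast(sc, &rparams, ECORE_MCAST_CMD_ADD);
 */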
 4068 
 4069 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
 4070 {
 4071         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
 4072         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
 4073         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 4074 }
 4075 
 4076 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
 4077 {
 4078         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
 4079         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
 4080         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 4081 }
 4082 
 4083 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
 4084 {
 4085         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
 4086 }
 4087 
 4088 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
 4089 {
 4090         return o->raw.check_pending(&o->raw) || o->check_sched(o);
 4091 }
 4092 
 4093 void ecore_init_mcast_obj(struct bxe_softc *sc,
 4094                           struct ecore_mcast_obj *mcast_obj,
 4095                           uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
 4096                           uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
 4097                           int state, unsigned long *pstate, ecore_obj_type type)
 4098 {
 4099         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
 4100 
 4101         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
 4102                            rdata, rdata_mapping, state, pstate, type);
 4103 
 4104         mcast_obj->engine_id = engine_id;
 4105 
 4106         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
 4107 
 4108         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
 4109         mcast_obj->check_sched = ecore_mcast_check_sched;
 4110         mcast_obj->set_sched = ecore_mcast_set_sched;
 4111         mcast_obj->clear_sched = ecore_mcast_clear_sched;
 4112 
 4113         if (CHIP_IS_E1(sc)) {
 4114                 mcast_obj->config_mcast      = ecore_mcast_setup_e1;
 4115                 mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
 4116                 mcast_obj->hdl_restore       =
 4117                         ecore_mcast_handle_restore_cmd_e1;
 4118                 mcast_obj->check_pending     = ecore_mcast_check_pending;
 4119 
 4120                 if (CHIP_REV_IS_SLOW(sc))
 4121                         mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
 4122                 else
 4123                         mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
 4124 
 4125                 mcast_obj->wait_comp         = ecore_mcast_wait;
 4126                 mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
 4127                 mcast_obj->validate          = ecore_mcast_validate_e1;
 4128                 mcast_obj->revert            = ecore_mcast_revert_e1;
 4129                 mcast_obj->get_registry_size =
 4130                         ecore_mcast_get_registry_size_exact;
 4131                 mcast_obj->set_registry_size =
 4132                         ecore_mcast_set_registry_size_exact;
 4133 
 4134                 /* 57710 is the only chip that uses the exact match for mcast
 4135                  * at the moment.
 4136                  */
 4137                 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
 4138 
 4139         } else if (CHIP_IS_E1H(sc)) {
 4140                 mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
 4141                 mcast_obj->enqueue_cmd   = NULL;
 4142                 mcast_obj->hdl_restore   = NULL;
 4143                 mcast_obj->check_pending = ecore_mcast_check_pending;
 4144 
 4145                 /* 57711 doesn't send a ramrod, so it has unlimited credit
 4146                  * for one command.
 4147                  */
 4148                 mcast_obj->max_cmd_len       = -1;
 4149                 mcast_obj->wait_comp         = ecore_mcast_wait;
 4150                 mcast_obj->set_one_rule      = NULL;
 4151                 mcast_obj->validate          = ecore_mcast_validate_e1h;
 4152                 mcast_obj->revert            = ecore_mcast_revert_e1h;
 4153                 mcast_obj->get_registry_size =
 4154                         ecore_mcast_get_registry_size_aprox;
 4155                 mcast_obj->set_registry_size =
 4156                         ecore_mcast_set_registry_size_aprox;
 4157         } else {
 4158                 mcast_obj->config_mcast      = ecore_mcast_setup_e2;
 4159                 mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
 4160                 mcast_obj->hdl_restore       =
 4161                         ecore_mcast_handle_restore_cmd_e2;
 4162                 mcast_obj->check_pending     = ecore_mcast_check_pending;
 4163                 /* TODO: There should be a proper HSI define for this number!!!
 4164                  */
 4165                 mcast_obj->max_cmd_len       = 16;
 4166                 mcast_obj->wait_comp         = ecore_mcast_wait;
 4167                 mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
 4168                 mcast_obj->validate          = ecore_mcast_validate_e2;
 4169                 mcast_obj->revert            = ecore_mcast_revert_e2;
 4170                 mcast_obj->get_registry_size =
 4171                         ecore_mcast_get_registry_size_aprox;
 4172                 mcast_obj->set_registry_size =
 4173                         ecore_mcast_set_registry_size_aprox;
 4174         }
 4175 }
 4176 
 4177 /*************************** Credit handling **********************************/
 4178 
 4179 /**
 4180  * __atomic_add_ifless - add if the result is less than a given value.
 4181  *
 4182  * @v:  pointer of type ecore_atomic_t
 4183  * @a:  the amount to add to v...
 4184  * @u:  ...if (v + a) is less than u.
 4185  *
 4186  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
 4187  *
 4188  */
 4189 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
 4190 {
 4191         int c, old;
 4192 
 4193         c = ECORE_ATOMIC_READ(v);
 4194         for (;;) {
 4195                 if (ECORE_UNLIKELY(c + a >= u))
 4196                         return FALSE;
 4197 
 4198                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
 4199                 if (ECORE_LIKELY(old == c))
 4200                         break;
 4201                 c = old;
 4202         }
 4203 
 4204         return TRUE;
 4205 }
 4206 
 4207 /**
 4208  * __atomic_dec_ifmoe - subtract if the result is greater than or equal to a given value.
 4209  *
 4210  * @v:  pointer of type ecore_atomic_t
 4211  * @a:  the amount to subtract from v...
 4212  * @u:  ...if (v - a) is greater than or equal to u.
 4213  *
 4214  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
 4215  * otherwise.
 4216  */
 4217 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
 4218 {
 4219         int c, old;
 4220 
 4221         c = ECORE_ATOMIC_READ(v);
 4222         for (;;) {
 4223                 if (ECORE_UNLIKELY(c - a < u))
 4224                         return FALSE;
 4225 
 4226                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
 4227                 if (ECORE_LIKELY(old == c))
 4228                         break;
 4229                 c = old;
 4230         }
 4231 
 4232         return TRUE;
 4233 }
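/* Both helpers above are instances of the classic compare-and-swap retry
 * loop. A minimal sketch of the same pattern in portable C11 atomics
 * (illustrative only, not part of this driver):
 *
 *      #include <stdatomic.h>
 *      #include <stdbool.h>
 *
 *      static bool add_ifless(atomic_int *v, int a, int u)
 *      {
 *              int c = atomic_load(v);
 *
 *              // on failure, atomic_compare_exchange_weak reloads c
 *              while (c + a < u)
 *                      if (atomic_compare_exchange_weak(v, &c, c + a))
 *                              return true;
 *              return false;
 *      }
 */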
 4234 
 4235 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
 4236 {
 4237         bool rc;
 4238 
 4239         ECORE_SMP_MB();
 4240         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
 4241         ECORE_SMP_MB();
 4242 
 4243         return rc;
 4244 }
 4245 
 4246 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
 4247 {
 4248         bool rc;
 4249 
 4250         ECORE_SMP_MB();
 4251 
 4252         /* Don't allow a refill if credit + cnt > pool_sz */
 4253         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
 4254 
 4255         ECORE_SMP_MB();
 4256 
 4257         return rc;
 4258 }
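/* Hedged usage sketch of the two accessors above (error handling is
 * illustrative): reserve credit before issuing a classification command and
 * return it if the command cannot be sent:
 *
 *      if (!o->get(o, 1))
 *              return ECORE_NOMEM;   // pool exhausted
 *      ...
 *      if (rc)
 *              o->put(o, 1);         // roll the credit back
 */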
 4259 
 4260 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
 4261 {
 4262         int cur_credit;
 4263 
 4264         ECORE_SMP_MB();
 4265         cur_credit = ECORE_ATOMIC_READ(&o->credit);
 4266 
 4267         return cur_credit;
 4268 }
 4269 
 4270 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
 4271                                           int cnt)
 4272 {
 4273         return TRUE;
 4274 }
 4275 
 4276 static bool ecore_credit_pool_get_entry(
 4277         struct ecore_credit_pool_obj *o,
 4278         int *offset)
 4279 {
 4280         int idx, vec, i;
 4281 
 4282         *offset = -1;
 4283 
 4284         /* Find "internal cam-offset" then add to base for this object... */
 4285         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
 4286 
 4287                 /* Skip the current vector if there are no free entries in it */
 4288                 if (!o->pool_mirror[vec])
 4289                         continue;
 4290 
 4291                 /* If we've got here we are going to find a free entry */
 4292                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
 4293                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
 4294 
 4295                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
 4296                                 /* Got one!! */
 4297                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
 4298                                 *offset = o->base_pool_offset + idx;
 4299                                 return TRUE;
 4300                         }
 4301         }
 4302 
 4303         return FALSE;
 4304 }
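/* Worked example (values illustrative): with base_pool_offset = 32 and the
 * first free bit found at index 5 of vector 0, the function returns
 * *offset = 32 + 5 = 37 and clears bit 5 of pool_mirror[0], marking that
 * CAM entry as in use.
 */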
 4305 
 4306 static bool ecore_credit_pool_put_entry(
 4307         struct ecore_credit_pool_obj *o,
 4308         int offset)
 4309 {
 4310         if (offset < o->base_pool_offset)
 4311                 return FALSE;
 4312 
 4313         offset -= o->base_pool_offset;
 4314 
 4315         if (offset >= o->pool_sz)
 4316                 return FALSE;
 4317 
 4318         /* Return the entry to the pool */
 4319         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
 4320 
 4321         return TRUE;
 4322 }
 4323 
 4324 static bool ecore_credit_pool_put_entry_always_TRUE(
 4325         struct ecore_credit_pool_obj *o,
 4326         int offset)
 4327 {
 4328         return TRUE;
 4329 }
 4330 
 4331 static bool ecore_credit_pool_get_entry_always_TRUE(
 4332         struct ecore_credit_pool_obj *o,
 4333         int *offset)
 4334 {
 4335         *offset = -1;
 4336         return TRUE;
 4337 }
 4338 /**
 4339  * ecore_init_credit_pool - initialize credit pool internals.
 4340  *
 4341  * @p:          credit pool object
 4342  * @base:       Base entry in the CAM to use.
 4343  * @credit:     pool size.
 4344  *
 4345  * If base is negative, no CAM entry handling will be performed.
 4346  * If credit is negative, pool operations will always succeed (unlimited pool).
 4347  *
 4348  */
 4349 void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
 4350                                           int base, int credit)
 4351 {
 4352         /* Zero the object first */
 4353         ECORE_MEMSET(p, 0, sizeof(*p));
 4354 
 4355         /* Set the table to all 1s */
 4356         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
 4357 
 4358         /* Init a pool as full */
 4359         ECORE_ATOMIC_SET(&p->credit, credit);
 4360 
 4361         /* The total pool size */
 4362         p->pool_sz = credit;
 4363 
 4364         p->base_pool_offset = base;
 4365 
 4366         /* Commit the change */
 4367         ECORE_SMP_MB();
 4368 
 4369         p->check = ecore_credit_pool_check;
 4370 
 4371         /* if pool credit is negative - disable the checks */
 4372         if (credit >= 0) {
 4373                 p->put      = ecore_credit_pool_put;
 4374                 p->get      = ecore_credit_pool_get;
 4375                 p->put_entry = ecore_credit_pool_put_entry;
 4376                 p->get_entry = ecore_credit_pool_get_entry;
 4377         } else {
 4378                 p->put      = ecore_credit_pool_always_TRUE;
 4379                 p->get      = ecore_credit_pool_always_TRUE;
 4380                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
 4381                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
 4382         }
 4383 
 4384         /* If base is negative - disable entries handling */
 4385         if (base < 0) {
 4386                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
 4387                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
 4388         }
 4389 }
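/* Examples of the three pool flavours (values illustrative; the callers
 * below follow the same pattern):
 *
 *      ecore_init_credit_pool(p, 0, 192);   // CAM entries + bounded credit
 *      ecore_init_credit_pool(p, -1, 64);   // bounded credit, no CAM handling
 *      ecore_init_credit_pool(p, 0, -1);    // unlimited: all operations succeed
 */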
 4390 
 4391 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
 4392                                 struct ecore_credit_pool_obj *p, uint8_t func_id,
 4393                                 uint8_t func_num)
 4394 {
 4395 /* TODO: this will be defined in consts as well... */
 4396 #define ECORE_CAM_SIZE_EMUL 5
 4397 
 4398         int cam_sz;
 4399 
 4400         if (CHIP_IS_E1(sc)) {
 4401                 /* In E1, multicast MACs are stored in the CAM... */
 4402                 if (!CHIP_REV_IS_SLOW(sc))
 4403                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
 4404                 else
 4405                         cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
 4406 
 4407                 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
 4408 
 4409         } else if (CHIP_IS_E1H(sc)) {
 4410                 /* CAM credit is equally divided between all active functions
 4411                  * on the PORT.
 4412                  */
 4413                 if (func_num > 0) {
 4414                         if (!CHIP_REV_IS_SLOW(sc))
 4415                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
 4416                         else
 4417                                 cam_sz = ECORE_CAM_SIZE_EMUL;
 4418                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
 4419                 } else {
 4420                         /* this should never happen! Block MAC operations. */
 4421                         ecore_init_credit_pool(p, 0, 0);
 4422                 }
 4423         } else {
 4424                 /*
 4425                  * CAM credit is equally divided between all active functions
 4426                  * on the PATH.
 4427                  */
 4428                 if (func_num > 0) {
 4429                         if (!CHIP_REV_IS_SLOW(sc))
 4430                                 cam_sz = PF_MAC_CREDIT_E2(sc, func_num);
 4431                         else
 4432                                 cam_sz = ECORE_CAM_SIZE_EMUL;
 4433 
 4434                         /* No need for CAM entries handling for 57712 and
 4435                          * newer.
 4436                          */
 4437                         ecore_init_credit_pool(p, -1, cam_sz);
 4438                 } else {
 4439                         /* this should never happen! Block MAC operations. */
 4440                         ecore_init_credit_pool(p, 0, 0);
 4441                 }
 4442         }
 4443 }
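/* For example (illustrative), a non-emulation E1H port shared by
 * func_num = 4 functions gives each function
 * cam_sz = MAX_MAC_CREDIT_E1H / (2 * 4) CAM entries, with function func_id
 * starting at base offset func_id * cam_sz.
 */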
 4444 
 4445 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
 4446                                  struct ecore_credit_pool_obj *p,
 4447                                  uint8_t func_id,
 4448                                  uint8_t func_num)
 4449 {
 4450         if (CHIP_IS_E1x(sc)) {
 4451                 /* There is no VLAN credit in HW on 57710 and 57711;
 4452                  * only MAC / MAC-VLAN can be set
 4453                  */
 4454                 ecore_init_credit_pool(p, 0, -1);
 4455         } else {
 4456                 /* CAM credit is equally divided between all active functions
 4457                  * on the PATH.
 4458                  */
 4459                 if (func_num > 0) {
 4460                         int credit = PF_VLAN_CREDIT_E2(sc, func_num);
 4461 
 4462                         ecore_init_credit_pool(p, -1/*unused for E2*/, credit);
 4463                 } else
 4464                         /* this should never happen! Block VLAN operations. */
 4465                         ecore_init_credit_pool(p, 0, 0);
 4466         }
 4467 }
 4468 
 4469 /****************** RSS Configuration ******************/
 4470 
 4471 /**
 4472  * ecore_setup_rss - configure RSS
 4473  *
 4474  * @sc:         device handle
 4475  * @p:          rss configuration
 4476  *
 4477  * Sends an UPDATE ramrod for this purpose.
 4478  */
 4479 static int ecore_setup_rss(struct bxe_softc *sc,
 4480                            struct ecore_config_rss_params *p)
 4481 {
 4482         struct ecore_rss_config_obj *o = p->rss_obj;
 4483         struct ecore_raw_obj *r = &o->raw;
 4484         struct eth_rss_update_ramrod_data *data =
 4485                 (struct eth_rss_update_ramrod_data *)(r->rdata);
 4486         uint16_t caps = 0;
 4487         uint8_t rss_mode = 0;
 4488         int rc;
 4489 
 4490         ECORE_MEMSET(data, 0, sizeof(*data));
 4491 
 4492         ECORE_MSG(sc, "Configuring RSS\n");
 4493 
 4494         /* Set an echo field */
 4495         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
 4496                                  (r->state << ECORE_SWCID_SHIFT));
 4497 
 4498         /* RSS mode */
 4499         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
 4500                 rss_mode = ETH_RSS_MODE_DISABLED;
 4501         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
 4502                 rss_mode = ETH_RSS_MODE_REGULAR;
 4503 
 4504         data->rss_mode = rss_mode;
 4505 
 4506         ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
 4507 
 4508         /* RSS capabilities */
 4509         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
 4510                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
 4511 
 4512         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
 4513                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
 4514 
 4515         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
 4516                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
 4517 
 4518         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
 4519                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
 4520 
 4521         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
 4522                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
 4523 
 4524         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
 4525                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
 4526 
 4527         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags))
 4528                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
 4529 
 4530         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags))
 4531                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
 4532 
 4533         if (ECORE_TEST_BIT(ECORE_RSS_TUNN_INNER_HDRS, &p->rss_flags))
 4534                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
 4535 
 4536         /* RSS keys */
 4537         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
 4538                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
 4539                        sizeof(data->rss_key));
 4540                 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
 4541         }
 4542 
 4543         data->capabilities = ECORE_CPU_TO_LE16(caps);
 4544 
 4545         /* Hashing mask */
 4546         data->rss_result_mask = p->rss_result_mask;
 4547 
 4548         /* RSS engine ID */
 4549         data->rss_engine_id = o->engine_id;
 4550 
 4551         ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
 4552 
 4553         /* Indirection table */
 4554         ECORE_MEMCPY(data->indirection_table, p->ind_table,
 4555                   T_ETH_INDIRECTION_TABLE_SIZE);
 4556 
 4557         /* Remember the last configuration */
 4558         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
 4559 
 4560 
 4561         /* No need for an explicit memory barrier here as long as we
 4562          * ensure the ordering of writing to the SPQ element
 4563          * and updating of the SPQ producer which involves a memory
 4564          * read. If the memory read is removed we will have to put a
 4565          * full memory barrier there (inside ecore_sp_post()).
 4566          */
 4567 
 4568         /* Send a ramrod */
 4569         rc = ecore_sp_post(sc,
 4570                              RAMROD_CMD_ID_ETH_RSS_UPDATE,
 4571                              r->cid,
 4572                              r->rdata_mapping,
 4573                              ETH_CONNECTION_TYPE);
 4574 
 4575         if (rc < 0)
 4576                 return rc;
 4577 
 4578         return ECORE_PENDING;
 4579 }
 4580 
 4581 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
 4582                              uint8_t *ind_table)
 4583 {
 4584         ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
 4585 }
 4586 
 4587 int ecore_config_rss(struct bxe_softc *sc,
 4588                      struct ecore_config_rss_params *p)
 4589 {
 4590         int rc;
 4591         struct ecore_rss_config_obj *o = p->rss_obj;
 4592         struct ecore_raw_obj *r = &o->raw;
 4593 
 4594         /* Do nothing if only driver cleanup was requested */
 4595         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
 4596                 ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
 4597                           p->ramrod_flags);
 4598                 return ECORE_SUCCESS;
 4599         }
 4600 
 4601         r->set_pending(r);
 4602 
 4603         rc = o->config_rss(sc, p);
 4604         if (rc < 0) {
 4605                 r->clear_pending(r);
 4606                 return rc;
 4607         }
 4608 
 4609         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
 4610                 rc = r->wait_comp(sc, r);
 4611 
 4612         return rc;
 4613 }
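/* A minimal caller sketch (hedged: the softc member is hypothetical; key and
 * indirection-table setup are elided): enable regular RSS mode with IPv4 TCP
 * hashing and wait for completion:
 *
 *      struct ecore_config_rss_params params = { 0 };
 *
 *      params.rss_obj = &sc->rss_conf_obj;   // hypothetical softc field
 *      ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *      ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *      ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *      rc = ecore_config_rss(sc, &params);
 */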
 4614 
 4615 void ecore_init_rss_config_obj(struct bxe_softc *sc,
 4616                                struct ecore_rss_config_obj *rss_obj,
 4617                                uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
 4618                                void *rdata, ecore_dma_addr_t rdata_mapping,
 4619                                int state, unsigned long *pstate,
 4620                                ecore_obj_type type)
 4621 {
 4622         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
 4623                            rdata_mapping, state, pstate, type);
 4624 
 4625         rss_obj->engine_id  = engine_id;
 4626         rss_obj->config_rss = ecore_setup_rss;
 4627 }
 4628 
 4629 
 4630 /********************** Queue state object ***********************************/
 4631 
 4632 /**
 4633  * ecore_queue_state_change - perform Queue state change transition
 4634  *
 4635  * @sc:         device handle
 4636  * @params:     parameters to perform the transition
 4637  *
 4638  * returns 0 in case of successfully completed transition, negative error
 4639  * code in case of failure, positive (EBUSY) value if there is a completion
 4640  * that is still pending (possible only if RAMROD_COMP_WAIT is
 4641  * not set in params->ramrod_flags for asynchronous commands).
 4642  *
 4643  */
 4644 int ecore_queue_state_change(struct bxe_softc *sc,
 4645                              struct ecore_queue_state_params *params)
 4646 {
 4647         struct ecore_queue_sp_obj *o = params->q_obj;
 4648         int rc, pending_bit;
 4649         unsigned long *pending = &o->pending;
 4650 
 4651         /* Check that the requested transition is legal */
 4652         rc = o->check_transition(sc, o, params);
 4653         if (rc) {
 4654                 ECORE_ERR("check transition returned an error. rc %d\n", rc);
 4655                 return ECORE_INVAL;
 4656         }
 4657 
 4658         /* Set "pending" bit */
 4659         ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
 4660         pending_bit = o->set_pending(o, params);
 4661         ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
 4662 
 4663         /* Don't send a command if only driver cleanup was requested */
 4664         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
 4665                 o->complete_cmd(sc, o, pending_bit);
 4666         else {
 4667                 /* Send a ramrod */
 4668                 rc = o->send_cmd(sc, params);
 4669                 if (rc) {
 4670                         o->next_state = ECORE_Q_STATE_MAX;
 4671                         ECORE_CLEAR_BIT(pending_bit, pending);
 4672                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 4673                         return rc;
 4674                 }
 4675 
 4676                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
 4677                         rc = o->wait_comp(sc, o, pending_bit);
 4678                         if (rc)
 4679                                 return rc;
 4680 
 4681                         return ECORE_SUCCESS;
 4682                 }
 4683         }
 4684 
 4685         return ECORE_RET_PENDING(pending_bit, pending);
 4686 }
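/* Hedged usage sketch (the fastpath member is hypothetical and the setup of
 * params.params.init is elided): perform a synchronous INIT transition:
 *
 *      struct ecore_queue_state_params q_params = { 0 };
 *
 *      q_params.q_obj = &fp->q_obj;          // hypothetical fastpath field
 *      q_params.cmd = ECORE_Q_CMD_INIT;
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *      rc = ecore_queue_state_change(sc, &q_params);
 */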
 4687 
 4688 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
 4689                                    struct ecore_queue_state_params *params)
 4690 {
 4691         enum ecore_queue_cmd cmd = params->cmd, bit;
 4692 
 4693         /* ACTIVATE and DEACTIVATE commands are implemented on top of
 4694          * the UPDATE command.
 4695          */
 4696         if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
 4697             (cmd == ECORE_Q_CMD_DEACTIVATE))
 4698                 bit = ECORE_Q_CMD_UPDATE;
 4699         else
 4700                 bit = cmd;
 4701 
 4702         ECORE_SET_BIT(bit, &obj->pending);
 4703         return bit;
 4704 }
 4705 
 4706 static int ecore_queue_wait_comp(struct bxe_softc *sc,
 4707                                  struct ecore_queue_sp_obj *o,
 4708                                  enum ecore_queue_cmd cmd)
 4709 {
 4710         return ecore_state_wait(sc, cmd, &o->pending);
 4711 }
 4712 
 4713 /**
 4714  * ecore_queue_comp_cmd - complete the state change command.
 4715  *
 4716  * @sc:         device handle
 4717  * @o:          queue state object
 4718  * @cmd:        command that has completed
 4719  *
 4720  * Checks that the arrived completion is expected.
 4721  */
 4722 static int ecore_queue_comp_cmd(struct bxe_softc *sc,
 4723                                 struct ecore_queue_sp_obj *o,
 4724                                 enum ecore_queue_cmd cmd)
 4725 {
 4726         unsigned long cur_pending = o->pending;
 4727 
 4728         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
 4729                 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
 4730                           cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
 4731                           o->state, cur_pending, o->next_state);
 4732                 return ECORE_INVAL;
 4733         }
 4734 
 4735         if (o->next_tx_only >= o->max_cos)
 4736                 /* >= because tx only must always be smaller than cos since the
 4737                  * primary connection supports COS 0
 4738                  */
 4739                 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d\n",
 4740                           o->next_tx_only, o->max_cos);
 4741 
 4742         ECORE_MSG(sc,
 4743                   "Completing command %d for queue %d, setting state to %d\n",
 4744                   cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
 4745 
 4746         if (o->next_tx_only)  /* print num tx-only if any exist */
 4747                 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
 4748                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
 4749 
 4750         o->state = o->next_state;
 4751         o->num_tx_only = o->next_tx_only;
 4752         o->next_state = ECORE_Q_STATE_MAX;
 4753 
 4754         /* It's important that o->state and o->next_state are
 4755          * updated before o->pending.
 4756          */
 4757         wmb();
 4758 
 4759         ECORE_CLEAR_BIT(cmd, &o->pending);
 4760         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 4761 
 4762         return ECORE_SUCCESS;
 4763 }
 4764 
 4765 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
 4766                                 struct ecore_queue_state_params *cmd_params,
 4767                                 struct client_init_ramrod_data *data)
 4768 {
 4769         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
 4770 
 4771         /* Rx data */
 4772 
 4773         /* IPv6 TPA supported for E2 and above only */
 4774         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
 4775                                           &params->flags) *
 4776                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
 4777 }
 4778 
 4779 static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
 4780                                 struct ecore_queue_sp_obj *o,
 4781                                 struct ecore_general_setup_params *params,
 4782                                 struct client_init_general_data *gen_data,
 4783                                 unsigned long *flags)
 4784 {
 4785         gen_data->client_id = o->cl_id;
 4786 
 4787         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
 4788                 gen_data->statistics_counter_id =
 4789                                         params->stat_id;
 4790                 gen_data->statistics_en_flg = 1;
 4791                 gen_data->statistics_zero_flg =
 4792                         ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
 4793         } else
 4794                 gen_data->statistics_counter_id =
 4795                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
 4796 
 4797         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
 4798                                                    flags);
 4799         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
 4800                                                     flags);
 4801         gen_data->sp_client_id = params->spcl_id;
 4802         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
 4803         gen_data->func_id = o->func_id;
 4804 
 4805         gen_data->cos = params->cos;
 4806 
 4807         gen_data->traffic_type =
 4808                 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
 4809                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
 4810 
 4811         gen_data->fp_hsi_ver = params->fp_hsi;
 4812 
 4813         ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
 4814                   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
 4815 }
 4816 
 4817 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
 4818                                 struct ecore_txq_setup_params *params,
 4819                                 struct client_init_tx_data *tx_data,
 4820                                 unsigned long *flags)
 4821 {
 4822         tx_data->enforce_security_flg =
 4823                 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
 4824         tx_data->default_vlan =
 4825                 ECORE_CPU_TO_LE16(params->default_vlan);
 4826         tx_data->default_vlan_flg =
 4827                 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
 4828         tx_data->tx_switching_flg =
 4829                 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
 4830         tx_data->anti_spoofing_flg =
 4831                 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
 4832         tx_data->force_default_pri_flg =
 4833                 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
 4834         tx_data->refuse_outband_vlan_flg =
 4835                 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
 4836         tx_data->tunnel_lso_inc_ip_id =
 4837                 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
 4838         tx_data->tunnel_non_lso_pcsum_location =
 4839                 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
 4840                                                             CSUM_ON_BD;
 4841 
 4842         tx_data->tx_status_block_id = params->fw_sb_id;
 4843         tx_data->tx_sb_index_number = params->sb_cq_index;
 4844         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
 4845 
 4846         tx_data->tx_bd_page_base.lo =
 4847                 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
 4848         tx_data->tx_bd_page_base.hi =
 4849                 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
 4850 
 4851         /* Don't configure any Tx switching mode during queue SETUP */
 4852         tx_data->state = 0;
 4853 }
 4854 
 4855 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
 4856                                 struct rxq_pause_params *params,
 4857                                 struct client_init_rx_data *rx_data)
 4858 {
 4859         /* flow control data */
 4860         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
 4861         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
 4862         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
 4863         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
 4864         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
 4865         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
 4866         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
 4867 }
 4868 
 4869 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
 4870                                 struct ecore_rxq_setup_params *params,
 4871                                 struct client_init_rx_data *rx_data,
 4872                                 unsigned long *flags)
 4873 {
 4874         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
 4875                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
 4876         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
 4877                                 CLIENT_INIT_RX_DATA_TPA_MODE;
 4878         rx_data->vmqueue_mode_en_flg = 0;
 4879 
 4880         rx_data->extra_data_over_sgl_en_flg =
 4881                 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
 4882         rx_data->cache_line_alignment_log_size =
 4883                 params->cache_line_log;
 4884         rx_data->enable_dynamic_hc =
 4885                 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
 4886         rx_data->max_sges_for_packet = params->max_sges_pkt;
 4887         rx_data->client_qzone_id = params->cl_qzone_id;
 4888         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
 4889 
 4890         /* Always start in DROP_ALL mode */
 4891         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
 4892                                      CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
 4893 
 4894         /* We don't set drop flags */
 4895         rx_data->drop_ip_cs_err_flg = 0;
 4896         rx_data->drop_tcp_cs_err_flg = 0;
 4897         rx_data->drop_ttl0_flg = 0;
 4898         rx_data->drop_udp_cs_err_flg = 0;
 4899         rx_data->inner_vlan_removal_enable_flg =
 4900                 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
 4901         rx_data->outer_vlan_removal_enable_flg =
 4902                 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
 4903         rx_data->status_block_id = params->fw_sb_id;
 4904         rx_data->rx_sb_index_number = params->sb_cq_index;
 4905         rx_data->max_tpa_queues = params->max_tpa_queues;
 4906         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
 4907         rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
 4908         rx_data->bd_page_base.lo =
 4909                 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
 4910         rx_data->bd_page_base.hi =
 4911                 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
 4912         rx_data->sge_page_base.lo =
 4913                 ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
 4914         rx_data->sge_page_base.hi =
 4915                 ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
 4916         rx_data->cqe_page_base.lo =
 4917                 ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
 4918         rx_data->cqe_page_base.hi =
 4919                 ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
 4920         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
 4921                                                  flags);
 4922 
 4923         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
 4924                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
 4925                 rx_data->is_approx_mcast = 1;
 4926         }
 4927 
 4928         rx_data->rss_engine_id = params->rss_engine_id;
 4929 
 4930         /* silent vlan removal */
 4931         rx_data->silent_vlan_removal_flg =
 4932                 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
 4933         rx_data->silent_vlan_value =
 4934                 ECORE_CPU_TO_LE16(params->silent_removal_value);
 4935         rx_data->silent_vlan_mask =
 4936                 ECORE_CPU_TO_LE16(params->silent_removal_mask);
 4937 }
 4938 
 4939 /* initialize the general, tx and rx parts of a queue object */
 4940 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
 4941                                 struct ecore_queue_state_params *cmd_params,
 4942                                 struct client_init_ramrod_data *data)
 4943 {
 4944         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
 4945                                        &cmd_params->params.setup.gen_params,
 4946                                        &data->general,
 4947                                        &cmd_params->params.setup.flags);
 4948 
 4949         ecore_q_fill_init_tx_data(cmd_params->q_obj,
 4950                                   &cmd_params->params.setup.txq_params,
 4951                                   &data->tx,
 4952                                   &cmd_params->params.setup.flags);
 4953 
 4954         ecore_q_fill_init_rx_data(cmd_params->q_obj,
 4955                                   &cmd_params->params.setup.rxq_params,
 4956                                   &data->rx,
 4957                                   &cmd_params->params.setup.flags);
 4958 
 4959         ecore_q_fill_init_pause_data(cmd_params->q_obj,
 4960                                      &cmd_params->params.setup.pause_params,
 4961                                      &data->rx);
 4962 }
 4963 
 4964 /* initialize the general and tx parts of a tx-only queue object */
 4965 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
 4966                                 struct ecore_queue_state_params *cmd_params,
 4967                                 struct tx_queue_init_ramrod_data *data)
 4968 {
 4969         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
 4970                                        &cmd_params->params.tx_only.gen_params,
 4971                                        &data->general,
 4972                                        &cmd_params->params.tx_only.flags);
 4973 
 4974         ecore_q_fill_init_tx_data(cmd_params->q_obj,
 4975                                   &cmd_params->params.tx_only.txq_params,
 4976                                   &data->tx,
 4977                                   &cmd_params->params.tx_only.flags);
 4978 
 4979         ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
 4980                   cmd_params->q_obj->cids[0],
 4981                   data->tx.tx_bd_page_base.lo,
 4982                   data->tx.tx_bd_page_base.hi);
 4983 }
 4984 
 4985 /**
 4986  * ecore_q_init - init HW/FW queue
 4987  *
 4988  * @sc:         device handle
 4989  * @params:     queue state parameters
 4990  *
 4991  * HW/FW initial Queue configuration:
 4992  *      - HC: Rx and Tx
 4993  *      - CDU context validation
 4994  *
 4995  */
 4996 static inline int ecore_q_init(struct bxe_softc *sc,
 4997                                struct ecore_queue_state_params *params)
 4998 {
 4999         struct ecore_queue_sp_obj *o = params->q_obj;
 5000         struct ecore_queue_init_params *init = &params->params.init;
 5001         uint16_t hc_usec;
 5002         uint8_t cos;
 5003 
 5004         /* Tx HC configuration */
 5005         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
 5006             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
 5007                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
 5008 
 5009                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
 5010                         init->tx.sb_cq_index,
 5011                         !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
 5012                         hc_usec);
 5013         }
 5014 
 5015         /* Rx HC configuration */
 5016         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
 5017             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
 5018                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
 5019 
 5020                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
 5021                         init->rx.sb_cq_index,
 5022                         !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
 5023                         hc_usec);
 5024         }
 5025 
 5026         /* Set CDU context validation values */
 5027         for (cos = 0; cos < o->max_cos; cos++) {
 5028                 ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
 5029                           o->cids[cos], cos);
 5030                 ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
 5031                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
 5032         }
 5033 
 5034         /* As no ramrod is sent, complete the command immediately  */
 5035         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
 5036 
 5037         ECORE_MMIOWB();
 5038         ECORE_SMP_MB();
 5039 
 5040         return ECORE_SUCCESS;
 5041 }
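/* Worked example of the coalescing conversion above (value illustrative):
 * an hc_rate of 5000 interrupts/sec yields
 * hc_usec = 1000000 / 5000 = 200 microseconds between status-block updates.
 */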
 5042 
 5043 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
 5044                                         struct ecore_queue_state_params *params)
 5045 {
 5046         struct ecore_queue_sp_obj *o = params->q_obj;
 5047         struct client_init_ramrod_data *rdata =
 5048                 (struct client_init_ramrod_data *)o->rdata;
 5049         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 5050         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
 5051 
 5052         /* Clear the ramrod data */
 5053         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 5054 
 5055         /* Fill the ramrod data */
 5056         ecore_q_fill_setup_data_cmn(sc, params, rdata);
 5057 
 5058         /* No need for an explicit memory barrier here as long as we
 5059          * ensure the ordering of writing to the SPQ element
 5060          * and updating of the SPQ producer which involves a memory
 5061          * read. If the memory read is removed we will have to put a
 5062          * full memory barrier there (inside ecore_sp_post()).
 5063          */
 5064         return ecore_sp_post(sc,
 5065                              ramrod,
 5066                              o->cids[ECORE_PRIMARY_CID_INDEX],
 5067                              data_mapping,
 5068                              ETH_CONNECTION_TYPE);
 5069 }
 5070 
 5071 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
 5072                                         struct ecore_queue_state_params *params)
 5073 {
 5074         struct ecore_queue_sp_obj *o = params->q_obj;
 5075         struct client_init_ramrod_data *rdata =
 5076                 (struct client_init_ramrod_data *)o->rdata;
 5077         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 5078         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
 5079 
 5080         /* Clear the ramrod data */
 5081         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 5082 
 5083         /* Fill the ramrod data */
 5084         ecore_q_fill_setup_data_cmn(sc, params, rdata);
 5085         ecore_q_fill_setup_data_e2(sc, params, rdata);
 5086 
 5087         /* No need for an explicit memory barrier here as long as we
 5088          * ensure the ordering of writing to the SPQ element
 5089          * and updating of the SPQ producer which involves a memory
 5090          * read. If the memory read is removed we will have to put a
 5091          * full memory barrier there (inside ecore_sp_post()).
 5092          */
 5093         return ecore_sp_post(sc,
 5094                              ramrod,
 5095                              o->cids[ECORE_PRIMARY_CID_INDEX],
 5096                              data_mapping,
 5097                              ETH_CONNECTION_TYPE);
 5098 }
 5099 
 5100 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
 5101                                   struct ecore_queue_state_params *params)
 5102 {
 5103         struct ecore_queue_sp_obj *o = params->q_obj;
 5104         struct tx_queue_init_ramrod_data *rdata =
 5105                 (struct tx_queue_init_ramrod_data *)o->rdata;
 5106         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 5107         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
 5108         struct ecore_queue_setup_tx_only_params *tx_only_params =
 5109                 &params->params.tx_only;
 5110         uint8_t cid_index = tx_only_params->cid_index;
 5111 
 5112         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
 5113                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
 5114                 ECORE_MSG(sc, "sending forward tx-only ramrod");
 5115         }
 5116         if (cid_index >= o->max_cos) {
 5117                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
 5118                           o->cl_id, cid_index);
 5119                 return ECORE_INVAL;
 5120         }
 5121 
 5122         ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
 5123                   tx_only_params->gen_params.cos,
 5124                   tx_only_params->gen_params.spcl_id);
 5125 
 5126         /* Clear the ramrod data */
 5127         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 5128 
 5129         /* Fill the ramrod data */
 5130         ecore_q_fill_setup_tx_only(sc, params, rdata);
 5131 
 5132         ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
 5133                   o->cids[cid_index], rdata->general.client_id,
 5134                   rdata->general.sp_client_id, rdata->general.cos);
 5135 
 5136         /* No need for an explicit memory barrier here as long as we
 5137          * ensure the ordering of writing to the SPQ element
 5138          * and updating of the SPQ producer which involves a memory
 5139          * read. If the memory read is removed we will have to put a
 5140          * full memory barrier there (inside ecore_sp_post()).
 5141          */
 5142         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
 5143                              data_mapping, ETH_CONNECTION_TYPE);
 5144 }
 5145 
 5146 static void ecore_q_fill_update_data(struct bxe_softc *sc,
 5147                                      struct ecore_queue_sp_obj *obj,
 5148                                      struct ecore_queue_update_params *params,
 5149                                      struct client_update_ramrod_data *data)
 5150 {
 5151         /* Client ID of the client to update */
 5152         data->client_id = obj->cl_id;
 5153 
 5154         /* Function ID of the client to update */
 5155         data->func_id = obj->func_id;
 5156 
 5157         /* Default VLAN value */
 5158         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
 5159 
 5160         /* Inner VLAN stripping */
 5161         data->inner_vlan_removal_enable_flg =
 5162                 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
 5163                                &params->update_flags);
 5164         data->inner_vlan_removal_change_flg =
 5165                 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
 5166                        &params->update_flags);
 5167 
 5168         /* Outer VLAN stripping */
 5169         data->outer_vlan_removal_enable_flg =
 5170                 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
 5171                                &params->update_flags);
 5172         data->outer_vlan_removal_change_flg =
 5173                 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
 5174                        &params->update_flags);
 5175 
 5176         /* Drop packets that have source MAC that doesn't belong to this
 5177          * Queue.
 5178          */
 5179         data->anti_spoofing_enable_flg =
 5180                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
 5181                                &params->update_flags);
 5182         data->anti_spoofing_change_flg =
 5183                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
 5184                                &params->update_flags);
 5185 
 5186         /* Activate/Deactivate */
 5187         data->activate_flg =
 5188                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
 5189         data->activate_change_flg =
 5190                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
 5191                                &params->update_flags);
 5192 
 5193         /* Enable default VLAN */
 5194         data->default_vlan_enable_flg =
 5195                 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
 5196                                &params->update_flags);
 5197         data->default_vlan_change_flg =
 5198                 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
 5199                                &params->update_flags);
 5200 
 5201         /* silent vlan removal */
 5202         data->silent_vlan_change_flg =
 5203                 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
 5204                                &params->update_flags);
 5205         data->silent_vlan_removal_flg =
 5206                 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
 5207                                &params->update_flags);
 5208         data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
 5209         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
 5210 
 5211         /* tx switching */
 5212         data->tx_switching_flg =
 5213                 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
 5214                                &params->update_flags);
 5215         data->tx_switching_change_flg =
 5216                 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
 5217                                &params->update_flags);
 5218 
 5219         /* PTP */
 5220         data->handle_ptp_pkts_flg =
 5221                 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS,
 5222                                &params->update_flags);
 5223         data->handle_ptp_pkts_change_flg =
 5224                 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
 5225                                &params->update_flags);
 5226 }
 5227 
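      /*
       * Every attribute filled in above follows the same pairing: a value
       * flag carrying the new setting plus a `*_change_flg' telling the FW
       * whether to apply it.  A minimal sketch of requesting only an
       * inner-VLAN-stripping change while leaving every other attribute
       * untouched (illustrative; `up' is a hypothetical pointer to the
       * update parameters):
       *
       *      ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM,
       *                       &up->update_flags);
       *      ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
       *                       &up->update_flags);
       */
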
 5228 static inline int ecore_q_send_update(struct bxe_softc *sc,
 5229                                       struct ecore_queue_state_params *params)
 5230 {
 5231         struct ecore_queue_sp_obj *o = params->q_obj;
 5232         struct client_update_ramrod_data *rdata =
 5233                 (struct client_update_ramrod_data *)o->rdata;
 5234         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 5235         struct ecore_queue_update_params *update_params =
 5236                 &params->params.update;
 5237         uint8_t cid_index = update_params->cid_index;
 5238 
 5239         if (cid_index >= o->max_cos) {
 5240                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
 5241                           o->cl_id, cid_index);
 5242                 return ECORE_INVAL;
 5243         }
 5244 
 5245         /* Clear the ramrod data */
 5246         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 5247 
 5248         /* Fill the ramrod data */
 5249         ecore_q_fill_update_data(sc, o, update_params, rdata);
 5250 
 5251         /* No need for an explicit memory barrier here as long as we
 5252          * ensure the ordering of writing to the SPQ element
 5253          * and updating of the SPQ producer which involves a memory
 5254          * read. If the memory read is removed we will have to put a
 5255          * full memory barrier there (inside ecore_sp_post()).
 5256          */
 5257         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
 5258                              o->cids[cid_index], data_mapping,
 5259                              ETH_CONNECTION_TYPE);
 5260 }
 5261 
 5262 /**
 5263  * ecore_q_send_deactivate - send DEACTIVATE command
 5264  *
 5265  * @sc:         device handle
 5266  * @params:     queue state parameters
 5267  *
 5268  * Implemented using the UPDATE command.
 5269  */
 5270 static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
 5271                                         struct ecore_queue_state_params *params)
 5272 {
 5273         struct ecore_queue_update_params *update = &params->params.update;
 5274 
 5275         ECORE_MEMSET(update, 0, sizeof(*update));
 5276 
 5277         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
 5278 
 5279         return ecore_q_send_update(sc, params);
 5280 }
 5281 
 5282 /**
 5283  * ecore_q_send_activate - send ACTIVATE command
 5284  *
 5285  * @sc:         device handle
 5286  * @params:     queue state parameters
 5287  *
 5288  * Implemented using the UPDATE command.
 5289  */
 5290 static inline int ecore_q_send_activate(struct bxe_softc *sc,
 5291                                         struct ecore_queue_state_params *params)
 5292 {
 5293         struct ecore_queue_update_params *update = &params->params.update;
 5294 
 5295         ECORE_MEMSET(update, 0, sizeof(*update));
 5296 
 5297         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
 5298         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
 5299 
 5300         return ecore_q_send_update(sc, params);
 5301 }
 5302 
 5303 static void ecore_q_fill_update_tpa_data(struct bxe_softc *sc,
 5304                                 struct ecore_queue_sp_obj *obj,
 5305                                 struct ecore_queue_update_tpa_params *params,
 5306                                 struct tpa_update_ramrod_data *data)
 5307 {
 5308         data->client_id = obj->cl_id;
 5309         data->complete_on_both_clients = params->complete_on_both_clients;
 5310         data->dont_verify_rings_pause_thr_flg =
 5311                 params->dont_verify_thr;
 5312         data->max_agg_size = ECORE_CPU_TO_LE16(params->max_agg_sz);
 5313         data->max_sges_for_packet = params->max_sges_pkt;
 5314         data->max_tpa_queues = params->max_tpa_queues;
 5315         data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buff_sz);
 5316         data->sge_page_base_hi = ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
 5317         data->sge_page_base_lo = ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
 5318         data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_pause_thr_high);
 5319         data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_pause_thr_low);
 5320         data->tpa_mode = params->tpa_mode;
 5321         data->update_ipv4 = params->update_ipv4;
 5322         data->update_ipv6 = params->update_ipv6;
 5323 }
 5324 
 5325 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
 5326                                         struct ecore_queue_state_params *params)
 5327 {
 5328         struct ecore_queue_sp_obj *o = params->q_obj;
 5329         struct tpa_update_ramrod_data *rdata =
 5330                 (struct tpa_update_ramrod_data *)o->rdata;
 5331         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 5332         struct ecore_queue_update_tpa_params *update_tpa_params =
 5333                 &params->params.update_tpa;
 5334         uint16_t type;
 5335 
 5336         /* Clear the ramrod data */
 5337         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 5338 
 5339         /* Fill the ramrod data */
 5340         ecore_q_fill_update_tpa_data(sc, o, update_tpa_params, rdata);
 5341 
 5342         /* Add the function id inside the type, so that the sp post function
 5343          * doesn't automatically add the PF func-id; this is required
 5344          * for operations done by PFs on behalf of their VFs.
 5345          */
 5346         type = ETH_CONNECTION_TYPE |
 5347                 ((o->func_id) << SPE_HDR_T_FUNCTION_ID_SHIFT);
 5348 
 5349         /* No need for an explicit memory barrier here as long as we
 5350          * ensure the ordering of writing to the SPQ element
 5351          * and updating of the SPQ producer which involves a memory
 5352          * read. If the memory read is removed we will have to put a
 5353          * full memory barrier there (inside ecore_sp_post()).
 5354          */
 5355         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TPA_UPDATE,
 5356                              o->cids[ECORE_PRIMARY_CID_INDEX],
 5357                              data_mapping, type);
 5358 }
 5359 
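      /*
       * For illustration: with func_id 5, the `type' computed above is
       * ETH_CONNECTION_TYPE | (5 << SPE_HDR_T_FUNCTION_ID_SHIFT), so
       * ecore_sp_post() carries the VF's function ID instead of stamping
       * in the PF's own (arithmetic sketch only).
       */
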
 5360 static inline int ecore_q_send_halt(struct bxe_softc *sc,
 5361                                     struct ecore_queue_state_params *params)
 5362 {
 5363         struct ecore_queue_sp_obj *o = params->q_obj;
 5364 
 5365         /* Build eth_halt_ramrod_data.client_id in a big-endian friendly way */
 5366         ecore_dma_addr_t data_mapping = (ecore_dma_addr_t)o->cl_id;
 5368 
 5369         /* No need for an explicit memory barrier here as long as we
 5370          * ensure the ordering of writing to the SPQ element
 5371          * and updating of the SPQ producer which involves a memory
 5372          * read. If the memory read is removed we will have to put a
 5373          * full memory barrier there (inside ecore_sp_post()).
 5374          */
 5375         return ecore_sp_post(sc,
 5376                              RAMROD_CMD_ID_ETH_HALT,
 5377                              o->cids[ECORE_PRIMARY_CID_INDEX],
 5378                              data_mapping,
 5379                              ETH_CONNECTION_TYPE);
 5380 }
 5381 
 5382 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
 5383                                        struct ecore_queue_state_params *params)
 5384 {
 5385         struct ecore_queue_sp_obj *o = params->q_obj;
 5386         uint8_t cid_idx = params->params.cfc_del.cid_index;
 5387 
 5388         if (cid_idx >= o->max_cos) {
 5389                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
 5390                           o->cl_id, cid_idx);
 5391                 return ECORE_INVAL;
 5392         }
 5393 
 5394         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
 5395                              o->cids[cid_idx], 0,
 5396                              NONE_CONNECTION_TYPE);
 5397 }
 5398 
 5399 static inline int ecore_q_send_terminate(struct bxe_softc *sc,
 5400                                         struct ecore_queue_state_params *params)
 5401 {
 5402         struct ecore_queue_sp_obj *o = params->q_obj;
 5403         uint8_t cid_index = params->params.terminate.cid_index;
 5404 
 5405         if (cid_index >= o->max_cos) {
 5406                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
 5407                           o->cl_id, cid_index);
 5408                 return ECORE_INVAL;
 5409         }
 5410 
 5411         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
 5412                              o->cids[cid_index], 0,
 5413                              ETH_CONNECTION_TYPE);
 5414 }
 5415 
 5416 static inline int ecore_q_send_empty(struct bxe_softc *sc,
 5417                                      struct ecore_queue_state_params *params)
 5418 {
 5419         struct ecore_queue_sp_obj *o = params->q_obj;
 5420 
 5421         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
 5422                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
 5423                              ETH_CONNECTION_TYPE);
 5424 }
 5425 
 5426 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
 5427                                         struct ecore_queue_state_params *params)
 5428 {
 5429         switch (params->cmd) {
 5430         case ECORE_Q_CMD_INIT:
 5431                 return ecore_q_init(sc, params);
 5432         case ECORE_Q_CMD_SETUP_TX_ONLY:
 5433                 return ecore_q_send_setup_tx_only(sc, params);
 5434         case ECORE_Q_CMD_DEACTIVATE:
 5435                 return ecore_q_send_deactivate(sc, params);
 5436         case ECORE_Q_CMD_ACTIVATE:
 5437                 return ecore_q_send_activate(sc, params);
 5438         case ECORE_Q_CMD_UPDATE:
 5439                 return ecore_q_send_update(sc, params);
 5440         case ECORE_Q_CMD_UPDATE_TPA:
 5441                 return ecore_q_send_update_tpa(sc, params);
 5442         case ECORE_Q_CMD_HALT:
 5443                 return ecore_q_send_halt(sc, params);
 5444         case ECORE_Q_CMD_CFC_DEL:
 5445                 return ecore_q_send_cfc_del(sc, params);
 5446         case ECORE_Q_CMD_TERMINATE:
 5447                 return ecore_q_send_terminate(sc, params);
 5448         case ECORE_Q_CMD_EMPTY:
 5449                 return ecore_q_send_empty(sc, params);
 5450         default:
 5451                 ECORE_ERR("Unknown command: %d\n", params->cmd);
 5452                 return ECORE_INVAL;
 5453         }
 5454 }
 5455 
 5456 static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
 5457                                     struct ecore_queue_state_params *params)
 5458 {
 5459         switch (params->cmd) {
 5460         case ECORE_Q_CMD_SETUP:
 5461                 return ecore_q_send_setup_e1x(sc, params);
 5462         case ECORE_Q_CMD_INIT:
 5463         case ECORE_Q_CMD_SETUP_TX_ONLY:
 5464         case ECORE_Q_CMD_DEACTIVATE:
 5465         case ECORE_Q_CMD_ACTIVATE:
 5466         case ECORE_Q_CMD_UPDATE:
 5467         case ECORE_Q_CMD_UPDATE_TPA:
 5468         case ECORE_Q_CMD_HALT:
 5469         case ECORE_Q_CMD_CFC_DEL:
 5470         case ECORE_Q_CMD_TERMINATE:
 5471         case ECORE_Q_CMD_EMPTY:
 5472                 return ecore_queue_send_cmd_cmn(sc, params);
 5473         default:
 5474                 ECORE_ERR("Unknown command: %d\n", params->cmd);
 5475                 return ECORE_INVAL;
 5476         }
 5477 }
 5478 
 5479 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
 5480                                    struct ecore_queue_state_params *params)
 5481 {
 5482         switch (params->cmd) {
 5483         case ECORE_Q_CMD_SETUP:
 5484                 return ecore_q_send_setup_e2(sc, params);
 5485         case ECORE_Q_CMD_INIT:
 5486         case ECORE_Q_CMD_SETUP_TX_ONLY:
 5487         case ECORE_Q_CMD_DEACTIVATE:
 5488         case ECORE_Q_CMD_ACTIVATE:
 5489         case ECORE_Q_CMD_UPDATE:
 5490         case ECORE_Q_CMD_UPDATE_TPA:
 5491         case ECORE_Q_CMD_HALT:
 5492         case ECORE_Q_CMD_CFC_DEL:
 5493         case ECORE_Q_CMD_TERMINATE:
 5494         case ECORE_Q_CMD_EMPTY:
 5495                 return ecore_queue_send_cmd_cmn(sc, params);
 5496         default:
 5497                 ECORE_ERR("Unknown command: %d\n", params->cmd);
 5498                 return ECORE_INVAL;
 5499         }
 5500 }
 5501 
 5502 /**
 5503  * ecore_queue_chk_transition - check state machine of a regular Queue
 5504  *
 5505  * @sc:         device handle
 5506  * @o:          queue state object
 5507  * @params:     queue state parameters
 5508  *
 5509  * Checks the state machine of a regular (i.e., not Forwarding) Queue.
 5510  * It checks whether the requested command is legal in the current
 5511  * state and, if it is, sets a `next_state' in the object
 5512  * that will be used in the completion flow to set the `state'
 5513  * of the object.
 5514  *
 5515  * Returns 0 if the requested command is a legal transition,
 5516  *         ECORE_INVAL otherwise.
 5517  */
 5518 static int ecore_queue_chk_transition(struct bxe_softc *sc,
 5519                                       struct ecore_queue_sp_obj *o,
 5520                                       struct ecore_queue_state_params *params)
 5521 {
 5522         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
 5523         enum ecore_queue_cmd cmd = params->cmd;
 5524         struct ecore_queue_update_params *update_params =
 5525                  &params->params.update;
 5526         uint8_t next_tx_only = o->num_tx_only;
 5527 
 5528         /* Forget all commands pending for completion if a driver-only state
 5529          * transition has been requested.
 5530          */
 5531         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
 5532                 o->pending = 0;
 5533                 o->next_state = ECORE_Q_STATE_MAX;
 5534         }
 5535 
 5536         /* Don't allow a next state transition if we are in the middle of
 5537          * the previous one.
 5538          */
 5539         if (o->pending) {
 5540                 ECORE_ERR("Blocking transition since pending was %lx\n",
 5541                           o->pending);
 5542                 return ECORE_BUSY;
 5543         }
 5544 
 5545         switch (state) {
 5546         case ECORE_Q_STATE_RESET:
 5547                 if (cmd == ECORE_Q_CMD_INIT)
 5548                         next_state = ECORE_Q_STATE_INITIALIZED;
 5549 
 5550                 break;
 5551         case ECORE_Q_STATE_INITIALIZED:
 5552                 if (cmd == ECORE_Q_CMD_SETUP) {
 5553                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
 5554                                            &params->params.setup.flags))
 5555                                 next_state = ECORE_Q_STATE_ACTIVE;
 5556                         else
 5557                                 next_state = ECORE_Q_STATE_INACTIVE;
 5558                 }
 5559 
 5560                 break;
 5561         case ECORE_Q_STATE_ACTIVE:
 5562                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
 5563                         next_state = ECORE_Q_STATE_INACTIVE;
 5564 
 5565                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
 5566                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
 5567                         next_state = ECORE_Q_STATE_ACTIVE;
 5568 
 5569                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
 5570                         next_state = ECORE_Q_STATE_MULTI_COS;
 5571                         next_tx_only = 1;
 5572                 }
 5573 
 5574                 else if (cmd == ECORE_Q_CMD_HALT)
 5575                         next_state = ECORE_Q_STATE_STOPPED;
 5576 
 5577                 else if (cmd == ECORE_Q_CMD_UPDATE) {
 5578                         /* If "active" state change is requested, update the
 5579                          *  state accordingly.
 5580                          * state accordingly.
 5581                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
 5582                                            &update_params->update_flags) &&
 5583                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
 5584                                             &update_params->update_flags))
 5585                                 next_state = ECORE_Q_STATE_INACTIVE;
 5586                         else
 5587                                 next_state = ECORE_Q_STATE_ACTIVE;
 5588                 }
 5589 
 5590                 break;
 5591         case ECORE_Q_STATE_MULTI_COS:
 5592                 if (cmd == ECORE_Q_CMD_TERMINATE)
 5593                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
 5594 
 5595                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
 5596                         next_state = ECORE_Q_STATE_MULTI_COS;
 5597                         next_tx_only = o->num_tx_only + 1;
 5598                 }
 5599 
 5600                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
 5601                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
 5602                         next_state = ECORE_Q_STATE_MULTI_COS;
 5603 
 5604                 else if (cmd == ECORE_Q_CMD_UPDATE) {
 5605                         /* If "active" state change is requested, update the
 5606                          * state accordingly.
 5607                          */
 5608                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
 5609                                            &update_params->update_flags) &&
 5610                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
 5611                                             &update_params->update_flags))
 5612                                 next_state = ECORE_Q_STATE_INACTIVE;
 5613                         else
 5614                                 next_state = ECORE_Q_STATE_MULTI_COS;
 5615                 }
 5616 
 5617                 break;
 5618         case ECORE_Q_STATE_MCOS_TERMINATED:
 5619                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
 5620                         next_tx_only = o->num_tx_only - 1;
 5621                         if (next_tx_only == 0)
 5622                                 next_state = ECORE_Q_STATE_ACTIVE;
 5623                         else
 5624                                 next_state = ECORE_Q_STATE_MULTI_COS;
 5625                 }
 5626 
 5627                 break;
 5628         case ECORE_Q_STATE_INACTIVE:
 5629                 if (cmd == ECORE_Q_CMD_ACTIVATE)
 5630                         next_state = ECORE_Q_STATE_ACTIVE;
 5631 
 5632                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
 5633                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
 5634                         next_state = ECORE_Q_STATE_INACTIVE;
 5635 
 5636                 else if (cmd == ECORE_Q_CMD_HALT)
 5637                         next_state = ECORE_Q_STATE_STOPPED;
 5638 
 5639                 else if (cmd == ECORE_Q_CMD_UPDATE) {
 5640                         /* If "active" state change is requested, update the
 5641                          * state accordingly.
 5642                          */
 5643                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
 5644                                            &update_params->update_flags) &&
 5645                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
 5646                                            &update_params->update_flags)) {
 5647                                 if (o->num_tx_only == 0)
 5648                                         next_state = ECORE_Q_STATE_ACTIVE;
 5649                                 else /* tx only queues exist for this queue */
 5650                                         next_state = ECORE_Q_STATE_MULTI_COS;
 5651                         } else
 5652                                 next_state = ECORE_Q_STATE_INACTIVE;
 5653                 }
 5654 
 5655                 break;
 5656         case ECORE_Q_STATE_STOPPED:
 5657                 if (cmd == ECORE_Q_CMD_TERMINATE)
 5658                         next_state = ECORE_Q_STATE_TERMINATED;
 5659 
 5660                 break;
 5661         case ECORE_Q_STATE_TERMINATED:
 5662                 if (cmd == ECORE_Q_CMD_CFC_DEL)
 5663                         next_state = ECORE_Q_STATE_RESET;
 5664 
 5665                 break;
 5666         default:
 5667                 ECORE_ERR("Illegal state: %d\n", state);
 5668         }
 5669 
 5670         /* Transition is assured */
 5671         if (next_state != ECORE_Q_STATE_MAX) {
 5672                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
 5673                           state, cmd, next_state);
 5674                 o->next_state = next_state;
 5675                 o->next_tx_only = next_tx_only;
 5676                 return ECORE_SUCCESS;
 5677         }
 5678 
 5679         ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
 5680 
 5681         return ECORE_INVAL;
 5682 }
 5683 
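      /*
       * A compact summary of the transitions validated above (EMPTY and
       * UPDATE_TPA are self-loops, and UPDATE may also move a queue
       * between ACTIVE/MULTI_COS and INACTIVE depending on the ACTIVATE
       * bits):
       *
       *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
       *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
       *   MCOS_TERMINATED --CFC_DEL--> MULTI_COS, or ACTIVE once the last
       *                                tx-only connection is gone
       *   ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
       *   TERMINATED --CFC_DEL--> RESET
       */
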
 5684 /**
 5685  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
 5686  *
 5687  * @sc:         device handle
 5688  * @o:          queue state object
 5689  * @params:     queue state parameters
 5690  *
 5691  * It checks whether the requested command is legal in the current
 5692  * state and, if it is, sets a `next_state' in the object
 5693  * that will be used in the completion flow to set the `state'
 5694  * of the object.
 5695  *
 5696  * Returns 0 if the requested command is a legal transition,
 5697  *         ECORE_INVAL otherwise.
 5698  */
 5699 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
 5700                                           struct ecore_queue_sp_obj *o,
 5701                                         struct ecore_queue_state_params *params)
 5702 {
 5703         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
 5704         enum ecore_queue_cmd cmd = params->cmd;
 5705 
 5706         switch (state) {
 5707         case ECORE_Q_STATE_RESET:
 5708                 if (cmd == ECORE_Q_CMD_INIT)
 5709                         next_state = ECORE_Q_STATE_INITIALIZED;
 5710 
 5711                 break;
 5712         case ECORE_Q_STATE_INITIALIZED:
 5713                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
 5714                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
 5715                                            &params->params.tx_only.flags))
 5716                                 next_state = ECORE_Q_STATE_ACTIVE;
 5717                         else
 5718                                 next_state = ECORE_Q_STATE_INACTIVE;
 5719                 }
 5720 
 5721                 break;
 5722         case ECORE_Q_STATE_ACTIVE:
 5723         case ECORE_Q_STATE_INACTIVE:
 5724                 if (cmd == ECORE_Q_CMD_CFC_DEL)
 5725                         next_state = ECORE_Q_STATE_RESET;
 5726 
 5727                 break;
 5728         default:
 5729                 ECORE_ERR("Illegal state: %d\n", state);
 5730         }
 5731 
 5732         /* Transition is assured */
 5733         if (next_state != ECORE_Q_STATE_MAX) {
 5734                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
 5735                           state, cmd, next_state);
 5736                 o->next_state = next_state;
 5737                 return ECORE_SUCCESS;
 5738         }
 5739 
 5740         ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
 5741         return ECORE_INVAL;
 5742 }
 5743 
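      /*
       * The Forwarding-Queue machine above is deliberately small:
       *
       *   RESET --INIT--> INITIALIZED --SETUP_TX_ONLY--> ACTIVE or INACTIVE
       *   ACTIVE or INACTIVE --CFC_DEL--> RESET
       */
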
 5744 void ecore_init_queue_obj(struct bxe_softc *sc,
 5745                           struct ecore_queue_sp_obj *obj,
 5746                           uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
 5747                           void *rdata,
 5748                           ecore_dma_addr_t rdata_mapping, unsigned long type)
 5749 {
 5750         ECORE_MEMSET(obj, 0, sizeof(*obj));
 5751 
 5752         /* We support only up to ECORE_MULTI_TX_COS Tx CoS values at the moment */
 5753         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
 5754 
 5755         memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
 5756         obj->max_cos = cid_cnt;
 5757         obj->cl_id = cl_id;
 5758         obj->func_id = func_id;
 5759         obj->rdata = rdata;
 5760         obj->rdata_mapping = rdata_mapping;
 5761         obj->type = type;
 5762         obj->next_state = ECORE_Q_STATE_MAX;
 5763 
 5764         if (CHIP_IS_E1x(sc))
 5765                 obj->send_cmd = ecore_queue_send_cmd_e1x;
 5766         else
 5767                 obj->send_cmd = ecore_queue_send_cmd_e2;
 5768 
 5769         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
 5770                 obj->check_transition = ecore_queue_chk_fwd_transition;
 5771         else
 5772                 obj->check_transition = ecore_queue_chk_transition;
 5773 
 5774         obj->complete_cmd = ecore_queue_comp_cmd;
 5775         obj->wait_comp = ecore_queue_wait_comp;
 5776         obj->set_pending = ecore_queue_set_pending;
 5777 }
 5778 
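      /*
       * A minimal initialization sketch (illustrative; `fp', its CID and
       * its DMA-able ramrod buffer are hypothetical caller state, and the
       * ECORE_Q_TYPE_HAS_RX/ECORE_Q_TYPE_HAS_TX flags are assumed here):
       *
       *      uint32_t cids[1] = { fp->cid };
       *      unsigned long q_type = 0;
       *
       *      ECORE_SET_BIT_NA(ECORE_Q_TYPE_HAS_RX, &q_type);
       *      ECORE_SET_BIT_NA(ECORE_Q_TYPE_HAS_TX, &q_type);
       *      ecore_init_queue_obj(sc, &fp->sp_obj, fp->cl_id, cids, 1,
       *                           ECORE_FUNC_ID(sc), fp->rdata,
       *                           fp->rdata_mapping, q_type);
       */
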
 5779 /* Return a queue object's logical state. */
 5780 int ecore_get_q_logical_state(struct bxe_softc *sc,
 5781                                struct ecore_queue_sp_obj *obj)
 5782 {
 5783         switch (obj->state) {
 5784         case ECORE_Q_STATE_ACTIVE:
 5785         case ECORE_Q_STATE_MULTI_COS:
 5786                 return ECORE_Q_LOGICAL_STATE_ACTIVE;
 5787         case ECORE_Q_STATE_RESET:
 5788         case ECORE_Q_STATE_INITIALIZED:
 5789         case ECORE_Q_STATE_MCOS_TERMINATED:
 5790         case ECORE_Q_STATE_INACTIVE:
 5791         case ECORE_Q_STATE_STOPPED:
 5792         case ECORE_Q_STATE_TERMINATED:
 5793         case ECORE_Q_STATE_FLRED:
 5794                 return ECORE_Q_LOGICAL_STATE_STOPPED;
 5795         default:
 5796                 return ECORE_INVAL;
 5797         }
 5798 }
 5799 
 5800 /********************** Function state object *********************************/
 5801 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
 5802                                            struct ecore_func_sp_obj *o)
 5803 {
 5804         /* In the middle of a transaction - return INVALID state */
 5805         if (o->pending)
 5806                 return ECORE_F_STATE_MAX;
 5807 
 5808         /* Ensure the order of reading of o->pending and o->state:
 5809          * o->pending should be read first.
 5810          */
 5811         rmb();
 5812 
 5813         return o->state;
 5814 }
 5815 
 5816 static int ecore_func_wait_comp(struct bxe_softc *sc,
 5817                                 struct ecore_func_sp_obj *o,
 5818                                 enum ecore_func_cmd cmd)
 5819 {
 5820         return ecore_state_wait(sc, cmd, &o->pending);
 5821 }
 5822 
 5823 /**
 5824  * ecore_func_state_change_comp - complete the state machine transition
 5825  *
 5826  * @sc:         device handle
 5827  * @o:          function state object
 5828  * @cmd:        state machine command
 5829  *
 5830  * Called on state change transition. Completes the state
 5831  * machine transition only - no HW interaction.
 5832  */
 5833 static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
 5834                                                struct ecore_func_sp_obj *o,
 5835                                                enum ecore_func_cmd cmd)
 5836 {
 5837         unsigned long cur_pending = o->pending;
 5838 
 5839         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
 5840                 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
 5841                           cmd, ECORE_FUNC_ID(sc), o->state,
 5842                           cur_pending, o->next_state);
 5843                 return ECORE_INVAL;
 5844         }
 5845 
 5846         ECORE_MSG(sc,
 5847                   "Completing command %d for func %d, setting state to %d\n",
 5848                   cmd, ECORE_FUNC_ID(sc), o->next_state);
 5849 
 5850         o->state = o->next_state;
 5851         o->next_state = ECORE_F_STATE_MAX;
 5852 
 5853         /* It's important that o->state and o->next_state are
 5854          * updated before o->pending.
 5855          */
 5856         wmb();
 5857 
 5858         ECORE_CLEAR_BIT(cmd, &o->pending);
 5859         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 5860 
 5861         return ECORE_SUCCESS;
 5862 }
 5863 
 5864 /**
 5865  * ecore_func_comp_cmd - complete the state change command
 5866  *
 5867  * @sc:         device handle
 5868  * @o:          function state object
 5869  * @cmd:        state machine command
 5870  *
 5871  * Checks that the arrived completion is expected.
 5872  */
 5873 static int ecore_func_comp_cmd(struct bxe_softc *sc,
 5874                                struct ecore_func_sp_obj *o,
 5875                                enum ecore_func_cmd cmd)
 5876 {
 5877         /* Complete the state machine part first and check whether it's a
 5878          * legal completion.
 5879          */
 5880         int rc = ecore_func_state_change_comp(sc, o, cmd);
 5881         return rc;
 5882 }
 5883 
 5884 /**
 5885  * ecore_func_chk_transition - check state machine of a function
 5886  *
 5887  * @sc:         device handle
 5888  * @o:          function state object
 5889  * @params:     function state parameters
 5890  *
 5891  * It checks whether the requested command is legal in the current
 5892  * state and, if it is, sets a `next_state' in the object
 5893  * that will be used in the completion flow to set the `state'
 5894  * of the object.
 5895  *
 5896  * Returns 0 if the requested command is a legal transition,
 5897  *         ECORE_INVAL otherwise.
 5898  */
 5899 static int ecore_func_chk_transition(struct bxe_softc *sc,
 5900                                      struct ecore_func_sp_obj *o,
 5901                                      struct ecore_func_state_params *params)
 5902 {
 5903         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
 5904         enum ecore_func_cmd cmd = params->cmd;
 5905 
 5906         /* Forget all commands pending for completion if a driver-only state
 5907          * transition has been requested.
 5908          */
 5909         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
 5910                 o->pending = 0;
 5911                 o->next_state = ECORE_F_STATE_MAX;
 5912         }
 5913 
 5914         /* Don't allow a next state transition if we are in the middle of
 5915          * the previous one.
 5916          */
 5917         if (o->pending)
 5918                 return ECORE_BUSY;
 5919 
 5920         switch (state) {
 5921         case ECORE_F_STATE_RESET:
 5922                 if (cmd == ECORE_F_CMD_HW_INIT)
 5923                         next_state = ECORE_F_STATE_INITIALIZED;
 5924 
 5925                 break;
 5926         case ECORE_F_STATE_INITIALIZED:
 5927                 if (cmd == ECORE_F_CMD_START)
 5928                         next_state = ECORE_F_STATE_STARTED;
 5929 
 5930                 else if (cmd == ECORE_F_CMD_HW_RESET)
 5931                         next_state = ECORE_F_STATE_RESET;
 5932 
 5933                 break;
 5934         case ECORE_F_STATE_STARTED:
 5935                 if (cmd == ECORE_F_CMD_STOP)
 5936                         next_state = ECORE_F_STATE_INITIALIZED;
 5937                 /* afex ramrods can be sent only in started mode, and only
 5938                  * if not pending for function_stop ramrod completion;
 5939                  * for these events the next state remains STARTED.
 5940                  */
 5941                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
 5942                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5943                         next_state = ECORE_F_STATE_STARTED;
 5944 
 5945                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
 5946                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5947                         next_state = ECORE_F_STATE_STARTED;
 5948 
 5949                 /* Switch_update ramrod can be sent in either started or
 5950                  * tx_stopped state, and it doesn't change the state.
 5951                  */
 5952                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
 5953                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5954                         next_state = ECORE_F_STATE_STARTED;
 5955 
 5956                 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
 5957                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5958                         next_state = ECORE_F_STATE_STARTED;
 5959 
 5960                 else if (cmd == ECORE_F_CMD_TX_STOP)
 5961                         next_state = ECORE_F_STATE_TX_STOPPED;
 5962 
 5963                 break;
 5964         case ECORE_F_STATE_TX_STOPPED:
 5965                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
 5966                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5967                         next_state = ECORE_F_STATE_TX_STOPPED;
 5968 
 5969                 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
 5970                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
 5971                         next_state = ECORE_F_STATE_TX_STOPPED;
 5972 
 5973                 else if (cmd == ECORE_F_CMD_TX_START)
 5974                         next_state = ECORE_F_STATE_STARTED;
 5975 
 5976                 break;
 5977         default:
 5978                 ECORE_ERR("Unknown state: %d\n", state);
 5979         }
 5980 
 5981         /* Transition is assured */
 5982         if (next_state != ECORE_F_STATE_MAX) {
 5983                 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
 5984                           state, cmd, next_state);
 5985                 o->next_state = next_state;
 5986                 return ECORE_SUCCESS;
 5987         }
 5988 
 5989         ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
 5990                   state, cmd);
 5991 
 5992         return ECORE_INVAL;
 5993 }
 5994 
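      /*
       * A compact summary of the function machine validated above
       * (AFEX_UPDATE, AFEX_VIFLISTS, SWITCH_UPDATE and SET_TIMESYNC are
       * self-loops in STARTED; SWITCH_UPDATE and SET_TIMESYNC also
       * self-loop in TX_STOPPED):
       *
       *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
       *   INITIALIZED --HW_RESET--> RESET
       *   STARTED --STOP--> INITIALIZED
       *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
       */
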
 5995 /**
 5996  * ecore_func_init_func - performs HW init at function stage
 5997  *
 5998  * @sc:         device handle
 5999  * @drv:        driver-specific callbacks
 6000  *
 6001  * Init HW when the current phase is
 6002  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
 6003  * HW blocks.
 6004  */
 6005 static inline int ecore_func_init_func(struct bxe_softc *sc,
 6006                                        const struct ecore_func_sp_drv_ops *drv)
 6007 {
 6008         return drv->init_hw_func(sc);
 6009 }
 6010 
 6011 /**
 6012  * ecore_func_init_port - performs HW init at port stage
 6013  *
 6014  * @sc:         device handle
 6015  * @drv:        driver-specific callbacks
 6016  *
 6017  * Init HW when the current phase is
 6018  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 6019  * FUNCTION-only HW blocks.
 6021  */
 6022 static inline int ecore_func_init_port(struct bxe_softc *sc,
 6023                                        const struct ecore_func_sp_drv_ops *drv)
 6024 {
 6025         int rc = drv->init_hw_port(sc);
 6026         if (rc)
 6027                 return rc;
 6028 
 6029         return ecore_func_init_func(sc, drv);
 6030 }
 6031 
 6032 /**
 6033  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
 6034  *
 6035  * @sc:         device handle
 6036  * @drv:        driver-specific callbacks
 6037  *
 6038  * Init HW when the current phase is
 6039  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 6040  * PORT-only and FUNCTION-only HW blocks.
 6041  */
 6042 static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
 6043                                         const struct ecore_func_sp_drv_ops *drv)
 6044 {
 6045         int rc = drv->init_hw_cmn_chip(sc);
 6046         if (rc)
 6047                 return rc;
 6048 
 6049         return ecore_func_init_port(sc, drv);
 6050 }
 6051 
 6052 /**
 6053  * ecore_func_init_cmn - performs HW init at common stage
 6054  *
 6055  * @sc:         device handle
 6056  * @drv:        driver-specific callbacks
 6057  *
 6058  * Init HW when the current phase is
 6059  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 6060  * PORT-only and FUNCTION-only HW blocks.
 6061  */
 6062 static inline int ecore_func_init_cmn(struct bxe_softc *sc,
 6063                                       const struct ecore_func_sp_drv_ops *drv)
 6064 {
 6065         int rc = drv->init_hw_cmn(sc);
 6066         if (rc)
 6067                 return rc;
 6068 
 6069         return ecore_func_init_port(sc, drv);
 6070 }
 6071 
 6072 static int ecore_func_hw_init(struct bxe_softc *sc,
 6073                               struct ecore_func_state_params *params)
 6074 {
 6075         uint32_t load_code = params->params.hw_init.load_phase;
 6076         struct ecore_func_sp_obj *o = params->f_obj;
 6077         const struct ecore_func_sp_drv_ops *drv = o->drv;
 6078         int rc = 0;
 6079 
 6080         ECORE_MSG(sc, "function %d  load_code %x\n",
 6081                   ECORE_ABS_FUNC_ID(sc), load_code);
 6082 
 6083         /* Prepare buffers for unzipping the FW */
 6084         rc = drv->gunzip_init(sc);
 6085         if (rc)
 6086                 return rc;
 6087 
 6088         /* Prepare FW */
 6089         rc = drv->init_fw(sc);
 6090         if (rc) {
 6091                 ECORE_ERR("Error loading firmware\n");
 6092                 goto init_err;
 6093         }
 6094 
 6095         /* Handle the beginning of COMMON_XXX phases separately... */
 6096         switch (load_code) {
 6097         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
 6098                 rc = ecore_func_init_cmn_chip(sc, drv);
 6099                 if (rc)
 6100                         goto init_err;
 6101 
 6102                 break;
 6103         case FW_MSG_CODE_DRV_LOAD_COMMON:
 6104                 rc = ecore_func_init_cmn(sc, drv);
 6105                 if (rc)
 6106                         goto init_err;
 6107 
 6108                 break;
 6109         case FW_MSG_CODE_DRV_LOAD_PORT:
 6110                 rc = ecore_func_init_port(sc, drv);
 6111                 if (rc)
 6112                         goto init_err;
 6113 
 6114                 break;
 6115         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 6116                 rc = ecore_func_init_func(sc, drv);
 6117                 if (rc)
 6118                         goto init_err;
 6119 
 6120                 break;
 6121         default:
 6122                 ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
 6123                 rc = ECORE_INVAL;
 6124         }
 6125 
 6126 init_err:
 6127         drv->gunzip_end(sc);
 6128 
 6129         /* In case of success, complete the command immediately: no ramrods
 6130          * have been sent.
 6131          */
 6132         if (!rc)
 6133                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
 6134 
 6135         return rc;
 6136 }
 6137 
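      /*
       * A caller-side sketch of driving the HW-init stage (illustrative;
       * it assumes the driver's usual ecore_func_state_change() entry
       * point declared elsewhere, a function object embedded in the
       * hypothetical softc field `sc->func_obj', and a load code handed
       * back by the MCP):
       *
       *      struct ecore_func_state_params fsp = { 0 };
       *      int rc;
       *
       *      fsp.f_obj = &sc->func_obj;
       *      fsp.cmd = ECORE_F_CMD_HW_INIT;
       *      fsp.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
       *      rc = ecore_func_state_change(sc, &fsp);
       */
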
 6138 /**
 6139  * ecore_func_reset_func - reset HW at function stage
 6140  *
 6141  * @sc:         device handle
 6142  * @drv:        driver-specific callbacks
 6143  *
 6144  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 6145  * FUNCTION-only HW blocks.
 6146  */
 6147 static inline void ecore_func_reset_func(struct bxe_softc *sc,
 6148                                         const struct ecore_func_sp_drv_ops *drv)
 6149 {
 6150         drv->reset_hw_func(sc);
 6151 }
 6152 
 6153 /**
 6154  * ecore_func_reset_port - reset HW at port stage
 6155  *
 6156  * @sc:         device handle
 6157  * @drv:        driver-specific callbacks
 6158  *
 6159  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 6160  * FUNCTION-only and PORT-only HW blocks.
 6161  *
 6162  *                 !!!IMPORTANT!!!
 6163  *
 6164  * It's important to call reset_port before reset_func(), as the last thing
 6165  * reset_func() does is pf_disable(), which shuts down PGLUE_B and
 6166  * makes any DMAE transactions impossible.
 6167  */
 6168 static inline void ecore_func_reset_port(struct bxe_softc *sc,
 6169                                         const struct ecore_func_sp_drv_ops *drv)
 6170 {
 6171         drv->reset_hw_port(sc);
 6172         ecore_func_reset_func(sc, drv);
 6173 }
 6174 
 6175 /**
 6176  * ecore_func_reset_cmn - reset HW at common stage
 6177  *
 6178  * @sc:         device handle
 6179  * @drv:        driver-specific callbacks
 6180  *
 6181  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 6182  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 6183  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 6184  */
 6185 static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
 6186                                         const struct ecore_func_sp_drv_ops *drv)
 6187 {
 6188         ecore_func_reset_port(sc, drv);
 6189         drv->reset_hw_cmn(sc);
 6190 }
 6191 
 6192 static inline int ecore_func_hw_reset(struct bxe_softc *sc,
 6193                                       struct ecore_func_state_params *params)
 6194 {
 6195         uint32_t reset_phase = params->params.hw_reset.reset_phase;
 6196         struct ecore_func_sp_obj *o = params->f_obj;
 6197         const struct ecore_func_sp_drv_ops *drv = o->drv;
 6198 
 6199         ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
 6200                   reset_phase);
 6201 
 6202         switch (reset_phase) {
 6203         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
 6204                 ecore_func_reset_cmn(sc, drv);
 6205                 break;
 6206         case FW_MSG_CODE_DRV_UNLOAD_PORT:
 6207                 ecore_func_reset_port(sc, drv);
 6208                 break;
 6209         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
 6210                 ecore_func_reset_func(sc, drv);
 6211                 break;
 6212         default:
 6213                 ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
 6214                           reset_phase);
 6215                 break;
 6216         }
 6217 
 6218         /* Complete the command immediately: no ramrods have been sent. */
 6219         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
 6220 
 6221         return ECORE_SUCCESS;
 6222 }
 6223 
 6224 static inline int ecore_func_send_start(struct bxe_softc *sc,
 6225                                         struct ecore_func_state_params *params)
 6226 {
 6227         struct ecore_func_sp_obj *o = params->f_obj;
 6228         struct function_start_data *rdata =
 6229                 (struct function_start_data *)o->rdata;
 6230         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 6231         struct ecore_func_start_params *start_params = &params->params.start;
 6232 
 6233         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6234 
 6235         /* Fill the ramrod data with provided parameters */
 6236         rdata->function_mode    = (uint8_t)start_params->mf_mode;
 6237         rdata->sd_vlan_tag      = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
 6238         rdata->path_id          = ECORE_PATH_ID(sc);
 6239         rdata->network_cos_mode = start_params->network_cos_mode;
 6240 
 6241         rdata->vxlan_dst_port   = start_params->vxlan_dst_port;
 6242         rdata->geneve_dst_port  = start_params->geneve_dst_port;
 6243         rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
 6244         rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
 6245         rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
 6246         rdata->inner_rss        = start_params->inner_rss;
 6247 
 6248         rdata->sd_accept_mf_clss_fail = start_params->class_fail;
 6249         if (start_params->class_fail_ethtype) {
 6250                 rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
 6251                 rdata->sd_accept_mf_clss_fail_ethtype =
 6252                         ECORE_CPU_TO_LE16(start_params->class_fail_ethtype);
 6253         }
 6254         rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
 6255         rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
 6256 
 6257         /* @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
 6258          * `sd_vlan_eth_type' will replace ethertype in SD mode even if
 6259          * it's set to 0; this will probably break SD, so we're setting it
 6260          * to ethertype 0x8100 for now.
 6261          */
 6262         if (start_params->sd_vlan_eth_type)
 6263                 rdata->sd_vlan_eth_type =
 6264                         ECORE_CPU_TO_LE16(start_params->sd_vlan_eth_type);
 6265         else
 6266                 rdata->sd_vlan_eth_type =
 6267                         ECORE_CPU_TO_LE16((uint16_t) 0x8100);
 6268 
 6269         rdata->no_added_tags = start_params->no_added_tags;
 6270 
 6271         rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
 6272         if (rdata->c2s_pri_tt_valid) {
 6273                 memcpy(rdata->c2s_pri_trans_table.val,
 6274                        start_params->c2s_pri,
 6275                        MAX_VLAN_PRIORITIES);
 6276                 rdata->c2s_pri_default = start_params->c2s_pri_default;
 6277         }
 6278 
 6279         /* No need for an explicit memory barrier here as long as we
 6280          * ensure the ordering of writing to the SPQ element
 6281          * and updating of the SPQ producer which involves a memory
 6282          * read. If the memory read is removed we will have to put a
 6283          * full memory barrier there (inside ecore_sp_post()).
 6284          */
 6285         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
 6286                              data_mapping, NONE_CONNECTION_TYPE);
 6287 }
 6288 
 6289 static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
 6290                                         struct ecore_func_state_params *params)
 6291 {
 6292         struct ecore_func_sp_obj *o = params->f_obj;
 6293         struct function_update_data *rdata =
 6294                 (struct function_update_data *)o->rdata;
 6295         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 6296         struct ecore_func_switch_update_params *switch_update_params =
 6297                 &params->params.switch_update;
 6298 
 6299         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6300 
 6301         /* Fill the ramrod data with provided parameters */
 6302         if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
 6303                            &switch_update_params->changes)) {
 6304                 rdata->tx_switch_suspend_change_flg = 1;
 6305                 rdata->tx_switch_suspend =
 6306                         ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
 6307                                        &switch_update_params->changes);
 6308         }
 6309 
 6310         if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
 6311                            &switch_update_params->changes)) {
 6312                 rdata->sd_vlan_tag_change_flg = 1;
 6313                 rdata->sd_vlan_tag =
 6314                         ECORE_CPU_TO_LE16(switch_update_params->vlan);
 6315         }
 6316 
 6317         if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
 6318                            &switch_update_params->changes)) {
 6319                 rdata->sd_vlan_eth_type_change_flg = 1;
 6320                 rdata->sd_vlan_eth_type =
 6321                         ECORE_CPU_TO_LE16(switch_update_params->vlan_eth_type);
 6322         }
 6323 
 6324         if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
 6325                            &switch_update_params->changes)) {
 6326                 rdata->sd_vlan_force_pri_change_flg = 1;
 6330                 rdata->sd_vlan_force_pri_flg =
 6331                         switch_update_params->vlan_force_prio;
 6332         }
 6333 
 6334         if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
 6335                            &switch_update_params->changes)) {
 6336                 rdata->update_tunn_cfg_flg = 1;
 6337                 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
 6338                                    &switch_update_params->changes))
 6339                         rdata->inner_clss_l2gre = 1;
 6340                 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
 6341                                    &switch_update_params->changes))
 6342                         rdata->inner_clss_vxlan = 1;
 6343                 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
 6344                                    &switch_update_params->changes))
 6345                         rdata->inner_clss_l2geneve = 1;
 6346                 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_RSS,
 6347                                    &switch_update_params->changes))
 6348                         rdata->inner_rss = 1;
 6349 
 6350                 rdata->vxlan_dst_port =
 6351                         ECORE_CPU_TO_LE16(switch_update_params->vxlan_dst_port);
 6352                 rdata->geneve_dst_port =
 6353                         ECORE_CPU_TO_LE16(switch_update_params->geneve_dst_port);
 6354         }
 6355 
 6356         rdata->echo = SWITCH_UPDATE;
 6357 
 6358         /* No need for an explicit memory barrier here as long as we
 6359          * ensure the ordering of writing to the SPQ element
 6360          * and updating of the SPQ producer which involves a memory
 6361          * read. If the memory read is removed we will have to put a
 6362          * full memory barrier there (inside ecore_sp_post()).
 6363          */
 6364         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
 6365                              data_mapping, NONE_CONNECTION_TYPE);
 6366 }
 6367 
 6368 static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
 6369                                          struct ecore_func_state_params *params)
 6370 {
 6371         struct ecore_func_sp_obj *o = params->f_obj;
 6372         struct function_update_data *rdata =
 6373                 (struct function_update_data *)o->afex_rdata;
 6374         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
 6375         struct ecore_func_afex_update_params *afex_update_params =
 6376                 &params->params.afex_update;
 6377 
 6378         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6379 
 6380         /* Fill the ramrod data with provided parameters */
 6381         rdata->vif_id_change_flg = 1;
 6382         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
 6383         rdata->afex_default_vlan_change_flg = 1;
 6384         rdata->afex_default_vlan =
 6385                 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
 6386         rdata->allowed_priorities_change_flg = 1;
 6387         rdata->allowed_priorities = afex_update_params->allowed_priorities;
 6388         rdata->echo = AFEX_UPDATE;
 6389 
 6390         /* No need for an explicit memory barrier here as long as we
 6391          * ensure the ordering of writing to the SPQ element
 6392          * and updating of the SPQ producer which involves a memory
 6393          * read. If the memory read is removed we will have to put a
 6394          * full memory barrier there (inside ecore_sp_post()).
 6395          */
 6396         ECORE_MSG(sc,
 6397                   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
 6398                   rdata->vif_id,
 6399                   rdata->afex_default_vlan, rdata->allowed_priorities);
 6400 
 6401         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
 6402                              data_mapping, NONE_CONNECTION_TYPE);
 6403 }
 6404 
 6405 static
 6406 inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
 6407                                          struct ecore_func_state_params *params)
 6408 {
 6409         struct ecore_func_sp_obj *o = params->f_obj;
 6410         struct afex_vif_list_ramrod_data *rdata =
 6411                 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
 6412         struct ecore_func_afex_viflists_params *afex_vif_params =
 6413                 &params->params.afex_viflists;
 6414         uint64_t *p_rdata = (uint64_t *)rdata;
 6415 
 6416         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6417 
 6418         /* Fill the ramrod data with provided parameters */
 6419         rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
 6420         rdata->func_bit_map          = afex_vif_params->func_bit_map;
 6421         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
 6422         rdata->func_to_clear         = afex_vif_params->func_to_clear;
 6423 
 6424         /* send the sub-command type in the echo field */
 6425         rdata->echo = afex_vif_params->afex_vif_list_command;
 6426 
 6427         ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
 6428                   rdata->afex_vif_list_command, rdata->vif_list_index,
 6429                   rdata->func_bit_map, rdata->func_to_clear);
 6430 
 6431         /* No need for an explicit memory barrier here as long as we
 6432          * ensure the ordering of writing to the SPQ element
 6433          * and updating of the SPQ producer which involves a memory
 6434          * read. If the memory read is removed we will have to put a
 6435          * full memory barrier there (inside ecore_sp_post()).
 6436          */
 6437 
 6438         /* this ramrod sends data directly and not through DMA mapping */
 6439         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
 6440                              *p_rdata, NONE_CONNECTION_TYPE);
 6441 }
 6442 
 6443 static inline int ecore_func_send_stop(struct bxe_softc *sc,
 6444                                        struct ecore_func_state_params *params)
 6445 {
 6446         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
 6447                              NONE_CONNECTION_TYPE);
 6448 }
 6449 
 6450 static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
 6451                                        struct ecore_func_state_params *params)
 6452 {
 6453         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
 6454                              NONE_CONNECTION_TYPE);
 6455 }

 6456 static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
 6457                                        struct ecore_func_state_params *params)
 6458 {
 6459         struct ecore_func_sp_obj *o = params->f_obj;
 6460         struct flow_control_configuration *rdata =
 6461                 (struct flow_control_configuration *)o->rdata;
 6462         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 6463         struct ecore_func_tx_start_params *tx_start_params =
 6464                 &params->params.tx_start;
 6465         int i;
 6466 
 6467         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6468 
 6469         rdata->dcb_enabled = tx_start_params->dcb_enabled;
 6470         rdata->dcb_version = tx_start_params->dcb_version;
 6471         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
 6472 
 6473         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
 6474                 rdata->traffic_type_to_priority_cos[i] =
 6475                         tx_start_params->traffic_type_to_priority_cos[i];
 6476 
 6477         for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
 6478                 rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
 6479 
 6480         /* No need for an explicit memory barrier here as long as we
 6481          * ensure the ordering of writing to the SPQ element
 6482          * and updating of the SPQ producer which involves a memory
 6483          * read. If the memory read is removed we will have to put a
 6484          * full memory barrier there (inside ecore_sp_post()).
 6485          */
 6486         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
 6487                              data_mapping, NONE_CONNECTION_TYPE);
 6488 }
 6489 
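      /**
       * ecore_func_send_set_timesync - send a SET_TIMESYNC ramrod
       *
       * @sc:         device handle
       * @params:     parameters of the transition
       *
       * Programs the PTP drift adjustment (command, value, period) and the
       * 64-bit clock offset delta, split into low/high 32-bit halves.
       */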
 6490 static inline int ecore_func_send_set_timesync(struct bxe_softc *sc,
 6491                                                struct ecore_func_state_params *params)
 6492 {
 6493         struct ecore_func_sp_obj *o = params->f_obj;
 6494         struct set_timesync_ramrod_data *rdata =
 6495                 (struct set_timesync_ramrod_data *)o->rdata;
 6496         ecore_dma_addr_t data_mapping = o->rdata_mapping;
 6497         struct ecore_func_set_timesync_params *set_timesync_params =
 6498                 &params->params.set_timesync;
 6499 
 6500         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 6501 
 6502         /* Fill the ramrod data with provided parameters */
 6503         rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
 6504         rdata->offset_cmd = set_timesync_params->offset_cmd;
 6505         rdata->add_sub_drift_adjust_value =
 6506                 set_timesync_params->add_sub_drift_adjust_value;
 6507         rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
 6508         rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
 6509         rdata->offset_delta.lo =
 6510                 ECORE_CPU_TO_LE32(U64_LO(set_timesync_params->offset_delta));
 6511         rdata->offset_delta.hi =
 6512                 ECORE_CPU_TO_LE32(U64_HI(set_timesync_params->offset_delta));
 6513 
 6514         ECORE_MSG(sc, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
 6515            rdata->drift_adjust_cmd, rdata->offset_cmd,
 6516            rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
 6517            rdata->drift_adjust_period, rdata->offset_delta.lo,
 6518            rdata->offset_delta.hi);
 6519 
 6520         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
 6521                              data_mapping, NONE_CONNECTION_TYPE);
 6522 }
 6523 
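      /**
       * ecore_func_send_cmd - dispatch a function-level command
       *
       * @sc:         device handle
       * @params:     parameters of the transition
       *
       * Maps each ECORE_F_CMD_* value to its send/init routine; unknown
       * commands fail with ECORE_INVAL.
       */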
 6524 static int ecore_func_send_cmd(struct bxe_softc *sc,
 6525                                struct ecore_func_state_params *params)
 6526 {
 6527         switch (params->cmd) {
 6528         case ECORE_F_CMD_HW_INIT:
 6529                 return ecore_func_hw_init(sc, params);
 6530         case ECORE_F_CMD_START:
 6531                 return ecore_func_send_start(sc, params);
 6532         case ECORE_F_CMD_STOP:
 6533                 return ecore_func_send_stop(sc, params);
 6534         case ECORE_F_CMD_HW_RESET:
 6535                 return ecore_func_hw_reset(sc, params);
 6536         case ECORE_F_CMD_AFEX_UPDATE:
 6537                 return ecore_func_send_afex_update(sc, params);
 6538         case ECORE_F_CMD_AFEX_VIFLISTS:
 6539                 return ecore_func_send_afex_viflists(sc, params);
 6540         case ECORE_F_CMD_TX_STOP:
 6541                 return ecore_func_send_tx_stop(sc, params);
 6542         case ECORE_F_CMD_TX_START:
 6543                 return ecore_func_send_tx_start(sc, params);
 6544         case ECORE_F_CMD_SWITCH_UPDATE:
 6545                 return ecore_func_send_switch_update(sc, params);
 6546         case ECORE_F_CMD_SET_TIMESYNC:
 6547                 return ecore_func_send_set_timesync(sc, params);
 6548         default:
 6549                 ECORE_ERR("Unknown command: %d\n", params->cmd);
 6550                 return ECORE_INVAL;
 6551         }
 6552 }
 6553 
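      /**
       * ecore_init_func_obj - initialize the function state object
       *
       * @sc:                 device handle
       * @obj:                function state object to initialize
       * @rdata:              ramrod data buffer
       * @rdata_mapping:      DMA address of @rdata
       * @afex_rdata:         AFEX ramrod data buffer
       * @afex_rdata_mapping: DMA address of @afex_rdata
       * @drv_iface:          driver-specific callbacks
       */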
 6554 void ecore_init_func_obj(struct bxe_softc *sc,
 6555                          struct ecore_func_sp_obj *obj,
 6556                          void *rdata, ecore_dma_addr_t rdata_mapping,
 6557                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
 6558                          struct ecore_func_sp_drv_ops *drv_iface)
 6559 {
 6560         ECORE_MEMSET(obj, 0, sizeof(*obj));
 6561 
 6562         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
 6563 
 6564         obj->rdata = rdata;
 6565         obj->rdata_mapping = rdata_mapping;
 6566         obj->afex_rdata = afex_rdata;
 6567         obj->afex_rdata_mapping = afex_rdata_mapping;
 6568         obj->send_cmd = ecore_func_send_cmd;
 6569         obj->check_transition = ecore_func_chk_transition;
 6570         obj->complete_cmd = ecore_func_comp_cmd;
 6571         obj->wait_comp = ecore_func_wait_comp;
 6572         obj->drv = drv_iface;
 6573 }
 6574 
 6575 /**
 6576  * ecore_func_state_change - perform Function state change transition
 6577  *
 6578  * @sc:         device handle
 6579  * @params:     parameters to perform the transaction
 6580  *
 6581  * returns 0 in case of a successfully completed transition,
 6582  *         negative error code in case of failure, positive
 6583  *         (EBUSY) value if a completion for the command is
 6584  *         still pending (possible only if RAMROD_COMP_WAIT is
 6585  *         not set in params->ramrod_flags for asynchronous
 6586  *         commands).
 6587  */
 6588 int ecore_func_state_change(struct bxe_softc *sc,
 6589                             struct ecore_func_state_params *params)
 6590 {
 6591         struct ecore_func_sp_obj *o = params->f_obj;
 6592         int rc, cnt = 300;
 6593         enum ecore_func_cmd cmd = params->cmd;
 6594         unsigned long *pending = &o->pending;
 6595 
 6596         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
 6597 
 6598         /* Check that the requested transition is legal */
 6599         rc = o->check_transition(sc, o, params);
 6600         if ((rc == ECORE_BUSY) &&
 6601             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
 6602                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
 6603                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
 6604                         ECORE_MSLEEP(10);
 6605                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
 6606                         rc = o->check_transition(sc, o, params);
 6607                 }
 6608                 if (rc == ECORE_BUSY) {
 6609                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
 6610                         ECORE_ERR("timeout waiting for previous ramrod completion\n");
 6611                         return rc;
 6612                 }
 6613         } else if (rc) {
 6614                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
 6615                 return rc;
 6616         }
 6617 
 6618         /* Set "pending" bit */
 6619         ECORE_SET_BIT(cmd, pending);
 6620 
 6621         /* Don't send a command if only driver cleanup was requested */
 6622         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
 6623                 ecore_func_state_change_comp(sc, o, cmd);
 6624                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
 6625         } else {
 6626                 /* Send a ramrod */
 6627                 rc = o->send_cmd(sc, params);
 6628 
 6629                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
 6630 
 6631                 if (rc) {
 6632                         o->next_state = ECORE_F_STATE_MAX;
 6633                         ECORE_CLEAR_BIT(cmd, pending);
 6634                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
 6635                         return rc;
 6636                 }
 6637 
 6638                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
 6639                         rc = o->wait_comp(sc, o, cmd);
 6640                         if (rc)
 6641                                 return rc;
 6642 
 6643                         return ECORE_SUCCESS;
 6644                 }
 6645         }
 6646 
 6647         return ECORE_RET_PENDING(cmd, pending);
 6648 }
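      /*
       * A minimal usage sketch (illustration only, assuming the bxe
       * driver's calling convention): build the request on the stack,
       * point f_obj at the per-device function object and set
       * RAMROD_COMP_WAIT to block until the ramrod completes:
       *
       *      struct ecore_func_state_params func_params = { NULL };
       *
       *      func_params.f_obj = &sc->func_obj;
       *      func_params.cmd = ECORE_F_CMD_TX_STOP;
       *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
       *
       *      if (ecore_func_state_change(sc, &func_params) != ECORE_SUCCESS)
       *              handle the failure;
       */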
