FreeBSD/Linux Kernel Cross Reference
sys/dev/bxe/bxe_stats.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  *
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
    16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   26  * THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "bxe.h"
   33 #include "bxe_stats.h"
   34 
   35 #ifdef __i386__
   36 #define BITS_PER_LONG 32
   37 #else
   38 #define BITS_PER_LONG 64
   39 #endif
   40 
   41 
   42 static inline long
   43 bxe_hilo(uint32_t *hiref)
   44 {
   45     uint32_t lo = *(hiref + 1);
   46 #if (BITS_PER_LONG == 64)
   47     uint32_t hi = *hiref;
   48     return (HILO_U64(hi, lo));
   49 #else
   50     return (lo);
   51 #endif
   52 }
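       /*
        * HILO_U64() is defined elsewhere in the driver (bxe.h); it is assumed
        * to splice the two 32-bit halves into a single 64-bit value, roughly
        * ((uint64_t)(hi) << 32) + (lo).  On 64-bit builds bxe_hilo() therefore
        * returns the full counter, while 32-bit builds fall back to the low
        * 32 bits only.
        */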
   53 
   54 static inline uint16_t
   55 bxe_get_port_stats_dma_len(struct bxe_softc *sc)
   56 {
   57     uint16_t res = 0;
   58     uint32_t size;
   59 
   60     /* 'newest' convention - shmem2 contains the size of the port stats */
   61     if (SHMEM2_HAS(sc, sizeof_port_stats)) {
   62         size = SHMEM2_RD(sc, sizeof_port_stats);
   63         if (size) {
   64             res = size;
   65         }
   66 
   67         /* prevent newer BC from causing buffer overflow */
   68         if (res > sizeof(struct host_port_stats)) {
   69             res = sizeof(struct host_port_stats);
   70         }
   71     }
   72 
   73     /*
   74      * Older convention - all BCs support the port stats fields up until
   75      * the 'not_used' field
   76      */
   77     if (!res) {
   78         res = (offsetof(struct host_port_stats, not_used) + 4);
   79 
   80         /* if PFC stats are supported by the MFW, DMA them as well */
   81         if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
   82             res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
   83                     offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
   84         }
   85     }
   86 
   87     res >>= 2;
   88 
   89     DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
   90     return (res);
   91 }
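       /*
        * The value returned above is a length in 32-bit words, not bytes: the
        * size read from shmem2 (or derived from the structure layout) is
        * shifted right by 2 before being handed to the DMAE code, and the
        * DBASSERT bounds it to two maximum-sized DMAE reads, matching the
        * two-command split used in bxe_stats_pmf_update() below.
        */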
   92 
   93 /*
   94  * Init service functions
   95  */
   96 
   97 static void
   98 bxe_dp_stats(struct bxe_softc *sc)
   99 {
  100     int i;
  101 
  102     BLOGD(sc, DBG_STATS,
  103           "dumping stats:\n"
  104           "  fw_stats_req\n"
  105           "    hdr\n"
  106           "      cmd_num %d\n"
  107           "      reserved0 %d\n"
  108           "      drv_stats_counter %d\n"
  109           "      reserved1 %d\n"
  110           "      stats_counters_addrs %x %x\n",
  111           sc->fw_stats_req->hdr.cmd_num,
  112           sc->fw_stats_req->hdr.reserved0,
  113           sc->fw_stats_req->hdr.drv_stats_counter,
  114           sc->fw_stats_req->hdr.reserved1,
  115           sc->fw_stats_req->hdr.stats_counters_addrs.hi,
  116           sc->fw_stats_req->hdr.stats_counters_addrs.lo);
  117 
  118     for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
  119         BLOGD(sc, DBG_STATS,
  120               "query[%d]\n"
  121               "  kind %d\n"
  122               "  index %d\n"
  123               "  funcID %d\n"
  124               "  reserved %d\n"
  125               "  address %x %x\n",
  126               i,
  127               sc->fw_stats_req->query[i].kind,
  128               sc->fw_stats_req->query[i].index,
  129               sc->fw_stats_req->query[i].funcID,
  130               sc->fw_stats_req->query[i].reserved,
  131               sc->fw_stats_req->query[i].address.hi,
  132               sc->fw_stats_req->query[i].address.lo);
  133     }
  134 }
  135 
  136 /*
  137  * Post the next statistics ramrod. Protect it with the lock in
  138  * order to ensure the strict order between statistics ramrods
   139  * (each ramrod has a sequence number passed in
   140  * sc->fw_stats_req->hdr.drv_stats_counter, and ramrods must be
  141  * sent in order).
  142  */
  143 static void
  144 bxe_storm_stats_post(struct bxe_softc *sc)
  145 {
  146     int rc;
  147 
  148     if (!sc->stats_pending) {
  149         BXE_STATS_LOCK(sc);
  150 
  151         if (sc->stats_pending) {
  152             BXE_STATS_UNLOCK(sc);
  153             return;
  154         }
  155 
  156         sc->fw_stats_req->hdr.drv_stats_counter =
  157             htole16(sc->stats_counter++);
  158 
  159         BLOGD(sc, DBG_STATS,
  160               "sending statistics ramrod %d\n",
  161               le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
  162 
  163         /* adjust the ramrod to include VF queues statistics */
  164         // XXX bxe_iov_adjust_stats_req(sc);
  165 
  166         bxe_dp_stats(sc);
  167 
  168         /* send FW stats ramrod */
  169         rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
  170                          U64_HI(sc->fw_stats_req_mapping),
  171                          U64_LO(sc->fw_stats_req_mapping),
  172                          NONE_CONNECTION_TYPE);
  173         if (rc == 0) {
  174             sc->stats_pending = 1;
  175         }
  176 
  177         BXE_STATS_UNLOCK(sc);
  178     }
  179 }
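       /*
        * The unlocked stats_pending test above is re-checked under
        * BXE_STATS_LOCK (a double-checked pattern), so only one statistics
        * ramrod can be in flight at a time and each posted ramrod carries the
        * next value of stats_counter.  The storms echo this sequence number
        * back, and bxe_storm_stats_validate_counters() compares against it
        * before the DMA'd statistics are trusted.
        */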
  180 
  181 static void
  182 bxe_hw_stats_post(struct bxe_softc *sc)
  183 {
  184     struct dmae_cmd *dmae = &sc->stats_dmae;
  185     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
  186     int loader_idx;
  187     uint32_t opcode;
  188 
  189     *stats_comp = DMAE_COMP_VAL;
  190     if (CHIP_REV_IS_SLOW(sc)) {
  191         return;
  192     }
  193 
  194     /* Update MCP's statistics if possible */
  195     if (sc->func_stx) {
  196         memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
  197                sizeof(sc->func_stats));
  198     }
  199 
  200     /* loader */
  201     if (sc->executer_idx) {
  202         loader_idx = PMF_DMAE_C(sc);
  203         opcode =  bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
  204                                   TRUE, DMAE_COMP_GRC);
  205         opcode = bxe_dmae_opcode_clr_src_reset(opcode);
  206 
  207         memset(dmae, 0, sizeof(struct dmae_cmd));
  208         dmae->opcode = opcode;
  209         dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
  210         dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
  211         dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
  212                               sizeof(struct dmae_cmd) *
  213                               (loader_idx + 1)) >> 2);
  214         dmae->dst_addr_hi = 0;
  215         dmae->len = sizeof(struct dmae_cmd) >> 2;
  216         if (CHIP_IS_E1(sc)) {
  217             dmae->len--;
  218         }
  219         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
  220         dmae->comp_addr_hi = 0;
  221         dmae->comp_val = 1;
  222 
  223         *stats_comp = 0;
  224         bxe_post_dmae(sc, dmae, loader_idx);
  225     } else if (sc->func_stx) {
  226         *stats_comp = 0;
  227         bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
  228     }
  229 }
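       /*
        * The "loader" command built above does not copy statistics itself:
        * it copies a queued dmae_cmd from host memory into the DMAE command
        * memory (DMAE_REG_CMD_MEM) and kicks it through dmae_reg_go_c[].  The
        * intermediate queued commands complete back to the loader's "go"
        * register, and clearing the source-reset bit presumably lets the
        * loader walk through dmae[0], dmae[1], ... in turn, so the commands
        * prepared by bxe_port_stats_init() and friends run back to back.
        * Only the final command in the chain completes to host memory by
        * writing DMAE_COMP_VAL into stats_comp.
        */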
  230 
  231 static int
  232 bxe_stats_comp(struct bxe_softc *sc)
  233 {
  234     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
  235     int cnt = 10;
  236 
  237     while (*stats_comp != DMAE_COMP_VAL) {
  238         if (!cnt) {
  239             BLOGE(sc, "Timeout waiting for stats finished\n");
  240             BXE_SET_ERROR_BIT(sc, BXE_ERR_STATS_TO);
  241             taskqueue_enqueue_timeout(taskqueue_thread,
  242                 &sc->sp_err_timeout_task, hz/10);
  243             break;
  244 
  245         }
  246 
  247         cnt--;
  248         DELAY(1000);
  249     }
  250 
  251     return (1);
  252 }
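       /*
        * The wait above polls at most 10 times with a 1ms DELAY() between
        * attempts, so a missing DMAE completion is declared a timeout after
        * roughly 10ms; the error recovery task is then scheduled and the
        * function unconditionally returns 1.
        */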
  253 
  254 /*
  255  * Statistics service functions
  256  */
  257 
  258 static void
  259 bxe_stats_pmf_update(struct bxe_softc *sc)
  260 {
  261     struct dmae_cmd *dmae;
  262     uint32_t opcode;
  263     int loader_idx = PMF_DMAE_C(sc);
  264     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
  265 
  266     if (sc->devinfo.bc_ver <= 0x06001400) {
  267         /*
  268          * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
  269          * BRB registers while the BRB block is in reset. The DMA transfer
   270          * below triggers this issue, causing the DMAE to stop
  271          * functioning. Skip this initial stats transfer for old bootcode
  272          * versions <= 6.0.20.
  273          */
  274         return;
  275     }
  276 
  277     /* sanity */
  278     if (!sc->port.pmf || !sc->port.port_stx) {
  279         BLOGE(sc, "BUG!\n");
  280         return;
  281     }
  282 
  283     sc->executer_idx = 0;
  284 
  285     opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
  286 
  287     dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  288     dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
  289     dmae->src_addr_lo = (sc->port.port_stx >> 2);
  290     dmae->src_addr_hi = 0;
  291     dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
  292     dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
  293     dmae->len = DMAE_LEN32_RD_MAX;
  294     dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  295     dmae->comp_addr_hi = 0;
  296     dmae->comp_val = 1;
  297 
  298     dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  299     dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
  300     dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
  301     dmae->src_addr_hi = 0;
  302     dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
  303                                DMAE_LEN32_RD_MAX * 4);
  304     dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
  305                                DMAE_LEN32_RD_MAX * 4);
  306     dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
  307 
  308     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
  309     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
  310     dmae->comp_val = DMAE_COMP_VAL;
  311 
  312     *stats_comp = 0;
  313     bxe_hw_stats_post(sc);
  314     bxe_stats_comp(sc);
  315 }
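       /*
        * The port stats area is pulled in two DMAE reads because a single
        * read is capped at DMAE_LEN32_RD_MAX dwords: the first command above
        * reads that maximum with a GRC completion (so the loader chains on to
        * the second), and the second reads the remainder returned by
        * bxe_get_port_stats_dma_len() and completes to stats_comp.
        */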
  316 
  317 static void
  318 bxe_port_stats_init(struct bxe_softc *sc)
  319 {
  320     struct dmae_cmd *dmae;
  321     int port = SC_PORT(sc);
  322     uint32_t opcode;
  323     int loader_idx = PMF_DMAE_C(sc);
  324     uint32_t mac_addr;
  325     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
  326 
  327     /* sanity */
  328     if (!sc->link_vars.link_up || !sc->port.pmf) {
  329         BLOGE(sc, "BUG!\n");
  330         return;
  331     }
  332 
  333     sc->executer_idx = 0;
  334 
  335     /* MCP */
  336     opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
  337                              TRUE, DMAE_COMP_GRC);
  338 
  339     if (sc->port.port_stx) {
  340         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  341         dmae->opcode = opcode;
  342         dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
  343         dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
  344         dmae->dst_addr_lo = sc->port.port_stx >> 2;
  345         dmae->dst_addr_hi = 0;
  346         dmae->len = bxe_get_port_stats_dma_len(sc);
  347         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  348         dmae->comp_addr_hi = 0;
  349         dmae->comp_val = 1;
  350     }
  351 
  352     if (sc->func_stx) {
  353         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  354         dmae->opcode = opcode;
  355         dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
  356         dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
  357         dmae->dst_addr_lo = (sc->func_stx >> 2);
  358         dmae->dst_addr_hi = 0;
  359         dmae->len = (sizeof(struct host_func_stats) >> 2);
  360         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  361         dmae->comp_addr_hi = 0;
  362         dmae->comp_val = 1;
  363     }
  364 
  365     /* MAC */
  366     opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
  367                              TRUE, DMAE_COMP_GRC);
  368 
  369     /* EMAC is special */
  370     if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
  371         mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
  372 
  373         /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
  374         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  375         dmae->opcode = opcode;
  376         dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
  377         dmae->src_addr_hi = 0;
  378         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
  379         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
  380         dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
  381         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  382         dmae->comp_addr_hi = 0;
  383         dmae->comp_val = 1;
  384 
  385         /* EMAC_REG_EMAC_RX_STAT_AC_28 */
  386         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  387         dmae->opcode = opcode;
  388         dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
  389         dmae->src_addr_hi = 0;
  390         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
  391                                    offsetof(struct emac_stats,
  392                                             rx_stat_falsecarriererrors));
  393         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
  394                                    offsetof(struct emac_stats,
  395                                             rx_stat_falsecarriererrors));
  396         dmae->len = 1;
  397         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  398         dmae->comp_addr_hi = 0;
  399         dmae->comp_val = 1;
  400 
  401         /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
  402         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  403         dmae->opcode = opcode;
  404         dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
  405         dmae->src_addr_hi = 0;
  406         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
  407                                    offsetof(struct emac_stats,
  408                                             tx_stat_ifhcoutoctets));
  409         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
  410                                    offsetof(struct emac_stats,
  411                                             tx_stat_ifhcoutoctets));
  412         dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
  413         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  414         dmae->comp_addr_hi = 0;
  415         dmae->comp_val = 1;
  416     } else {
  417         uint32_t tx_src_addr_lo, rx_src_addr_lo;
  418         uint16_t rx_len, tx_len;
  419 
  420         /* configure the params according to MAC type */
  421         switch (sc->link_vars.mac_type) {
  422         case ELINK_MAC_TYPE_BMAC:
  423             mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
  424                                 NIG_REG_INGRESS_BMAC0_MEM;
  425 
  426             /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
  427                BIGMAC_REGISTER_TX_STAT_GTBYT */
  428             if (CHIP_IS_E1x(sc)) {
  429                 tx_src_addr_lo =
  430                     ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
  431                 tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
  432                            BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
  433                 rx_src_addr_lo =
  434                     ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
  435                 rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
  436                            BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
  437             } else {
  438                 tx_src_addr_lo =
  439                     ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
  440                 tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
  441                            BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
  442                 rx_src_addr_lo =
  443                     ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
  444                 rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
  445                            BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
  446             }
  447 
  448             break;
  449 
  450         case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
  451         case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
  452         default:
  453             mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
  454             tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
  455             rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
  456             tx_len =
  457                 (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
  458             rx_len =
  459                 (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
  460             break;
  461         }
  462 
  463         /* TX stats */
  464         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  465         dmae->opcode = opcode;
  466         dmae->src_addr_lo = tx_src_addr_lo;
  467         dmae->src_addr_hi = 0;
  468         dmae->len = tx_len;
  469         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
  470         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
  471         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  472         dmae->comp_addr_hi = 0;
  473         dmae->comp_val = 1;
  474 
  475         /* RX stats */
  476         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  477         dmae->opcode = opcode;
  478         dmae->src_addr_hi = 0;
  479         dmae->src_addr_lo = rx_src_addr_lo;
  480         dmae->dst_addr_lo =
  481             U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
  482         dmae->dst_addr_hi =
  483             U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
  484         dmae->len = rx_len;
  485         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  486         dmae->comp_addr_hi = 0;
  487         dmae->comp_val = 1;
  488     }
  489 
  490     /* NIG */
  491     if (!CHIP_IS_E3(sc)) {
  492         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  493         dmae->opcode = opcode;
  494         dmae->src_addr_lo =
  495             (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
  496                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
  497         dmae->src_addr_hi = 0;
  498         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
  499                                    offsetof(struct nig_stats,
  500                                             egress_mac_pkt0_lo));
  501         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
  502                                    offsetof(struct nig_stats,
  503                                             egress_mac_pkt0_lo));
  504         dmae->len = ((2 * sizeof(uint32_t)) >> 2);
  505         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  506         dmae->comp_addr_hi = 0;
  507         dmae->comp_val = 1;
  508 
  509         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  510         dmae->opcode = opcode;
  511         dmae->src_addr_lo =
  512             (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
  513                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
  514         dmae->src_addr_hi = 0;
  515         dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
  516                                    offsetof(struct nig_stats,
  517                                             egress_mac_pkt1_lo));
  518         dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
  519                                    offsetof(struct nig_stats,
  520                                             egress_mac_pkt1_lo));
  521         dmae->len = ((2 * sizeof(uint32_t)) >> 2);
  522         dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
  523         dmae->comp_addr_hi = 0;
  524         dmae->comp_val = 1;
  525     }
  526 
  527     dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
  528     dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
  529                                    TRUE, DMAE_COMP_PCI);
  530     dmae->src_addr_lo =
  531         (port ? NIG_REG_STAT1_BRB_DISCARD :
  532                 NIG_REG_STAT0_BRB_DISCARD) >> 2;
  533     dmae->src_addr_hi = 0;
  534     dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
  535     dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
  536     dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
  537 
  538     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
  539     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
  540     dmae->comp_val = DMAE_COMP_VAL;
  541 
  542     *stats_comp = 0;
  543 }
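       /*
        * Everything queued above runs as one DMAE chain once
        * bxe_hw_stats_post() is called: the port and function stats are
        * pushed to the MCP, the MAC counters are pulled from the EMAC, BMAC
        * or MSTAT block depending on link_vars.mac_type, the NIG per-port
        * packet counters are read on non-E3 chips, and only the final BRB
        * discard read completes to stats_comp with DMAE_COMP_VAL.
        */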
  544 
  545 static void
  546 bxe_func_stats_init(struct bxe_softc *sc)
  547 {
  548     struct dmae_cmd *dmae = &sc->stats_dmae;
  549     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
  550 
  551     /* sanity */
  552     if (!sc->func_stx) {
  553         BLOGE(sc, "BUG!\n");
  554         return;
  555     }
  556 
  557     sc->executer_idx = 0;
  558     memset(dmae, 0, sizeof(struct dmae_cmd));
  559 
  560     dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
  561                                    TRUE, DMAE_COMP_PCI);
  562     dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
  563     dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
  564     dmae->dst_addr_lo = (sc->func_stx >> 2);
  565     dmae->dst_addr_hi = 0;
  566     dmae->len = (sizeof(struct host_func_stats) >> 2);
  567     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
  568     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
  569     dmae->comp_val = DMAE_COMP_VAL;
  570 
  571     *stats_comp = 0;
  572 }
  573 
  574 static void
  575 bxe_stats_start(struct bxe_softc *sc)
  576 {
  577     /*
  578      * VFs travel through here as part of the statistics FSM, but no action
  579      * is required
  580      */
  581     if (IS_VF(sc)) {
  582         return;
  583     }
  584 
  585     if (sc->port.pmf) {
  586         bxe_port_stats_init(sc);
  587     }
  588 
  589     else if (sc->func_stx) {
  590         bxe_func_stats_init(sc);
  591     }
  592 
  593     bxe_hw_stats_post(sc);
  594     bxe_storm_stats_post(sc);
  595 }
  596 
  597 static void
  598 bxe_stats_pmf_start(struct bxe_softc *sc)
  599 {
  600     bxe_stats_comp(sc);
  601     bxe_stats_pmf_update(sc);
  602     bxe_stats_start(sc);
  603 }
  604 
  605 static void
  606 bxe_stats_restart(struct bxe_softc *sc)
  607 {
  608     /*
  609      * VFs travel through here as part of the statistics FSM, but no action
  610      * is required
  611      */
  612     if (IS_VF(sc)) {
  613         return;
  614     }
  615 
  616     bxe_stats_comp(sc);
  617     bxe_stats_start(sc);
  618 }
  619 
  620 static void
  621 bxe_bmac_stats_update(struct bxe_softc *sc)
  622 {
  623     struct host_port_stats *pstats = BXE_SP(sc, port_stats);
  624     struct bxe_eth_stats *estats = &sc->eth_stats;
  625     struct {
  626         uint32_t lo;
  627         uint32_t hi;
  628     } diff;
  629 
  630     if (CHIP_IS_E1x(sc)) {
  631         struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);
  632 
  633         /* the macros below will use "bmac1_stats" type */
  634         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
  635         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
  636         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
  637         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
  638         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
  639         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
  640         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
  641         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
  642         UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
  643 
  644         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
  645         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
  646         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
  647         UPDATE_STAT64(tx_stat_gt127,
  648                       tx_stat_etherstatspkts65octetsto127octets);
  649         UPDATE_STAT64(tx_stat_gt255,
  650                       tx_stat_etherstatspkts128octetsto255octets);
  651         UPDATE_STAT64(tx_stat_gt511,
  652                       tx_stat_etherstatspkts256octetsto511octets);
  653         UPDATE_STAT64(tx_stat_gt1023,
  654                       tx_stat_etherstatspkts512octetsto1023octets);
  655         UPDATE_STAT64(tx_stat_gt1518,
  656                       tx_stat_etherstatspkts1024octetsto1522octets);
  657         UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
  658         UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
  659         UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
  660         UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
  661         UPDATE_STAT64(tx_stat_gterr,
  662                       tx_stat_dot3statsinternalmactransmiterrors);
  663         UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
  664     } else {
  665         struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
  666         struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
  667 
  668         /* the macros below will use "bmac2_stats" type */
  669         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
  670         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
  671         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
  672         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
  673         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
  674         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
  675         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
  676         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
  677         UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
  678         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
  679         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
  680         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
  681         UPDATE_STAT64(tx_stat_gt127,
  682                       tx_stat_etherstatspkts65octetsto127octets);
  683         UPDATE_STAT64(tx_stat_gt255,
  684                       tx_stat_etherstatspkts128octetsto255octets);
  685         UPDATE_STAT64(tx_stat_gt511,
  686                       tx_stat_etherstatspkts256octetsto511octets);
  687         UPDATE_STAT64(tx_stat_gt1023,
  688                       tx_stat_etherstatspkts512octetsto1023octets);
  689         UPDATE_STAT64(tx_stat_gt1518,
  690                       tx_stat_etherstatspkts1024octetsto1522octets);
  691         UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
  692         UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
  693         UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
  694         UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
  695         UPDATE_STAT64(tx_stat_gterr,
  696                       tx_stat_dot3statsinternalmactransmiterrors);
  697         UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
  698 
  699         /* collect PFC stats */
  700         pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
  701         pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
  702         ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
  703                pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
  704 
  705         pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
  706         pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
  707         ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
  708                pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
  709     }
  710 
  711     estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
  712     estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
  713 
  714     estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
  715     estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
  716 
  717     estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
  718     estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
  719     estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
  720     estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
  721 }
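       /*
        * The UPDATE_STAT64() macros above are assumed to expand against the
        * local "new", "pstats" and "diff" variables (new BMAC reading minus
        * the previous snapshot, accumulated into pstats->mac_stx[]), which is
        * why those locals look unused in the function body.
        */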
  722 
  723 static void
  724 bxe_mstat_stats_update(struct bxe_softc *sc)
  725 {
  726     struct host_port_stats *pstats = BXE_SP(sc, port_stats);
  727     struct bxe_eth_stats *estats = &sc->eth_stats;
  728     struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);
  729 
  730     ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
  731     ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
  732     ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
  733     ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
  734     ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
  735     ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
  736     ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
  737     ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
  738     ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
  739     ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
  740 
  741     /* collect pfc stats */
  742     ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
  743            pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
  744     ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
  745            pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
  746 
  747     ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
  748     ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
  749     ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
  750     ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
  751     ADD_STAT64(stats_tx.tx_gt1023,
  752                tx_stat_etherstatspkts512octetsto1023octets);
  753     ADD_STAT64(stats_tx.tx_gt1518,
  754                tx_stat_etherstatspkts1024octetsto1522octets);
  755     ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
  756 
  757     ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
  758     ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
  759     ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
  760 
  761     ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
  762     ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
  763 
  764     estats->etherstatspkts1024octetsto1522octets_hi =
  765         pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
  766     estats->etherstatspkts1024octetsto1522octets_lo =
  767         pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
  768 
  769     estats->etherstatspktsover1522octets_hi =
  770         pstats->mac_stx[1].tx_stat_mac_2047_hi;
  771     estats->etherstatspktsover1522octets_lo =
  772         pstats->mac_stx[1].tx_stat_mac_2047_lo;
  773 
  774     ADD_64(estats->etherstatspktsover1522octets_hi,
  775            pstats->mac_stx[1].tx_stat_mac_4095_hi,
  776            estats->etherstatspktsover1522octets_lo,
  777            pstats->mac_stx[1].tx_stat_mac_4095_lo);
  778 
  779     ADD_64(estats->etherstatspktsover1522octets_hi,
  780            pstats->mac_stx[1].tx_stat_mac_9216_hi,
  781            estats->etherstatspktsover1522octets_lo,
  782            pstats->mac_stx[1].tx_stat_mac_9216_lo);
  783 
  784     ADD_64(estats->etherstatspktsover1522octets_hi,
  785            pstats->mac_stx[1].tx_stat_mac_16383_hi,
  786            estats->etherstatspktsover1522octets_lo,
  787            pstats->mac_stx[1].tx_stat_mac_16383_lo);
  788 
  789     estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
  790     estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
  791 
  792     estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
  793     estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
  794 
  795     estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
  796     estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
  797     estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
  798     estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
  799 }
  800 
  801 static void
  802 bxe_emac_stats_update(struct bxe_softc *sc)
  803 {
  804     struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
  805     struct host_port_stats *pstats = BXE_SP(sc, port_stats);
  806     struct bxe_eth_stats *estats = &sc->eth_stats;
  807 
  808     UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
  809     UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
  810     UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
  811     UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
  812     UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
  813     UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
  814     UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
  815     UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
  816     UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
  817     UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
  818     UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
  819     UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
  820     UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
  821     UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
  822     UPDATE_EXTEND_STAT(tx_stat_outxonsent);
  823     UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
  824     UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
  825     UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
  826     UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
  827     UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
  828     UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
  829     UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
  830     UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
  831     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
  832     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
  833     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
  834     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
  835     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
  836     UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
  837     UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
  838     UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
  839 
  840     estats->pause_frames_received_hi =
  841         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
  842     estats->pause_frames_received_lo =
  843         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
  844     ADD_64(estats->pause_frames_received_hi,
  845            pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
  846            estats->pause_frames_received_lo,
  847            pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
  848 
  849     estats->pause_frames_sent_hi =
  850         pstats->mac_stx[1].tx_stat_outxonsent_hi;
  851     estats->pause_frames_sent_lo =
  852         pstats->mac_stx[1].tx_stat_outxonsent_lo;
  853     ADD_64(estats->pause_frames_sent_hi,
  854            pstats->mac_stx[1].tx_stat_outxoffsent_hi,
  855            estats->pause_frames_sent_lo,
  856            pstats->mac_stx[1].tx_stat_outxoffsent_lo);
  857 }
  858 
  859 static int
  860 bxe_hw_stats_update(struct bxe_softc *sc)
  861 {
  862     struct nig_stats *new = BXE_SP(sc, nig_stats);
  863     struct nig_stats *old = &(sc->port.old_nig_stats);
  864     struct host_port_stats *pstats = BXE_SP(sc, port_stats);
  865     struct bxe_eth_stats *estats = &sc->eth_stats;
  866     uint32_t lpi_reg, nig_timer_max;
  867     struct {
  868         uint32_t lo;
  869         uint32_t hi;
  870     } diff;
  871 
  872     switch (sc->link_vars.mac_type) {
  873     case ELINK_MAC_TYPE_BMAC:
  874         bxe_bmac_stats_update(sc);
  875         break;
  876 
  877     case ELINK_MAC_TYPE_EMAC:
  878         bxe_emac_stats_update(sc);
  879         break;
  880 
  881     case ELINK_MAC_TYPE_UMAC:
  882     case ELINK_MAC_TYPE_XMAC:
  883         bxe_mstat_stats_update(sc);
  884         break;
  885 
  886     case ELINK_MAC_TYPE_NONE: /* unreached */
  887         BLOGD(sc, DBG_STATS,
  888               "stats updated by DMAE but no MAC active\n");
  889         return (-1);
  890 
  891     default: /* unreached */
  892         BLOGE(sc, "stats update failed, unknown MAC type\n");
  893     }
  894 
  895     ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
  896                   new->brb_discard - old->brb_discard);
  897     ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
  898                   new->brb_truncate - old->brb_truncate);
  899 
  900     if (!CHIP_IS_E3(sc)) {
  901         UPDATE_STAT64_NIG(egress_mac_pkt0,
  902                           etherstatspkts1024octetsto1522octets);
  903         UPDATE_STAT64_NIG(egress_mac_pkt1,
  904                           etherstatspktsover1522octets);
  905     }
  906 
  907     memcpy(old, new, sizeof(struct nig_stats));
  908 
  909     memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
  910            sizeof(struct mac_stx));
  911     estats->brb_drop_hi = pstats->brb_drop_hi;
  912     estats->brb_drop_lo = pstats->brb_drop_lo;
  913 
  914     pstats->host_port_stats_counter++;
  915 
  916     if (CHIP_IS_E3(sc)) {
  917         lpi_reg = (SC_PORT(sc)) ?
  918                       MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
  919                       MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
  920         estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
  921     }
  922 
  923     if (!BXE_NOMCP(sc)) {
  924         nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
  925         if (nig_timer_max != estats->nig_timer_max) {
  926             estats->nig_timer_max = nig_timer_max;
  927             /*NOTE: not setting error bit */
  928             BLOGE(sc, "invalid NIG timer max (%u)\n",
  929                   estats->nig_timer_max);
  930         }
  931     }
  932 
  933     return (0);
  934 }
  935 
  936 static int
  937 bxe_storm_stats_validate_counters(struct bxe_softc *sc)
  938 {
  939     struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
  940     uint16_t cur_stats_counter;
  941 
  942     /*
  943      * Make sure we use the value of the counter
  944      * used for sending the last stats ramrod.
  945      */
  946     BXE_STATS_LOCK(sc);
  947     cur_stats_counter = (sc->stats_counter - 1);
  948     BXE_STATS_UNLOCK(sc);
  949 
  950     /* are storm stats valid? */
  951     if (le16toh(counters->xstats_counter) != cur_stats_counter) {
  952         BLOGD(sc, DBG_STATS,
  953               "stats not updated by xstorm, "
  954               "counter 0x%x != stats_counter 0x%x\n",
  955               le16toh(counters->xstats_counter), sc->stats_counter);
  956         return (-EAGAIN);
  957     }
  958 
  959     if (le16toh(counters->ustats_counter) != cur_stats_counter) {
  960         BLOGD(sc, DBG_STATS,
  961               "stats not updated by ustorm, "
  962               "counter 0x%x != stats_counter 0x%x\n",
  963               le16toh(counters->ustats_counter), sc->stats_counter);
  964         return (-EAGAIN);
  965     }
  966 
  967     if (le16toh(counters->cstats_counter) != cur_stats_counter) {
  968         BLOGD(sc, DBG_STATS,
  969               "stats not updated by cstorm, "
  970               "counter 0x%x != stats_counter 0x%x\n",
  971               le16toh(counters->cstats_counter), sc->stats_counter);
  972         return (-EAGAIN);
  973     }
  974 
  975     if (le16toh(counters->tstats_counter) != cur_stats_counter) {
  976         BLOGD(sc, DBG_STATS,
  977               "stats not updated by tstorm, "
  978               "counter 0x%x != stats_counter 0x%x\n",
  979               le16toh(counters->tstats_counter), sc->stats_counter);
  980         return (-EAGAIN);
  981     }
  982 
  983     return (0);
  984 }
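       /*
        * All four storm counters (x/u/c/t) must echo the sequence number of
        * the most recently posted ramrod (stats_counter - 1) before the
        * DMA'd statistics block is trusted; any mismatch returns -EAGAIN and
        * bxe_storm_stats_update() simply retries on a later tick.
        */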
  985 
  986 static int
  987 bxe_storm_stats_update(struct bxe_softc *sc)
  988 {
  989     struct tstorm_per_port_stats *tport =
  990         &sc->fw_stats_data->port.tstorm_port_statistics;
  991     struct tstorm_per_pf_stats *tfunc =
  992         &sc->fw_stats_data->pf.tstorm_pf_statistics;
  993     struct host_func_stats *fstats = &sc->func_stats;
  994     struct bxe_eth_stats *estats = &sc->eth_stats;
  995     struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
  996     int i;
  997 
  998     /* vfs stat counter is managed by pf */
  999     if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
 1000         return (-EAGAIN);
 1001     }
 1002 
 1003     estats->error_bytes_received_hi = 0;
 1004     estats->error_bytes_received_lo = 0;
 1005 
 1006     for (i = 0; i < sc->num_queues; i++) {
 1007         struct bxe_fastpath *fp = &sc->fp[i];
 1008         struct tstorm_per_queue_stats *tclient =
 1009             &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
 1010         struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
 1011         struct ustorm_per_queue_stats *uclient =
 1012             &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
 1013         struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
 1014         struct xstorm_per_queue_stats *xclient =
 1015             &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
 1016         struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
 1017         struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
 1018         struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
 1019 
 1020         uint32_t diff;
 1021 
 1022         BLOGD(sc, DBG_STATS,
 1023               "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
 1024               i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
 1025               xclient->mcast_pkts_sent);
 1026 
 1027         BLOGD(sc, DBG_STATS, "---------------\n");
 1028 
 1029         UPDATE_QSTAT(tclient->rcv_bcast_bytes,
 1030                      total_broadcast_bytes_received);
 1031         UPDATE_QSTAT(tclient->rcv_mcast_bytes,
 1032                      total_multicast_bytes_received);
 1033         UPDATE_QSTAT(tclient->rcv_ucast_bytes,
 1034                      total_unicast_bytes_received);
 1035 
 1036         /*
 1037          * sum to total_bytes_received all
 1038          * unicast/multicast/broadcast
 1039          */
 1040         qstats->total_bytes_received_hi =
 1041             qstats->total_broadcast_bytes_received_hi;
 1042         qstats->total_bytes_received_lo =
 1043             qstats->total_broadcast_bytes_received_lo;
 1044 
 1045         ADD_64(qstats->total_bytes_received_hi,
 1046                qstats->total_multicast_bytes_received_hi,
 1047                qstats->total_bytes_received_lo,
 1048                qstats->total_multicast_bytes_received_lo);
 1049 
 1050         ADD_64(qstats->total_bytes_received_hi,
 1051                qstats->total_unicast_bytes_received_hi,
 1052                qstats->total_bytes_received_lo,
 1053                qstats->total_unicast_bytes_received_lo);
 1054 
 1055         qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
 1056         qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
 1057 
 1058         UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
 1059         UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
 1060         UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
 1061         UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
 1062                               etherstatsoverrsizepkts, 32);
 1063         UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
 1064 
 1065         SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
 1066         SUB_EXTEND_USTAT(mcast_no_buff_pkts,
 1067                          total_multicast_packets_received);
 1068         SUB_EXTEND_USTAT(bcast_no_buff_pkts,
 1069                          total_broadcast_packets_received);
 1070         UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
 1071         UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
 1072         UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
 1073 
 1074         UPDATE_QSTAT(xclient->bcast_bytes_sent,
 1075                      total_broadcast_bytes_transmitted);
 1076         UPDATE_QSTAT(xclient->mcast_bytes_sent,
 1077                      total_multicast_bytes_transmitted);
 1078         UPDATE_QSTAT(xclient->ucast_bytes_sent,
 1079                      total_unicast_bytes_transmitted);
 1080 
 1081         /*
 1082          * sum to total_bytes_transmitted all
 1083          * unicast/multicast/broadcast
 1084          */
 1085         qstats->total_bytes_transmitted_hi =
 1086             qstats->total_unicast_bytes_transmitted_hi;
 1087         qstats->total_bytes_transmitted_lo =
 1088             qstats->total_unicast_bytes_transmitted_lo;
 1089 
 1090         ADD_64(qstats->total_bytes_transmitted_hi,
 1091                qstats->total_broadcast_bytes_transmitted_hi,
 1092                qstats->total_bytes_transmitted_lo,
 1093                qstats->total_broadcast_bytes_transmitted_lo);
 1094 
 1095         ADD_64(qstats->total_bytes_transmitted_hi,
 1096                qstats->total_multicast_bytes_transmitted_hi,
 1097                qstats->total_bytes_transmitted_lo,
 1098                qstats->total_multicast_bytes_transmitted_lo);
 1099 
 1100         UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
 1101                             total_unicast_packets_transmitted);
 1102         UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
 1103                             total_multicast_packets_transmitted);
 1104         UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
 1105                             total_broadcast_packets_transmitted);
 1106 
 1107         UPDATE_EXTEND_TSTAT(checksum_discard,
 1108                             total_packets_received_checksum_discarded);
 1109         UPDATE_EXTEND_TSTAT(ttl0_discard,
 1110                             total_packets_received_ttl0_discarded);
 1111 
 1112         UPDATE_EXTEND_XSTAT(error_drop_pkts,
 1113                             total_transmitted_dropped_packets_error);
 1114 
 1115         /* TPA aggregations completed */
 1116         UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
 1117         /* Number of network frames aggregated by TPA */
 1118         UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
 1119         /* Total number of bytes in completed TPA aggregations */
 1120         UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
 1121 
 1122         UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
 1123 
 1124         UPDATE_FSTAT_QSTAT(total_bytes_received);
 1125         UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
 1126         UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
 1127         UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
 1128         UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
 1129         UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
 1130         UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
 1131         UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
 1132         UPDATE_FSTAT_QSTAT(valid_bytes_received);
 1133     }
 1134 
 1135     ADD_64(estats->total_bytes_received_hi,
 1136            estats->rx_stat_ifhcinbadoctets_hi,
 1137            estats->total_bytes_received_lo,
 1138            estats->rx_stat_ifhcinbadoctets_lo);
 1139 
 1140     ADD_64_LE(estats->total_bytes_received_hi,
 1141               tfunc->rcv_error_bytes.hi,
 1142               estats->total_bytes_received_lo,
 1143               tfunc->rcv_error_bytes.lo);
 1144 
 1145     ADD_64_LE(estats->error_bytes_received_hi,
 1146               tfunc->rcv_error_bytes.hi,
 1147               estats->error_bytes_received_lo,
 1148               tfunc->rcv_error_bytes.lo);
 1149 
 1150     UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
 1151 
 1152     ADD_64(estats->error_bytes_received_hi,
 1153            estats->rx_stat_ifhcinbadoctets_hi,
 1154            estats->error_bytes_received_lo,
 1155            estats->rx_stat_ifhcinbadoctets_lo);
 1156 
 1157     if (sc->port.pmf) {
 1158         struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
 1159         UPDATE_FW_STAT(mac_filter_discard);
 1160         UPDATE_FW_STAT(mf_tag_discard);
 1161         UPDATE_FW_STAT(brb_truncate_discard);
 1162         UPDATE_FW_STAT(mac_discard);
 1163     }
 1164 
 1165     fstats->host_func_stats_start = ++fstats->host_func_stats_end;
 1166 
 1167     sc->stats_pending = 0;
 1168 
 1169     return (0);
 1170 }
 1171 
 1172 static void
 1173 bxe_net_stats_update(struct bxe_softc *sc)
 1174 {
 1175 
 1176     for (int i = 0; i < sc->num_queues; i++)
 1177         if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS,
 1178             le32toh(sc->fp[i].old_tclient.checksum_discard));
 1179 }
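       /*
        * Only the per-queue checksum discards are pushed into the ifnet here
        * (as input-queue drops); all other interface counters are computed
        * on demand from eth_stats by bxe_get_counter() below.
        */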
 1180 
 1181 uint64_t
 1182 bxe_get_counter(if_t ifp, ift_counter cnt)
 1183 {
 1184         struct bxe_softc *sc;
 1185         struct bxe_eth_stats *estats;
 1186 
 1187         sc = if_getsoftc(ifp);
 1188         estats = &sc->eth_stats;
 1189 
 1190         switch (cnt) {
 1191         case IFCOUNTER_IPACKETS:
 1192                 return (bxe_hilo(&estats->total_unicast_packets_received_hi) +
 1193                     bxe_hilo(&estats->total_multicast_packets_received_hi) +
 1194                     bxe_hilo(&estats->total_broadcast_packets_received_hi));
 1195         case IFCOUNTER_OPACKETS:
 1196                 return (bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
 1197                     bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
 1198                     bxe_hilo(&estats->total_broadcast_packets_transmitted_hi));
 1199         case IFCOUNTER_IBYTES:
 1200                 return (bxe_hilo(&estats->total_bytes_received_hi));
 1201         case IFCOUNTER_OBYTES:
 1202                 return (bxe_hilo(&estats->total_bytes_transmitted_hi));
 1203         case IFCOUNTER_IERRORS:
 1204                 return (bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
 1205                     bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
 1206                     bxe_hilo(&estats->brb_drop_hi) +
 1207                     bxe_hilo(&estats->brb_truncate_hi) +
 1208                     bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
 1209                     bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
 1210                     bxe_hilo(&estats->no_buff_discard_hi));
 1211         case IFCOUNTER_OERRORS:
 1212                 return (bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
 1213                     bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi));
 1214         case IFCOUNTER_IMCASTS:
 1215                 return (bxe_hilo(&estats->total_multicast_packets_received_hi));
 1216         case IFCOUNTER_COLLISIONS:
 1217                 return (bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
 1218                     bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
 1219                     bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi));
 1220         default:
 1221                 return (if_get_counter_default(ifp, cnt));
 1222         }
 1223 }
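       /*
        * bxe_get_counter() is the driver's if_get_counter method.  It is
        * presumably registered during attach in bxe.c with something along
        * the lines of:
        *
        *     if_setgetcounterfn(ifp, bxe_get_counter);
        *
        * so netstat/ifconfig counters are derived on demand from the hi/lo
        * pairs in eth_stats via bxe_hilo() instead of being pushed into the
        * ifnet on every statistics tick.
        */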
 1224 
 1225 static void
 1226 bxe_drv_stats_update(struct bxe_softc *sc)
 1227 {
 1228     struct bxe_eth_stats *estats = &sc->eth_stats;
 1229     int i;
 1230 
 1231     for (i = 0; i < sc->num_queues; i++) {
 1232         struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
 1233         struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
 1234 
 1235         UPDATE_ESTAT_QSTAT(rx_calls);
 1236         UPDATE_ESTAT_QSTAT(rx_pkts);
 1237         UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
 1238         UPDATE_ESTAT_QSTAT(rx_erroneous_jumbo_sge_pkts);
 1239         UPDATE_ESTAT_QSTAT(rx_bxe_service_rxsgl);
 1240         UPDATE_ESTAT_QSTAT(rx_jumbo_sge_pkts);
 1241         UPDATE_ESTAT_QSTAT(rx_soft_errors);
 1242         UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
 1243         UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
 1244         UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
 1245         UPDATE_ESTAT_QSTAT(rx_budget_reached);
 1246         UPDATE_ESTAT_QSTAT(tx_pkts);
 1247         UPDATE_ESTAT_QSTAT(tx_soft_errors);
 1248         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
 1249         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
 1250         UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
 1251         UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
 1252         UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
 1253         UPDATE_ESTAT_QSTAT(tx_encap_failures);
 1254         UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
 1255         UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
 1256         UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
 1257         UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
 1258         UPDATE_ESTAT_QSTAT(tx_window_violation_std);
 1259         UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
 1260         //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
 1261         //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
 1262         UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
 1263         UPDATE_ESTAT_QSTAT(tx_frames_deferred);
 1264         UPDATE_ESTAT_QSTAT(tx_queue_xoff);
 1265 
 1266         /* mbuf driver statistics */
 1267         UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
 1268         UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
 1269         UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
 1270         UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
 1271         UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
 1272         UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
 1273         UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
 1274         UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);
 1275 
 1276         /* track the number of allocated mbufs */
 1277         UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
 1278         UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
 1279         UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
 1280         UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
 1281     }
 1282 }
 1283 
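      /*
       * Returns TRUE when the management firmware, via the shmem2 edebug
       * interface, has asked the driver to stop collecting statistics.
       */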
 1284 static uint8_t
 1285 bxe_edebug_stats_stopped(struct bxe_softc *sc)
 1286 {
 1287     uint32_t val;
 1288 
 1289     if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
 1290         val = SHMEM2_RD(sc, edebug_driver_if[1]);
 1291 
 1292         if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
 1293             return (TRUE);
 1294         }
 1295     }
 1296 
 1297     return (FALSE);
 1298 }
 1299 
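      /*
       * Periodic statistics refresh (the UPDATE action while ENABLED).
       * A PF first waits for the previous DMAE completion, lets the PMF
       * pull the hardware (MAC/NIG) counters, and fetches the storm
       * statistics, scheduling error recovery if the storm stats stay
       * stale for several consecutive polls.  A VF only refreshes the
       * storm statistics.  Both then recompute the net and driver stats,
       * and a PF posts the next hardware and storm statistics requests.
       */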
 1300 static void
 1301 bxe_stats_update(struct bxe_softc *sc)
 1302 {
 1303     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
 1304 
 1305     if (bxe_edebug_stats_stopped(sc)) {
 1306         return;
 1307     }
 1308 
 1309     if (IS_PF(sc)) {
 1310         if (*stats_comp != DMAE_COMP_VAL) {
 1311             return;
 1312         }
 1313 
 1314         if (sc->port.pmf) {
 1315             bxe_hw_stats_update(sc);
 1316         }
 1317 
 1318         if (bxe_storm_stats_update(sc)) {
 1319             if (sc->stats_pending++ == 3) {
 1320                 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
 1321                     BLOGE(sc, "Storm stats not updated for 3 consecutive polls, resetting\n");
 1322                     BXE_SET_ERROR_BIT(sc, BXE_ERR_STATS_TO);
 1323                     taskqueue_enqueue_timeout(taskqueue_thread,
 1324                             &sc->sp_err_timeout_task, hz/10);
 1325                 }
 1326             }
 1327             return;
 1328         }
 1329     } else {
 1330         /*
 1331          * A VF doesn't collect HW statistics and doesn't get completions;
 1332          * it only performs the storm stats update.
 1333          */
 1334         bxe_storm_stats_update(sc);
 1335     }
 1336 
 1337     bxe_net_stats_update(sc);
 1338     bxe_drv_stats_update(sc);
 1339 
 1340     /* vf is done */
 1341     if (IS_VF(sc)) {
 1342         return;
 1343     }
 1344 
 1345     bxe_hw_stats_post(sc);
 1346     bxe_storm_stats_post(sc);
 1347 }
 1348 
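      /*
       * Build the DMAE command(s) that copy the host-resident port (and,
       * if present, function) statistics back to the shmem locations the
       * management firmware advertised in port_stx/func_stx, so the final
       * counter values are handed back when statistics are stopped.
       */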
 1349 static void
 1350 bxe_port_stats_stop(struct bxe_softc *sc)
 1351 {
 1352     struct dmae_cmd *dmae;
 1353     uint32_t opcode;
 1354     int loader_idx = PMF_DMAE_C(sc);
 1355     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
 1356 
 1357     sc->executer_idx = 0;
 1358 
 1359     opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
 1360 
 1361     if (sc->port.port_stx) {
 1362         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
 1363 
 1364         if (sc->func_stx) {
 1365             dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
 1366         } else {
 1367             dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
 1368         }
 1369 
 1370         dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
 1371         dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
 1372         dmae->dst_addr_lo = sc->port.port_stx >> 2;
 1373         dmae->dst_addr_hi = 0;
 1374         dmae->len = bxe_get_port_stats_dma_len(sc);
 1375         if (sc->func_stx) {
 1376             dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
 1377             dmae->comp_addr_hi = 0;
 1378             dmae->comp_val = 1;
 1379         } else {
 1380             dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
 1381             dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
 1382             dmae->comp_val = DMAE_COMP_VAL;
 1383 
 1384             *stats_comp = 0;
 1385         }
 1386     }
 1387 
 1388     if (sc->func_stx) {
 1389         dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
 1390         dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
 1391         dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
 1392         dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
 1393         dmae->dst_addr_lo = (sc->func_stx >> 2);
 1394         dmae->dst_addr_hi = 0;
 1395         dmae->len = (sizeof(struct host_func_stats) >> 2);
 1396         dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
 1397         dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
 1398         dmae->comp_val = DMAE_COMP_VAL;
 1399 
 1400         *stats_comp = 0;
 1401     }
 1402 }
 1403 
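      /*
       * Final statistics pass on the STOP event: wait for any outstanding
       * completion, do one last hardware/storm update, refresh the net
       * stats, and (for the PMF) flush the port statistics back to shmem.
       */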
 1404 static void
 1405 bxe_stats_stop(struct bxe_softc *sc)
 1406 {
 1407     uint8_t update = FALSE;
 1408 
 1409     bxe_stats_comp(sc);
 1410 
 1411     if (sc->port.pmf) {
 1412         update = bxe_hw_stats_update(sc) == 0;
 1413     }
 1414 
 1415     update |= bxe_storm_stats_update(sc) == 0;
 1416 
 1417     if (update) {
 1418         bxe_net_stats_update(sc);
 1419 
 1420         if (sc->port.pmf) {
 1421             bxe_port_stats_stop(sc);
 1422         }
 1423 
 1424         bxe_hw_stats_post(sc);
 1425         bxe_stats_comp(sc);
 1426     }
 1427 }
 1428 
 1429 static void
 1430 bxe_stats_do_nothing(struct bxe_softc *sc)
 1431 {
 1432     return;
 1433 }
 1434 
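      /*
       * Statistics state machine: indexed by the current state (DISABLED
       * or ENABLED) and the incoming event (PMF, LINK_UP, UPDATE, STOP).
       * Each entry names the action to run and the next state to enter.
       */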
 1435 static const struct {
 1436     void (*action)(struct bxe_softc *sc);
 1437     enum bxe_stats_state next_state;
 1438 } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
 1439     {
 1440     /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
 1441     /*      LINK_UP */ { bxe_stats_start,      STATS_STATE_ENABLED },
 1442     /*      UPDATE  */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
 1443     /*      STOP    */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
 1444     },
 1445     {
 1446     /* ENABLED  PMF */ { bxe_stats_pmf_start,  STATS_STATE_ENABLED },
 1447     /*      LINK_UP */ { bxe_stats_restart,    STATS_STATE_ENABLED },
 1448     /*      UPDATE  */ { bxe_stats_update,     STATS_STATE_ENABLED },
 1449     /*      STOP    */ { bxe_stats_stop,       STATS_STATE_DISABLED }
 1450     }
 1451 };
 1452 
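      /*
       * Single entry point for driving the statistics state machine: the
       * state transition is made under the stats lock, and the action
       * runs after the lock has been dropped.
       */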
 1453 void bxe_stats_handle(struct bxe_softc     *sc,
 1454                       enum bxe_stats_event event)
 1455 {
 1456     enum bxe_stats_state state;
 1457 
 1458     if (__predict_false(sc->panic)) {
 1459         return;
 1460     }
 1461 
 1462     BXE_STATS_LOCK(sc);
 1463     state = sc->stats_state;
 1464     sc->stats_state = bxe_stats_stm[state][event].next_state;
 1465     BXE_STATS_UNLOCK(sc);
 1466 
 1467     bxe_stats_stm[state][event].action(sc);
 1468 
 1469     if (event != STATS_EVENT_UPDATE) {
 1470         BLOGD(sc, DBG_STATS,
 1471               "state %d -> event %d -> state %d\n",
 1472               state, event, sc->stats_state);
 1473     }
 1474 }
 1475 
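      /*
       * One-shot DMAE that writes the host port-statistics buffer to the
       * shmem port_stx area; only valid on the PMF with a port_stx
       * address, as the sanity check below enforces.
       */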
 1476 static void
 1477 bxe_port_stats_base_init(struct bxe_softc *sc)
 1478 {
 1479     struct dmae_cmd *dmae;
 1480     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
 1481 
 1482     /* sanity */
 1483     if (!sc->port.pmf || !sc->port.port_stx) {
 1484         BLOGE(sc, "BUG!\n");
 1485         return;
 1486     }
 1487 
 1488     sc->executer_idx = 0;
 1489 
 1490     dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
 1491     dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
 1492                                    TRUE, DMAE_COMP_PCI);
 1493     dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
 1494     dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
 1495     dmae->dst_addr_lo = (sc->port.port_stx >> 2);
 1496     dmae->dst_addr_hi = 0;
 1497     dmae->len = bxe_get_port_stats_dma_len(sc);
 1498     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
 1499     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
 1500     dmae->comp_val = DMAE_COMP_VAL;
 1501 
 1502     *stats_comp = 0;
 1503     bxe_hw_stats_post(sc);
 1504     bxe_stats_comp(sc);
 1505 }
 1506 
 1507 /*
 1508  * Prepare the statistics ramrod data so that, later on, we only have
 1509  * to increment the statistics counter and send the ramrod each time
 1510  * an update is needed.
 1511  */
 1512 static void
 1513 bxe_prep_fw_stats_req(struct bxe_softc *sc)
 1514 {
 1515     int i;
 1516     int first_queue_query_index;
 1517     struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
 1518     bus_addr_t cur_data_offset;
 1519     struct stats_query_entry *cur_query_entry;
 1520 
 1521     stats_hdr->cmd_num = sc->fw_stats_num;
 1522     stats_hdr->drv_stats_counter = 0;
 1523 
 1524     /*
 1525      * The storm_counters struct contains the counters of completed
 1526      * statistics requests per storm, which the FW increments each
 1527      * time it completes handling a statistics ramrod. We check these
 1528      * counters in the timer handler so that a stale (statistics)
 1529      * ramrod completion can be discarded.
 1530      */
 1531     cur_data_offset = (sc->fw_stats_data_mapping +
 1532                        offsetof(struct bxe_fw_stats_data, storm_counters));
 1533 
 1534     stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
 1535     stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
 1536 
 1537     /*
 1538      * Prepare the first stats ramrod (it will be completed with the
 1539      * counters equal to zero); init the counters to something different.
 1540      */
 1541     memset(&sc->fw_stats_data->storm_counters, 0xff,
 1542            sizeof(struct stats_counter));
 1543 
 1544     /**** Port FW statistics data ****/
 1545     cur_data_offset = (sc->fw_stats_data_mapping +
 1546                        offsetof(struct bxe_fw_stats_data, port));
 1547 
 1548     cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];
 1549 
 1550     cur_query_entry->kind = STATS_TYPE_PORT;
 1551     /* For a port query, the index is a DON'T CARE */
 1552     cur_query_entry->index = SC_PORT(sc);
 1553     /* For a port query, the funcID is a DON'T CARE */
 1554     cur_query_entry->funcID = htole16(SC_FUNC(sc));
 1555     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
 1556     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
 1557 
 1558     /**** PF FW statistics data ****/
 1559     cur_data_offset = (sc->fw_stats_data_mapping +
 1560                        offsetof(struct bxe_fw_stats_data, pf));
 1561 
 1562     cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];
 1563 
 1564     cur_query_entry->kind = STATS_TYPE_PF;
 1565     /* For a PF query, the index is a DON'T CARE */
 1566     cur_query_entry->index = SC_PORT(sc);
 1567     cur_query_entry->funcID = htole16(SC_FUNC(sc));
 1568     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
 1569     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
 1570 
 1571     /**** Clients' queries ****/
 1572     cur_data_offset = (sc->fw_stats_data_mapping +
 1573                        offsetof(struct bxe_fw_stats_data, queue_stats));
 1574 
 1575     /*
 1576      * The first queue query index depends on whether an FCoE offloaded
 1577      * request will be included in the ramrod.
 1578      */
 1579     first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);
 1580 
 1581     for (i = 0; i < sc->num_queues; i++) {
 1582         cur_query_entry =
 1583             &sc->fw_stats_req->query[first_queue_query_index + i];
 1584 
 1585         cur_query_entry->kind = STATS_TYPE_QUEUE;
 1586         cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
 1587         cur_query_entry->funcID = htole16(SC_FUNC(sc));
 1588         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
 1589         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
 1590 
 1591         cur_data_offset += sizeof(struct per_queue_stats);
 1592     }
 1593 }
 1594 
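      /*
       * Statistics (re)initialization: pick up the shmem statistics
       * addresses (port_stx/func_stx), snapshot the NIG discard/truncate
       * counters, clear the driver's soft copies on a first-time init,
       * prepare the firmware statistics ramrod, and leave the state
       * machine DISABLED until a LINK_UP event starts it.
       */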
 1595 void
 1596 bxe_stats_init(struct bxe_softc *sc)
 1597 {
 1598     int /*abs*/port = SC_PORT(sc);
 1599     int mb_idx = SC_FW_MB_IDX(sc);
 1600     int i;
 1601 
 1602     sc->stats_pending = 0;
 1603     sc->executer_idx = 0;
 1604     sc->stats_counter = 0;
 1605 
 1606     /* port and func stats for management */
 1607     if (!BXE_NOMCP(sc)) {
 1608         sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
 1609         sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
 1610     } else {
 1611         sc->port.port_stx = 0;
 1612         sc->func_stx = 0;
 1613     }
 1614 
 1615     BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
 1616           sc->port.port_stx, sc->func_stx);
 1617 
 1618     /* the PMF should retrieve port statistics from SP on a non-initial load */
 1619     if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
 1620         bxe_stats_handle(sc, STATS_EVENT_PMF);
 1621     }
 1622 
 1623     port = SC_PORT(sc);
 1624     /* port stats */
 1625     memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
 1626     sc->port.old_nig_stats.brb_discard =
 1627         REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
 1628     sc->port.old_nig_stats.brb_truncate =
 1629         REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
 1630     if (!CHIP_IS_E3(sc)) {
 1631         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
 1632                     &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
 1633         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
 1634                     &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
 1635     }
 1636 
 1637     /* function stats */
 1638     for (i = 0; i < sc->num_queues; i++) {
 1639         memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
 1640         memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
 1641         memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
 1642         if (sc->stats_init) {
 1643             memset(&sc->fp[i].eth_q_stats, 0,
 1644                    sizeof(sc->fp[i].eth_q_stats));
 1645             memset(&sc->fp[i].eth_q_stats_old, 0,
 1646                    sizeof(sc->fp[i].eth_q_stats_old));
 1647         }
 1648     }
 1649 
 1650     /* prepare statistics ramrod data */
 1651     bxe_prep_fw_stats_req(sc);
 1652 
 1653     if (sc->stats_init) {
 1654         memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
 1655         memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
 1656         memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
 1657         memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
 1658         memset(&sc->func_stats, 0, sizeof(sc->func_stats));
 1659 
 1660         /* Clean SP from previous statistics */
 1661         if (sc->func_stx) {
 1662             memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
 1663             bxe_func_stats_init(sc);
 1664             bxe_hw_stats_post(sc);
 1665             bxe_stats_comp(sc);
 1666         }
 1667     }
 1668 
 1669     sc->stats_state = STATS_STATE_DISABLED;
 1670 
 1671     if (sc->port.pmf && sc->port.port_stx) {
 1672         bxe_port_stats_base_init(sc);
 1673     }
 1674 
 1675     /* mark the end of statistics initialization */
 1676     sc->stats_init = FALSE;
 1677 }
 1678 
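      /*
       * Snapshot counters that must be carried over when statistics are
       * reinitialized: per-queue byte counters go into the *_old copies,
       * and the PMF saves the port PFC/discard firmware counters into
       * fw_stats_old.
       */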
 1679 void
 1680 bxe_save_statistics(struct bxe_softc *sc)
 1681 {
 1682     int i;
 1683 
 1684     /* save queue statistics */
 1685     for (i = 0; i < sc->num_queues; i++) {
 1686         struct bxe_fastpath *fp = &sc->fp[i];
 1687         struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
 1688         struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
 1689 
 1690         UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
 1691         UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
 1692         UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
 1693         UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
 1694         UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
 1695         UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
 1696         UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
 1697         UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
 1698         UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
 1699         UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
 1700         UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
 1701         UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
 1702         UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
 1703         UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
 1704     }
 1705 
 1706     /* store port firmware statistics */
 1707     if (sc->port.pmf) {
 1708         struct bxe_eth_stats *estats = &sc->eth_stats;
 1709         struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
 1710         struct host_port_stats *pstats = BXE_SP(sc, port_stats);
 1711 
 1712         fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
 1713         fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
 1714         fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
 1715         fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
 1716 
 1717         if (IS_MF(sc)) {
 1718             UPDATE_FW_STAT_OLD(mac_filter_discard);
 1719             UPDATE_FW_STAT_OLD(mf_tag_discard);
 1720             UPDATE_FW_STAT_OLD(brb_truncate_discard);
 1721             UPDATE_FW_STAT_OLD(mac_discard);
 1722         }
 1723     }
 1724 }
 1725 
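      /*
       * Fill an afex_stats block by summing the per-queue byte, frame and
       * discard counters; when port-level stats are requested and we are
       * the PMF, the port discard counters are folded in as well, since
       * the MCP accumulates them before reporting to the switch.
       */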
 1726 void
 1727 bxe_afex_collect_stats(struct bxe_softc *sc,
 1728                        void             *void_afex_stats,
 1729                        uint32_t         stats_type)
 1730 {
 1731     int i;
 1732     struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
 1733     struct bxe_eth_stats *estats = &sc->eth_stats;
 1734 
 1735     memset(afex_stats, 0, sizeof(struct afex_stats));
 1736 
 1737     for (i = 0; i < sc->num_queues; i++) {
 1738         struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
 1739 
 1740         ADD_64(afex_stats->rx_unicast_bytes_hi,
 1741                qstats->total_unicast_bytes_received_hi,
 1742                afex_stats->rx_unicast_bytes_lo,
 1743                qstats->total_unicast_bytes_received_lo);
 1744 
 1745         ADD_64(afex_stats->rx_broadcast_bytes_hi,
 1746                qstats->total_broadcast_bytes_received_hi,
 1747                afex_stats->rx_broadcast_bytes_lo,
 1748                qstats->total_broadcast_bytes_received_lo);
 1749 
 1750         ADD_64(afex_stats->rx_multicast_bytes_hi,
 1751                qstats->total_multicast_bytes_received_hi,
 1752                afex_stats->rx_multicast_bytes_lo,
 1753                qstats->total_multicast_bytes_received_lo);
 1754 
 1755         ADD_64(afex_stats->rx_unicast_frames_hi,
 1756                qstats->total_unicast_packets_received_hi,
 1757                afex_stats->rx_unicast_frames_lo,
 1758                qstats->total_unicast_packets_received_lo);
 1759 
 1760         ADD_64(afex_stats->rx_broadcast_frames_hi,
 1761                qstats->total_broadcast_packets_received_hi,
 1762                afex_stats->rx_broadcast_frames_lo,
 1763                qstats->total_broadcast_packets_received_lo);
 1764 
 1765         ADD_64(afex_stats->rx_multicast_frames_hi,
 1766                qstats->total_multicast_packets_received_hi,
 1767                afex_stats->rx_multicast_frames_lo,
 1768                qstats->total_multicast_packets_received_lo);
 1769 
 1770         /*
 1771          * Add to rx_frames_discarded all packets discarded
 1772          * due to size, ttl0 and checksum.
 1773          */
 1774         ADD_64(afex_stats->rx_frames_discarded_hi,
 1775                qstats->total_packets_received_checksum_discarded_hi,
 1776                afex_stats->rx_frames_discarded_lo,
 1777                qstats->total_packets_received_checksum_discarded_lo);
 1778 
 1779         ADD_64(afex_stats->rx_frames_discarded_hi,
 1780                qstats->total_packets_received_ttl0_discarded_hi,
 1781                afex_stats->rx_frames_discarded_lo,
 1782                qstats->total_packets_received_ttl0_discarded_lo);
 1783 
 1784         ADD_64(afex_stats->rx_frames_discarded_hi,
 1785                qstats->etherstatsoverrsizepkts_hi,
 1786                afex_stats->rx_frames_discarded_lo,
 1787                qstats->etherstatsoverrsizepkts_lo);
 1788 
 1789         ADD_64(afex_stats->rx_frames_dropped_hi,
 1790                qstats->no_buff_discard_hi,
 1791                afex_stats->rx_frames_dropped_lo,
 1792                qstats->no_buff_discard_lo);
 1793 
 1794         ADD_64(afex_stats->tx_unicast_bytes_hi,
 1795                qstats->total_unicast_bytes_transmitted_hi,
 1796                afex_stats->tx_unicast_bytes_lo,
 1797                qstats->total_unicast_bytes_transmitted_lo);
 1798 
 1799         ADD_64(afex_stats->tx_broadcast_bytes_hi,
 1800                qstats->total_broadcast_bytes_transmitted_hi,
 1801                afex_stats->tx_broadcast_bytes_lo,
 1802                qstats->total_broadcast_bytes_transmitted_lo);
 1803 
 1804         ADD_64(afex_stats->tx_multicast_bytes_hi,
 1805                qstats->total_multicast_bytes_transmitted_hi,
 1806                afex_stats->tx_multicast_bytes_lo,
 1807                qstats->total_multicast_bytes_transmitted_lo);
 1808 
 1809         ADD_64(afex_stats->tx_unicast_frames_hi,
 1810                qstats->total_unicast_packets_transmitted_hi,
 1811                afex_stats->tx_unicast_frames_lo,
 1812                qstats->total_unicast_packets_transmitted_lo);
 1813 
 1814         ADD_64(afex_stats->tx_broadcast_frames_hi,
 1815                qstats->total_broadcast_packets_transmitted_hi,
 1816                afex_stats->tx_broadcast_frames_lo,
 1817                qstats->total_broadcast_packets_transmitted_lo);
 1818 
 1819         ADD_64(afex_stats->tx_multicast_frames_hi,
 1820                qstats->total_multicast_packets_transmitted_hi,
 1821                afex_stats->tx_multicast_frames_lo,
 1822                qstats->total_multicast_packets_transmitted_lo);
 1823 
 1824         ADD_64(afex_stats->tx_frames_dropped_hi,
 1825                qstats->total_transmitted_dropped_packets_error_hi,
 1826                afex_stats->tx_frames_dropped_lo,
 1827                qstats->total_transmitted_dropped_packets_error_lo);
 1828     }
 1829 
 1830     /*
 1831      * If port stats are requested, add them to the PMF stats,
 1832      * since they will be accumulated by the MCP anyway before
 1833      * being sent to the switch.
 1834      */
 1835     if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
 1836         ADD_64(afex_stats->rx_frames_dropped_hi,
 1837                0,
 1838                afex_stats->rx_frames_dropped_lo,
 1839                estats->mac_filter_discard);
 1840         ADD_64(afex_stats->rx_frames_dropped_hi,
 1841                0,
 1842                afex_stats->rx_frames_dropped_lo,
 1843                estats->brb_truncate_discard);
 1844         ADD_64(afex_stats->rx_frames_discarded_hi,
 1845                0,
 1846                afex_stats->rx_frames_discarded_lo,
 1847                estats->mac_discard);
 1848     }
 1849 }
 1850 
