FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/common/t4_hw.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "opt_inet.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/eventhandler.h>
   36 
   37 #include "common.h"
   38 #include "t4_regs.h"
   39 #include "t4_regs_values.h"
   40 #include "firmware/t4fw_interface.h"
   41 
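/*
 * The driver's msleep() must work both before and after the scheduler is
 * running: during a cold boot we cannot sleep, so busy-wait with DELAY();
 * otherwise sleep with pause(9).
 */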
   42 #undef msleep
   43 #define msleep(x) do { \
   44         if (cold) \
   45                 DELAY((x) * 1000); \
   46         else \
   47                 pause("t4hw", (x) * hz / 1000); \
   48 } while (0)
   49 
   50 /**
   51  *      t4_wait_op_done_val - wait until an operation is completed
   52  *      @adapter: the adapter performing the operation
   53  *      @reg: the register to check for completion
   54  *      @mask: a single-bit field within @reg that indicates completion
   55  *      @polarity: the value of the field when the operation is completed
   56  *      @attempts: number of check iterations
   57  *      @delay: delay in usecs between iterations
   58  *      @valp: where to store the value of the register at completion time
   59  *
   60  *      Wait until an operation is completed by checking a bit in a register
   61  *      up to @attempts times.  If @valp is not NULL the value of the register
   62  *      at the time it indicated completion is stored there.  Returns 0 if the
   63  *      operation completes and -EAGAIN otherwise.
   64  */
   65 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
   66                                int polarity, int attempts, int delay, u32 *valp)
   67 {
   68         while (1) {
   69                 u32 val = t4_read_reg(adapter, reg);
   70 
   71                 if (!!(val & mask) == polarity) {
   72                         if (valp)
   73                                 *valp = val;
   74                         return 0;
   75                 }
   76                 if (--attempts == 0)
   77                         return -EAGAIN;
   78                 if (delay)
   79                         udelay(delay);
   80         }
   81 }
   82 
   83 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
   84                                   int polarity, int attempts, int delay)
   85 {
   86         return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
   87                                    delay, NULL);
   88 }
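
/*
 * Usage sketch (illustrative): poll a completion bit with t4_wait_op_done().
 * This mirrors how t4_mc_read() below waits for F_START_BIST in the T4
 * A_MC_BIST_CMD register to clear: polarity 0 means "done when the bit reads
 * as 0", checked up to 10 times with a 1 usec delay between checks.
 */
static inline int example_wait_bist_idle(struct adapter *adap)
{
        return t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
}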
   89 
   90 /**
   91  *      t4_set_reg_field - set a register field to a value
   92  *      @adapter: the adapter to program
   93  *      @addr: the register address
   94  *      @mask: specifies the portion of the register to modify
   95  *      @val: the new value for the register field
   96  *
   97  *      Sets a register field specified by the supplied mask to the
   98  *      given value.
   99  */
  100 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  101                       u32 val)
  102 {
  103         u32 v = t4_read_reg(adapter, addr) & ~mask;
  104 
  105         t4_write_reg(adapter, addr, v | val);
  106         (void) t4_read_reg(adapter, addr);      /* flush */
  107 }
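
/*
 * Usage sketch (illustrative): t4_set_reg_field() does the read-modify-write
 * for the caller, so briefly clearing and re-setting a one-bit field takes
 * two calls.  This mirrors the F_PORTTXEN handling in check_tx_state()
 * below; port_ctl_reg stands in for a real A_MPS_PORT_CTL address.
 */
static inline void example_toggle_porttxen(struct adapter *adap,
                                           unsigned int port_ctl_reg)
{
        t4_set_reg_field(adap, port_ctl_reg, F_PORTTXEN, 0);
        t4_set_reg_field(adap, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
}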
  108 
  109 /**
  110  *      t4_read_indirect - read indirectly addressed registers
  111  *      @adap: the adapter
  112  *      @addr_reg: register holding the indirect address
  113  *      @data_reg: register holding the value of the indirect register
  114  *      @vals: where the read register values are stored
  115  *      @nregs: how many indirect registers to read
  116  *      @start_idx: index of first indirect register to read
  117  *
  118  *      Reads registers that are accessed indirectly through an address/data
  119  *      register pair.
  120  */
  121 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
  122                              unsigned int data_reg, u32 *vals,
  123                              unsigned int nregs, unsigned int start_idx)
  124 {
  125         while (nregs--) {
  126                 t4_write_reg(adap, addr_reg, start_idx);
  127                 *vals++ = t4_read_reg(adap, data_reg);
  128                 start_idx++;
  129         }
  130 }
  131 
  132 /**
  133  *      t4_write_indirect - write indirectly addressed registers
  134  *      @adap: the adapter
  135  *      @addr_reg: register holding the indirect addresses
  136  *      @data_reg: register holding the value for the indirect registers
  137  *      @vals: values to write
  138  *      @nregs: how many indirect registers to write
   139  *      @start_idx: index of first indirect register to write
  140  *
  141  *      Writes a sequential block of registers that are accessed indirectly
  142  *      through an address/data register pair.
  143  */
  144 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  145                        unsigned int data_reg, const u32 *vals,
  146                        unsigned int nregs, unsigned int start_idx)
  147 {
  148         while (nregs--) {
  149                 t4_write_reg(adap, addr_reg, start_idx++);
  150                 t4_write_reg(adap, data_reg, *vals++);
  151         }
  152 }
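
/*
 * Usage sketch (illustrative): read a small block of indirectly addressed
 * registers into a local array.  The addr_reg/data_reg pair is left as a
 * parameter so the sketch is not tied to a particular register block; the
 * TP PIO address/data pair used elsewhere in this driver is one example.
 */
static inline u32 example_read_indirect_block(struct adapter *adap,
                                              unsigned int addr_reg,
                                              unsigned int data_reg,
                                              unsigned int start_idx)
{
        u32 vals[4];

        t4_read_indirect(adap, addr_reg, data_reg, vals, ARRAY_SIZE(vals),
            start_idx);
        return vals[0];         /* vals[] holds start_idx .. start_idx + 3 */
}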
  153 
  154 /*
  155  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
  156  * mechanism.  This guarantees that we get the real value even if we're
  157  * operating within a Virtual Machine and the Hypervisor is trapping our
  158  * Configuration Space accesses.
  159  *
  160  * N.B. This routine should only be used as a last resort: the firmware uses
  161  *      the backdoor registers on a regular basis and we can end up
   162  *      conflicting with its uses!
  163  */
  164 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
  165 {
  166         u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
  167         u32 val;
  168 
  169         if (chip_id(adap) <= CHELSIO_T5)
  170                 req |= F_ENABLE;
  171         else
  172                 req |= F_T6_ENABLE;
  173 
  174         if (is_t4(adap))
  175                 req |= F_LOCALCFG;
  176 
  177         t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
  178         val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
  179 
  180         /*
  181          * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
  182          * Configuration Space read.  (None of the other fields matter when
  183          * F_ENABLE is 0 so a simple register write is easier than a
  184          * read-modify-write via t4_set_reg_field().)
  185          */
  186         t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
  187 
  188         return val;
  189 }
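
/*
 * Usage sketch (illustrative): read the Vendor/Device ID dword at PCI
 * Configuration Space offset 0 through the backdoor.  Offset 0 is used here
 * only because its layout is fixed by the PCI specification; per the note
 * above, prefer t4_read_pcie_cfg4() further below, which lets the firmware
 * perform the access when it is available.
 */
static inline u32 example_backdoor_read_id(struct adapter *adap)
{
        return t4_hw_pci_read_cfg4(adap, 0);
}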
  190 
  191 /*
  192  * t4_report_fw_error - report firmware error
  193  * @adap: the adapter
  194  *
  195  * The adapter firmware can indicate error conditions to the host.
  196  * If the firmware has indicated an error, print out the reason for
  197  * the firmware error.
  198  */
  199 void t4_report_fw_error(struct adapter *adap)
  200 {
  201         static const char *const reason[] = {
  202                 "Crash",                        /* PCIE_FW_EVAL_CRASH */
  203                 "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
  204                 "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
  205                 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
  206                 "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
  207                 "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
  208                 "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
  209                 "Reserved",                     /* reserved */
  210         };
  211         u32 pcie_fw;
  212 
  213         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
  214         if (pcie_fw & F_PCIE_FW_ERR) {
  215                 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
  216                     reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
  217         }
  218 }
  219 
  220 /*
  221  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  222  */
  223 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
  224                          u32 mbox_addr)
  225 {
  226         for ( ; nflit; nflit--, mbox_addr += 8)
  227                 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
  228 }
  229 
  230 /*
  231  * Handle a FW assertion reported in a mailbox.
  232  */
  233 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
  234 {
  235         CH_ALERT(adap,
  236                   "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
  237                   asrt->u.assert.filename_0_7,
  238                   be32_to_cpu(asrt->u.assert.line),
  239                   be32_to_cpu(asrt->u.assert.x),
  240                   be32_to_cpu(asrt->u.assert.y));
  241 }
  242 
  243 struct port_tx_state {
  244         uint64_t rx_pause;
  245         uint64_t tx_frames;
  246 };
  247 
  248 static void
  249 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
  250 {
  251         uint32_t rx_pause_reg, tx_frames_reg;
  252 
  253         if (is_t4(sc)) {
  254                 tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
  255                 rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
  256         } else {
  257                 tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
  258                 rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
  259         }
  260 
  261         tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
  262         tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
  263 }
  264 
  265 static void
  266 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
  267 {
  268         int i;
  269 
  270         for_each_port(sc, i)
  271                 read_tx_state_one(sc, i, &tx_state[i]);
  272 }
  273 
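/*
 * If a port has TX enabled but looks stuck -- it has received new pause
 * frames since the snapshot taken by read_tx_state() while its TX frame
 * counter has not moved -- briefly clear and re-set F_PORTTXEN to get the
 * transmitter going again.
 */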
  274 static void
  275 check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
  276 {
  277         uint32_t port_ctl_reg;
  278         uint64_t tx_frames, rx_pause;
  279         int i;
  280 
  281         for_each_port(sc, i) {
  282                 rx_pause = tx_state[i].rx_pause;
  283                 tx_frames = tx_state[i].tx_frames;
  284                 read_tx_state_one(sc, i, &tx_state[i]); /* update */
  285 
  286                 if (is_t4(sc))
  287                         port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
  288                 else
  289                         port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
  290                 if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
  291                     rx_pause != tx_state[i].rx_pause &&
  292                     tx_frames == tx_state[i].tx_frames) {
  293                         t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
  294                         mdelay(1);
  295                         t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
  296                 }
  297         }
  298 }
  299 
  300 #define X_CIM_PF_NOACCESS 0xeeeeeeee
  301 /**
  302  *      t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
  303  *      @adap: the adapter
  304  *      @mbox: index of the mailbox to use
  305  *      @cmd: the command to write
  306  *      @size: command length in bytes
  307  *      @rpl: where to optionally store the reply
  308  *      @sleep_ok: if true we may sleep while awaiting command completion
  309  *      @timeout: time to wait for command to finish before timing out
  310  *              (negative implies @sleep_ok=false)
  311  *
  312  *      Sends the given command to FW through the selected mailbox and waits
  313  *      for the FW to execute the command.  If @rpl is not %NULL it is used to
  314  *      store the FW's reply to the command.  The command and its optional
  315  *      reply are of the same length.  Some FW commands like RESET and
  316  *      INITIALIZE can take a considerable amount of time to execute.
  317  *      @sleep_ok determines whether we may sleep while awaiting the response.
   318  *      If sleeping is allowed we use progressive backoff; otherwise we spin.
  319  *      Note that passing in a negative @timeout is an alternate mechanism
  320  *      for specifying @sleep_ok=false.  This is useful when a higher level
  321  *      interface allows for specification of @timeout but not @sleep_ok ...
  322  *
  323  *      The return value is 0 on success or a negative errno on failure.  A
  324  *      failure can happen either because we are not able to execute the
  325  *      command or FW executes it but signals an error.  In the latter case
  326  *      the return value is the error code indicated by FW (negated).
  327  */
  328 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
  329                             int size, void *rpl, bool sleep_ok, int timeout)
  330 {
  331         /*
  332          * We delay in small increments at first in an effort to maintain
  333          * responsiveness for simple, fast executing commands but then back
  334          * off to larger delays to a maximum retry delay.
  335          */
  336         static const int delay[] = {
  337                 1, 1, 3, 5, 10, 10, 20, 50, 100
  338         };
  339         u32 v;
  340         u64 res;
  341         int i, ms, delay_idx, ret, next_tx_check;
  342         u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
  343         u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
  344         u32 ctl;
  345         __be64 cmd_rpl[MBOX_LEN/8];
  346         u32 pcie_fw;
  347         struct port_tx_state tx_state[MAX_NPORTS];
  348 
  349         if (adap->flags & CHK_MBOX_ACCESS)
  350                 ASSERT_SYNCHRONIZED_OP(adap);
  351 
  352         if (size <= 0 || (size & 15) || size > MBOX_LEN)
  353                 return -EINVAL;
  354 
  355         if (adap->flags & IS_VF) {
  356                 if (is_t6(adap))
  357                         data_reg = FW_T6VF_MBDATA_BASE_ADDR;
  358                 else
  359                         data_reg = FW_T4VF_MBDATA_BASE_ADDR;
  360                 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
  361         }
  362 
  363         /*
  364          * If we have a negative timeout, that implies that we can't sleep.
  365          */
  366         if (timeout < 0) {
  367                 sleep_ok = false;
  368                 timeout = -timeout;
  369         }
  370 
  371         /*
  372          * Attempt to gain access to the mailbox.
  373          */
  374         pcie_fw = 0;
  375         if (!(adap->flags & IS_VF)) {
  376                 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
  377                 if (pcie_fw & F_PCIE_FW_ERR)
  378                         goto failed;
  379         }
  380         for (i = 0; i < 4; i++) {
  381                 ctl = t4_read_reg(adap, ctl_reg);
  382                 v = G_MBOWNER(ctl);
  383                 if (v != X_MBOWNER_NONE)
  384                         break;
  385         }
  386 
  387         /*
  388          * If we were unable to gain access, report the error to our caller.
  389          */
  390         if (v != X_MBOWNER_PL) {
  391                 if (!(adap->flags & IS_VF)) {
  392                         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
  393                         if (pcie_fw & F_PCIE_FW_ERR)
  394                                 goto failed;
  395                 }
  396                 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
  397                 return ret;
  398         }
  399 
  400         /*
  401          * If we gain ownership of the mailbox and there's a "valid" message
  402          * in it, this is likely an asynchronous error message from the
  403          * firmware.  So we'll report that and then proceed on with attempting
  404          * to issue our own command ... which may well fail if the error
  405          * presaged the firmware crashing ...
  406          */
  407         if (ctl & F_MBMSGVALID) {
  408                 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
  409         }
  410 
  411         /*
  412          * Copy in the new mailbox command and send it on its way ...
  413          */
  414         memset(cmd_rpl, 0, sizeof(cmd_rpl));
  415         memcpy(cmd_rpl, cmd, size);
  416         CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
  417         for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
  418                 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));
  419 
  420         if (adap->flags & IS_VF) {
  421                 /*
  422                  * For the VFs, the Mailbox Data "registers" are
  423                  * actually backed by T4's "MA" interface rather than
  424                  * PL Registers (as is the case for the PFs).  Because
  425                  * these are in different coherency domains, the write
  426                  * to the VF's PL-register-backed Mailbox Control can
  427                  * race in front of the writes to the MA-backed VF
  428                  * Mailbox Data "registers".  So we need to do a
  429                  * read-back on at least one byte of the VF Mailbox
  430                  * Data registers before doing the write to the VF
  431                  * Mailbox Control register.
  432                  */
  433                 t4_read_reg(adap, data_reg);
  434         }
  435 
  436         t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
  437         read_tx_state(adap, &tx_state[0]);      /* also flushes the write_reg */
  438         next_tx_check = 1000;
  439         delay_idx = 0;
  440         ms = delay[0];
  441 
  442         /*
  443          * Loop waiting for the reply; bail out if we time out or the firmware
  444          * reports an error.
  445          */
  446         for (i = 0; i < timeout; i += ms) {
  447                 if (!(adap->flags & IS_VF)) {
  448                         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
  449                         if (pcie_fw & F_PCIE_FW_ERR)
  450                                 break;
  451                 }
  452 
  453                 if (i >= next_tx_check) {
  454                         check_tx_state(adap, &tx_state[0]);
  455                         next_tx_check = i + 1000;
  456                 }
  457 
  458                 if (sleep_ok) {
  459                         ms = delay[delay_idx];  /* last element may repeat */
  460                         if (delay_idx < ARRAY_SIZE(delay) - 1)
  461                                 delay_idx++;
  462                         msleep(ms);
  463                 } else {
  464                         mdelay(ms);
  465                 }
  466 
  467                 v = t4_read_reg(adap, ctl_reg);
  468                 if (v == X_CIM_PF_NOACCESS)
  469                         continue;
  470                 if (G_MBOWNER(v) == X_MBOWNER_PL) {
  471                         if (!(v & F_MBMSGVALID)) {
  472                                 t4_write_reg(adap, ctl_reg,
  473                                              V_MBOWNER(X_MBOWNER_NONE));
  474                                 continue;
  475                         }
  476 
  477                         /*
  478                          * Retrieve the command reply and release the mailbox.
  479                          */
  480                         get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
  481                         CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
  482                         t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
  483 
  484                         res = be64_to_cpu(cmd_rpl[0]);
  485                         if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
  486                                 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
  487                                 res = V_FW_CMD_RETVAL(EIO);
  488                         } else if (rpl)
  489                                 memcpy(rpl, cmd_rpl, size);
  490                         return -G_FW_CMD_RETVAL((int)res);
  491                 }
  492         }
  493 
  494         /*
  495          * We timed out waiting for a reply to our mailbox command.  Report
  496          * the error and also check to see if the firmware reported any
  497          * errors ...
  498          */
  499         CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
  500             *(const u8 *)cmd, mbox, pcie_fw);
  501         CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
  502         CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
  503 failed:
  504         adap->flags &= ~FW_OK;
  505         ret = pcie_fw & F_PCIE_FW_ERR ? -ENXIO : -ETIMEDOUT;
  506         t4_fatal_err(adap, true);
  507         return ret;
  508 }
  509 
  510 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
  511                     void *rpl, bool sleep_ok)
  512 {
   513         return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
   514                                        sleep_ok, FW_CMD_MAX_TIMEOUT);
  515 
  516 }
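
/*
 * Usage sketch (illustrative): the negative-timeout convention described
 * above.  Passing -FW_CMD_MAX_TIMEOUT makes the mailbox code spin (mdelay)
 * between polls rather than sleep, even though sleep_ok is nominally true.
 */
static inline int example_wr_mbox_spin(struct adapter *adap, int mbox,
                                       const void *cmd, int size, void *rpl)
{
        return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
            -FW_CMD_MAX_TIMEOUT);
}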
  517 
  518 static int t4_edc_err_read(struct adapter *adap, int idx)
  519 {
  520         u32 edc_ecc_err_addr_reg;
  521         u32 edc_bist_status_rdata_reg;
  522 
  523         if (is_t4(adap)) {
  524                 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
  525                 return 0;
  526         }
  527         if (idx != MEM_EDC0 && idx != MEM_EDC1) {
  528                 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
  529                 return 0;
  530         }
  531 
  532         edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
  533         edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
  534 
  535         CH_WARN(adap,
  536                 "edc%d err addr 0x%x: 0x%x.\n",
  537                 idx, edc_ecc_err_addr_reg,
  538                 t4_read_reg(adap, edc_ecc_err_addr_reg));
  539         CH_WARN(adap,
  540                 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
  541                 edc_bist_status_rdata_reg,
  542                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
  543                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
  544                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
  545                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
  546                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
  547                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
  548                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
  549                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
  550                 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
  551 
  552         return 0;
  553 }
  554 
  555 /**
  556  *      t4_mc_read - read from MC through backdoor accesses
  557  *      @adap: the adapter
  558  *      @idx: which MC to access
  559  *      @addr: address of first byte requested
  560  *      @data: 64 bytes of data containing the requested address
  561  *      @ecc: where to store the corresponding 64-bit ECC word
  562  *
  563  *      Read 64 bytes of data from MC starting at a 64-byte-aligned address
  564  *      that covers the requested address @addr.  If @parity is not %NULL it
  565  *      is assigned the 64-bit ECC word for the read data.
  566  */
  567 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  568 {
  569         int i;
  570         u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
  571         u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
  572 
  573         if (is_t4(adap)) {
  574                 mc_bist_cmd_reg = A_MC_BIST_CMD;
  575                 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
  576                 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
  577                 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
  578                 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
  579         } else {
  580                 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
  581                 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
  582                 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
  583                 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
  584                                                   idx);
  585                 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
  586                                                   idx);
  587         }
  588 
  589         if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
  590                 return -EBUSY;
  591         t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
  592         t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
  593         t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
  594         t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
  595                      F_START_BIST | V_BIST_CMD_GAP(1));
  596         i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
  597         if (i)
  598                 return i;
  599 
  600 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
  601 
  602         for (i = 15; i >= 0; i--)
  603                 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
  604         if (ecc)
  605                 *ecc = t4_read_reg64(adap, MC_DATA(16));
  606 #undef MC_DATA
  607         return 0;
  608 }
  609 
  610 /**
  611  *      t4_edc_read - read from EDC through backdoor accesses
  612  *      @adap: the adapter
  613  *      @idx: which EDC to access
  614  *      @addr: address of first byte requested
  615  *      @data: 64 bytes of data containing the requested address
  616  *      @ecc: where to store the corresponding 64-bit ECC word
  617  *
  618  *      Read 64 bytes of data from EDC starting at a 64-byte-aligned address
   619  *      that covers the requested address @addr.  If @ecc is not %NULL it
  620  *      is assigned the 64-bit ECC word for the read data.
  621  */
  622 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  623 {
  624         int i;
  625         u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
  626         u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
  627 
  628         if (is_t4(adap)) {
  629                 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
  630                 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
  631                 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
  632                 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
  633                                                     idx);
  634                 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
  635                                                     idx);
  636         } else {
  637 /*
   638  * These macros are missing from the t4_regs.h file.
  639  * Added temporarily for testing.
  640  */
  641 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
  642 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
  643                 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
  644                 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
  645                 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
  646                 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
  647                                                     idx);
  648                 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
  649                                                     idx);
  650 #undef EDC_REG_T5
  651 #undef EDC_STRIDE_T5
  652         }
  653 
  654         if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
  655                 return -EBUSY;
  656         t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
  657         t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
  658         t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
  659         t4_write_reg(adap, edc_bist_cmd_reg,
  660                      V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
  661         i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
  662         if (i)
  663                 return i;
  664 
  665 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
  666 
  667         for (i = 15; i >= 0; i--)
  668                 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
  669         if (ecc)
  670                 *ecc = t4_read_reg64(adap, EDC_DATA(16));
  671 #undef EDC_DATA
  672         return 0;
  673 }
  674 
  675 /**
  676  *      t4_mem_read - read EDC 0, EDC 1 or MC into buffer
  677  *      @adap: the adapter
  678  *      @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  679  *      @addr: address within indicated memory type
  680  *      @len: amount of memory to read
  681  *      @buf: host memory buffer
  682  *
  683  *      Reads an [almost] arbitrary memory region in the firmware: the
  684  *      firmware memory address, length and host buffer must be aligned on
   685  *      32-bit boundaries.  The memory is returned as a raw byte sequence from
  686  *      the firmware's memory.  If this memory contains data structures which
   687  *      contain multi-byte integers, it is the caller's responsibility to
  688  *      perform appropriate byte order conversions.
  689  */
  690 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
  691                 __be32 *buf)
  692 {
  693         u32 pos, start, end, offset;
  694         int ret;
  695 
  696         /*
  697          * Argument sanity checks ...
  698          */
  699         if ((addr & 0x3) || (len & 0x3))
  700                 return -EINVAL;
  701 
  702         /*
   703          * The underlying EDC/MC read routines read 64 bytes at a time so we
  704          * need to round down the start and round up the end.  We'll start
  705          * copying out of the first line at (addr - start) a word at a time.
  706          */
  707         start = rounddown2(addr, 64);
  708         end = roundup2(addr + len, 64);
  709         offset = (addr - start)/sizeof(__be32);
  710 
  711         for (pos = start; pos < end; pos += 64, offset = 0) {
  712                 __be32 data[16];
  713 
  714                 /*
  715                  * Read the chip's memory block and bail if there's an error.
  716                  */
  717                 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
  718                         ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
  719                 else
  720                         ret = t4_edc_read(adap, mtype, pos, data, NULL);
  721                 if (ret)
  722                         return ret;
  723 
  724                 /*
  725                  * Copy the data into the caller's memory buffer.
  726                  */
  727                 while (offset < 16 && len > 0) {
  728                         *buf++ = data[offset++];
  729                         len -= sizeof(__be32);
  730                 }
  731         }
  732 
  733         return 0;
  734 }
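
/*
 * Usage sketch (illustrative): pull one 64-byte line out of EDC0 and convert
 * it to host byte order, per the note above that t4_mem_read() returns raw
 * big-endian data.  The address is rounded down to a 64-byte boundary so the
 * 16-word read covers a whole line; @out must have room for 16 words.
 */
static inline int example_read_edc0_line(struct adapter *adap, u32 addr,
                                         u32 *out)
{
        __be32 raw[16];
        int i, ret;

        ret = t4_mem_read(adap, MEM_EDC0, addr & ~0x3fU, sizeof(raw), raw);
        if (ret != 0)
                return ret;
        for (i = 0; i < 16; i++)
                out[i] = be32_to_cpu(raw[i]);
        return 0;
}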
  735 
  736 /*
  737  * Return the specified PCI-E Configuration Space register from our Physical
   738  * Function.  We try first via a Firmware LDST Command (if drv_fw_attach != 0)
  739  * since we prefer to let the firmware own all of these registers, but if that
  740  * fails we go for it directly ourselves.
  741  */
  742 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
  743 {
  744 
  745         /*
   746          * If drv_fw_attach != 0, construct and send the Firmware LDST Command to
  747          * retrieve the specified PCI-E Configuration Space register.
  748          */
  749         if (drv_fw_attach != 0) {
  750                 struct fw_ldst_cmd ldst_cmd;
  751                 int ret;
  752 
  753                 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
  754                 ldst_cmd.op_to_addrspace =
  755                         cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
  756                                     F_FW_CMD_REQUEST |
  757                                     F_FW_CMD_READ |
  758                                     V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
  759                 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
  760                 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
  761                 ldst_cmd.u.pcie.ctrl_to_fn =
  762                         (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
  763                 ldst_cmd.u.pcie.r = reg;
  764 
  765                 /*
  766                  * If the LDST Command succeeds, return the result, otherwise
  767                  * fall through to reading it directly ourselves ...
  768                  */
  769                 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
  770                                  &ldst_cmd);
  771                 if (ret == 0)
  772                         return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
  773 
  774                 CH_WARN(adap, "Firmware failed to return "
  775                         "Configuration Space register %d, err = %d\n",
  776                         reg, -ret);
  777         }
  778 
  779         /*
  780          * Read the desired Configuration Space register via the PCI-E
  781          * Backdoor mechanism.
  782          */
  783         return t4_hw_pci_read_cfg4(adap, reg);
  784 }
  785 
  786 /**
   787  *      t4_get_regs_len - return the size of the chip's register set
  788  *      @adapter: the adapter
  789  *
  790  *      Returns the size of the chip's BAR0 register space.
  791  */
  792 unsigned int t4_get_regs_len(struct adapter *adapter)
  793 {
  794         unsigned int chip_version = chip_id(adapter);
  795 
  796         switch (chip_version) {
  797         case CHELSIO_T4:
  798                 if (adapter->flags & IS_VF)
  799                         return FW_T4VF_REGMAP_SIZE;
  800                 return T4_REGMAP_SIZE;
  801 
  802         case CHELSIO_T5:
  803         case CHELSIO_T6:
  804                 if (adapter->flags & IS_VF)
  805                         return FW_T4VF_REGMAP_SIZE;
  806                 return T5_REGMAP_SIZE;
  807         }
  808 
  809         CH_ERR(adapter,
  810                 "Unsupported chip version %d\n", chip_version);
  811         return 0;
  812 }
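
/*
 * Usage sketch (illustrative): size and fill a register dump buffer using
 * t4_get_regs_len() together with t4_get_regs() below.  This assumes the
 * standard malloc(9) interface and the M_DEVBUF type are visible in this
 * file, which may require <sys/malloc.h>.
 */
static inline void example_dump_regs(struct adapter *adap)
{
        unsigned int len = t4_get_regs_len(adap);
        u8 *buf = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);

        t4_get_regs(adap, buf, len);
        /* ... hand buf to the consumer, e.g. copyout() for an ioctl ... */
        free(buf, M_DEVBUF);
}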
  813 
  814 /**
  815  *      t4_get_regs - read chip registers into provided buffer
  816  *      @adap: the adapter
  817  *      @buf: register buffer
  818  *      @buf_size: size (in bytes) of register buffer
  819  *
  820  *      If the provided register buffer isn't large enough for the chip's
  821  *      full register range, the register dump will be truncated to the
  822  *      register buffer's size.
  823  */
  824 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
  825 {
  826         static const unsigned int t4_reg_ranges[] = {
  827                 0x1008, 0x1108,
  828                 0x1180, 0x1184,
  829                 0x1190, 0x1194,
  830                 0x11a0, 0x11a4,
  831                 0x11b0, 0x11b4,
  832                 0x11fc, 0x123c,
  833                 0x1300, 0x173c,
  834                 0x1800, 0x18fc,
  835                 0x3000, 0x30d8,
  836                 0x30e0, 0x30e4,
  837                 0x30ec, 0x5910,
  838                 0x5920, 0x5924,
  839                 0x5960, 0x5960,
  840                 0x5968, 0x5968,
  841                 0x5970, 0x5970,
  842                 0x5978, 0x5978,
  843                 0x5980, 0x5980,
  844                 0x5988, 0x5988,
  845                 0x5990, 0x5990,
  846                 0x5998, 0x5998,
  847                 0x59a0, 0x59d4,
  848                 0x5a00, 0x5ae0,
  849                 0x5ae8, 0x5ae8,
  850                 0x5af0, 0x5af0,
  851                 0x5af8, 0x5af8,
  852                 0x6000, 0x6098,
  853                 0x6100, 0x6150,
  854                 0x6200, 0x6208,
  855                 0x6240, 0x6248,
  856                 0x6280, 0x62b0,
  857                 0x62c0, 0x6338,
  858                 0x6370, 0x638c,
  859                 0x6400, 0x643c,
  860                 0x6500, 0x6524,
  861                 0x6a00, 0x6a04,
  862                 0x6a14, 0x6a38,
  863                 0x6a60, 0x6a70,
  864                 0x6a78, 0x6a78,
  865                 0x6b00, 0x6b0c,
  866                 0x6b1c, 0x6b84,
  867                 0x6bf0, 0x6bf8,
  868                 0x6c00, 0x6c0c,
  869                 0x6c1c, 0x6c84,
  870                 0x6cf0, 0x6cf8,
  871                 0x6d00, 0x6d0c,
  872                 0x6d1c, 0x6d84,
  873                 0x6df0, 0x6df8,
  874                 0x6e00, 0x6e0c,
  875                 0x6e1c, 0x6e84,
  876                 0x6ef0, 0x6ef8,
  877                 0x6f00, 0x6f0c,
  878                 0x6f1c, 0x6f84,
  879                 0x6ff0, 0x6ff8,
  880                 0x7000, 0x700c,
  881                 0x701c, 0x7084,
  882                 0x70f0, 0x70f8,
  883                 0x7100, 0x710c,
  884                 0x711c, 0x7184,
  885                 0x71f0, 0x71f8,
  886                 0x7200, 0x720c,
  887                 0x721c, 0x7284,
  888                 0x72f0, 0x72f8,
  889                 0x7300, 0x730c,
  890                 0x731c, 0x7384,
  891                 0x73f0, 0x73f8,
  892                 0x7400, 0x7450,
  893                 0x7500, 0x7530,
  894                 0x7600, 0x760c,
  895                 0x7614, 0x761c,
  896                 0x7680, 0x76cc,
  897                 0x7700, 0x7798,
  898                 0x77c0, 0x77fc,
  899                 0x7900, 0x79fc,
  900                 0x7b00, 0x7b58,
  901                 0x7b60, 0x7b84,
  902                 0x7b8c, 0x7c38,
  903                 0x7d00, 0x7d38,
  904                 0x7d40, 0x7d80,
  905                 0x7d8c, 0x7ddc,
  906                 0x7de4, 0x7e04,
  907                 0x7e10, 0x7e1c,
  908                 0x7e24, 0x7e38,
  909                 0x7e40, 0x7e44,
  910                 0x7e4c, 0x7e78,
  911                 0x7e80, 0x7ea4,
  912                 0x7eac, 0x7edc,
  913                 0x7ee8, 0x7efc,
  914                 0x8dc0, 0x8e04,
  915                 0x8e10, 0x8e1c,
  916                 0x8e30, 0x8e78,
  917                 0x8ea0, 0x8eb8,
  918                 0x8ec0, 0x8f6c,
  919                 0x8fc0, 0x9008,
  920                 0x9010, 0x9058,
  921                 0x9060, 0x9060,
  922                 0x9068, 0x9074,
  923                 0x90fc, 0x90fc,
  924                 0x9400, 0x9408,
  925                 0x9410, 0x9458,
  926                 0x9600, 0x9600,
  927                 0x9608, 0x9638,
  928                 0x9640, 0x96bc,
  929                 0x9800, 0x9808,
  930                 0x9820, 0x983c,
  931                 0x9850, 0x9864,
  932                 0x9c00, 0x9c6c,
  933                 0x9c80, 0x9cec,
  934                 0x9d00, 0x9d6c,
  935                 0x9d80, 0x9dec,
  936                 0x9e00, 0x9e6c,
  937                 0x9e80, 0x9eec,
  938                 0x9f00, 0x9f6c,
  939                 0x9f80, 0x9fec,
  940                 0xd004, 0xd004,
  941                 0xd010, 0xd03c,
  942                 0xdfc0, 0xdfe0,
  943                 0xe000, 0xea7c,
  944                 0xf000, 0x11110,
  945                 0x11118, 0x11190,
  946                 0x19040, 0x1906c,
  947                 0x19078, 0x19080,
  948                 0x1908c, 0x190e4,
  949                 0x190f0, 0x190f8,
  950                 0x19100, 0x19110,
  951                 0x19120, 0x19124,
  952                 0x19150, 0x19194,
  953                 0x1919c, 0x191b0,
  954                 0x191d0, 0x191e8,
  955                 0x19238, 0x1924c,
  956                 0x193f8, 0x1943c,
  957                 0x1944c, 0x19474,
  958                 0x19490, 0x194e0,
  959                 0x194f0, 0x194f8,
  960                 0x19800, 0x19c08,
  961                 0x19c10, 0x19c90,
  962                 0x19ca0, 0x19ce4,
  963                 0x19cf0, 0x19d40,
  964                 0x19d50, 0x19d94,
  965                 0x19da0, 0x19de8,
  966                 0x19df0, 0x19e40,
  967                 0x19e50, 0x19e90,
  968                 0x19ea0, 0x19f4c,
  969                 0x1a000, 0x1a004,
  970                 0x1a010, 0x1a06c,
  971                 0x1a0b0, 0x1a0e4,
  972                 0x1a0ec, 0x1a0f4,
  973                 0x1a100, 0x1a108,
  974                 0x1a114, 0x1a120,
  975                 0x1a128, 0x1a130,
  976                 0x1a138, 0x1a138,
  977                 0x1a190, 0x1a1c4,
  978                 0x1a1fc, 0x1a1fc,
  979                 0x1e040, 0x1e04c,
  980                 0x1e284, 0x1e28c,
  981                 0x1e2c0, 0x1e2c0,
  982                 0x1e2e0, 0x1e2e0,
  983                 0x1e300, 0x1e384,
  984                 0x1e3c0, 0x1e3c8,
  985                 0x1e440, 0x1e44c,
  986                 0x1e684, 0x1e68c,
  987                 0x1e6c0, 0x1e6c0,
  988                 0x1e6e0, 0x1e6e0,
  989                 0x1e700, 0x1e784,
  990                 0x1e7c0, 0x1e7c8,
  991                 0x1e840, 0x1e84c,
  992                 0x1ea84, 0x1ea8c,
  993                 0x1eac0, 0x1eac0,
  994                 0x1eae0, 0x1eae0,
  995                 0x1eb00, 0x1eb84,
  996                 0x1ebc0, 0x1ebc8,
  997                 0x1ec40, 0x1ec4c,
  998                 0x1ee84, 0x1ee8c,
  999                 0x1eec0, 0x1eec0,
 1000                 0x1eee0, 0x1eee0,
 1001                 0x1ef00, 0x1ef84,
 1002                 0x1efc0, 0x1efc8,
 1003                 0x1f040, 0x1f04c,
 1004                 0x1f284, 0x1f28c,
 1005                 0x1f2c0, 0x1f2c0,
 1006                 0x1f2e0, 0x1f2e0,
 1007                 0x1f300, 0x1f384,
 1008                 0x1f3c0, 0x1f3c8,
 1009                 0x1f440, 0x1f44c,
 1010                 0x1f684, 0x1f68c,
 1011                 0x1f6c0, 0x1f6c0,
 1012                 0x1f6e0, 0x1f6e0,
 1013                 0x1f700, 0x1f784,
 1014                 0x1f7c0, 0x1f7c8,
 1015                 0x1f840, 0x1f84c,
 1016                 0x1fa84, 0x1fa8c,
 1017                 0x1fac0, 0x1fac0,
 1018                 0x1fae0, 0x1fae0,
 1019                 0x1fb00, 0x1fb84,
 1020                 0x1fbc0, 0x1fbc8,
 1021                 0x1fc40, 0x1fc4c,
 1022                 0x1fe84, 0x1fe8c,
 1023                 0x1fec0, 0x1fec0,
 1024                 0x1fee0, 0x1fee0,
 1025                 0x1ff00, 0x1ff84,
 1026                 0x1ffc0, 0x1ffc8,
 1027                 0x20000, 0x2002c,
 1028                 0x20100, 0x2013c,
 1029                 0x20190, 0x201a0,
 1030                 0x201a8, 0x201b8,
 1031                 0x201c4, 0x201c8,
 1032                 0x20200, 0x20318,
 1033                 0x20400, 0x204b4,
 1034                 0x204c0, 0x20528,
 1035                 0x20540, 0x20614,
 1036                 0x21000, 0x21040,
 1037                 0x2104c, 0x21060,
 1038                 0x210c0, 0x210ec,
 1039                 0x21200, 0x21268,
 1040                 0x21270, 0x21284,
 1041                 0x212fc, 0x21388,
 1042                 0x21400, 0x21404,
 1043                 0x21500, 0x21500,
 1044                 0x21510, 0x21518,
 1045                 0x2152c, 0x21530,
 1046                 0x2153c, 0x2153c,
 1047                 0x21550, 0x21554,
 1048                 0x21600, 0x21600,
 1049                 0x21608, 0x2161c,
 1050                 0x21624, 0x21628,
 1051                 0x21630, 0x21634,
 1052                 0x2163c, 0x2163c,
 1053                 0x21700, 0x2171c,
 1054                 0x21780, 0x2178c,
 1055                 0x21800, 0x21818,
 1056                 0x21820, 0x21828,
 1057                 0x21830, 0x21848,
 1058                 0x21850, 0x21854,
 1059                 0x21860, 0x21868,
 1060                 0x21870, 0x21870,
 1061                 0x21878, 0x21898,
 1062                 0x218a0, 0x218a8,
 1063                 0x218b0, 0x218c8,
 1064                 0x218d0, 0x218d4,
 1065                 0x218e0, 0x218e8,
 1066                 0x218f0, 0x218f0,
 1067                 0x218f8, 0x21a18,
 1068                 0x21a20, 0x21a28,
 1069                 0x21a30, 0x21a48,
 1070                 0x21a50, 0x21a54,
 1071                 0x21a60, 0x21a68,
 1072                 0x21a70, 0x21a70,
 1073                 0x21a78, 0x21a98,
 1074                 0x21aa0, 0x21aa8,
 1075                 0x21ab0, 0x21ac8,
 1076                 0x21ad0, 0x21ad4,
 1077                 0x21ae0, 0x21ae8,
 1078                 0x21af0, 0x21af0,
 1079                 0x21af8, 0x21c18,
 1080                 0x21c20, 0x21c20,
 1081                 0x21c28, 0x21c30,
 1082                 0x21c38, 0x21c38,
 1083                 0x21c80, 0x21c98,
 1084                 0x21ca0, 0x21ca8,
 1085                 0x21cb0, 0x21cc8,
 1086                 0x21cd0, 0x21cd4,
 1087                 0x21ce0, 0x21ce8,
 1088                 0x21cf0, 0x21cf0,
 1089                 0x21cf8, 0x21d7c,
 1090                 0x21e00, 0x21e04,
 1091                 0x22000, 0x2202c,
 1092                 0x22100, 0x2213c,
 1093                 0x22190, 0x221a0,
 1094                 0x221a8, 0x221b8,
 1095                 0x221c4, 0x221c8,
 1096                 0x22200, 0x22318,
 1097                 0x22400, 0x224b4,
 1098                 0x224c0, 0x22528,
 1099                 0x22540, 0x22614,
 1100                 0x23000, 0x23040,
 1101                 0x2304c, 0x23060,
 1102                 0x230c0, 0x230ec,
 1103                 0x23200, 0x23268,
 1104                 0x23270, 0x23284,
 1105                 0x232fc, 0x23388,
 1106                 0x23400, 0x23404,
 1107                 0x23500, 0x23500,
 1108                 0x23510, 0x23518,
 1109                 0x2352c, 0x23530,
 1110                 0x2353c, 0x2353c,
 1111                 0x23550, 0x23554,
 1112                 0x23600, 0x23600,
 1113                 0x23608, 0x2361c,
 1114                 0x23624, 0x23628,
 1115                 0x23630, 0x23634,
 1116                 0x2363c, 0x2363c,
 1117                 0x23700, 0x2371c,
 1118                 0x23780, 0x2378c,
 1119                 0x23800, 0x23818,
 1120                 0x23820, 0x23828,
 1121                 0x23830, 0x23848,
 1122                 0x23850, 0x23854,
 1123                 0x23860, 0x23868,
 1124                 0x23870, 0x23870,
 1125                 0x23878, 0x23898,
 1126                 0x238a0, 0x238a8,
 1127                 0x238b0, 0x238c8,
 1128                 0x238d0, 0x238d4,
 1129                 0x238e0, 0x238e8,
 1130                 0x238f0, 0x238f0,
 1131                 0x238f8, 0x23a18,
 1132                 0x23a20, 0x23a28,
 1133                 0x23a30, 0x23a48,
 1134                 0x23a50, 0x23a54,
 1135                 0x23a60, 0x23a68,
 1136                 0x23a70, 0x23a70,
 1137                 0x23a78, 0x23a98,
 1138                 0x23aa0, 0x23aa8,
 1139                 0x23ab0, 0x23ac8,
 1140                 0x23ad0, 0x23ad4,
 1141                 0x23ae0, 0x23ae8,
 1142                 0x23af0, 0x23af0,
 1143                 0x23af8, 0x23c18,
 1144                 0x23c20, 0x23c20,
 1145                 0x23c28, 0x23c30,
 1146                 0x23c38, 0x23c38,
 1147                 0x23c80, 0x23c98,
 1148                 0x23ca0, 0x23ca8,
 1149                 0x23cb0, 0x23cc8,
 1150                 0x23cd0, 0x23cd4,
 1151                 0x23ce0, 0x23ce8,
 1152                 0x23cf0, 0x23cf0,
 1153                 0x23cf8, 0x23d7c,
 1154                 0x23e00, 0x23e04,
 1155                 0x24000, 0x2402c,
 1156                 0x24100, 0x2413c,
 1157                 0x24190, 0x241a0,
 1158                 0x241a8, 0x241b8,
 1159                 0x241c4, 0x241c8,
 1160                 0x24200, 0x24318,
 1161                 0x24400, 0x244b4,
 1162                 0x244c0, 0x24528,
 1163                 0x24540, 0x24614,
 1164                 0x25000, 0x25040,
 1165                 0x2504c, 0x25060,
 1166                 0x250c0, 0x250ec,
 1167                 0x25200, 0x25268,
 1168                 0x25270, 0x25284,
 1169                 0x252fc, 0x25388,
 1170                 0x25400, 0x25404,
 1171                 0x25500, 0x25500,
 1172                 0x25510, 0x25518,
 1173                 0x2552c, 0x25530,
 1174                 0x2553c, 0x2553c,
 1175                 0x25550, 0x25554,
 1176                 0x25600, 0x25600,
 1177                 0x25608, 0x2561c,
 1178                 0x25624, 0x25628,
 1179                 0x25630, 0x25634,
 1180                 0x2563c, 0x2563c,
 1181                 0x25700, 0x2571c,
 1182                 0x25780, 0x2578c,
 1183                 0x25800, 0x25818,
 1184                 0x25820, 0x25828,
 1185                 0x25830, 0x25848,
 1186                 0x25850, 0x25854,
 1187                 0x25860, 0x25868,
 1188                 0x25870, 0x25870,
 1189                 0x25878, 0x25898,
 1190                 0x258a0, 0x258a8,
 1191                 0x258b0, 0x258c8,
 1192                 0x258d0, 0x258d4,
 1193                 0x258e0, 0x258e8,
 1194                 0x258f0, 0x258f0,
 1195                 0x258f8, 0x25a18,
 1196                 0x25a20, 0x25a28,
 1197                 0x25a30, 0x25a48,
 1198                 0x25a50, 0x25a54,
 1199                 0x25a60, 0x25a68,
 1200                 0x25a70, 0x25a70,
 1201                 0x25a78, 0x25a98,
 1202                 0x25aa0, 0x25aa8,
 1203                 0x25ab0, 0x25ac8,
 1204                 0x25ad0, 0x25ad4,
 1205                 0x25ae0, 0x25ae8,
 1206                 0x25af0, 0x25af0,
 1207                 0x25af8, 0x25c18,
 1208                 0x25c20, 0x25c20,
 1209                 0x25c28, 0x25c30,
 1210                 0x25c38, 0x25c38,
 1211                 0x25c80, 0x25c98,
 1212                 0x25ca0, 0x25ca8,
 1213                 0x25cb0, 0x25cc8,
 1214                 0x25cd0, 0x25cd4,
 1215                 0x25ce0, 0x25ce8,
 1216                 0x25cf0, 0x25cf0,
 1217                 0x25cf8, 0x25d7c,
 1218                 0x25e00, 0x25e04,
 1219                 0x26000, 0x2602c,
 1220                 0x26100, 0x2613c,
 1221                 0x26190, 0x261a0,
 1222                 0x261a8, 0x261b8,
 1223                 0x261c4, 0x261c8,
 1224                 0x26200, 0x26318,
 1225                 0x26400, 0x264b4,
 1226                 0x264c0, 0x26528,
 1227                 0x26540, 0x26614,
 1228                 0x27000, 0x27040,
 1229                 0x2704c, 0x27060,
 1230                 0x270c0, 0x270ec,
 1231                 0x27200, 0x27268,
 1232                 0x27270, 0x27284,
 1233                 0x272fc, 0x27388,
 1234                 0x27400, 0x27404,
 1235                 0x27500, 0x27500,
 1236                 0x27510, 0x27518,
 1237                 0x2752c, 0x27530,
 1238                 0x2753c, 0x2753c,
 1239                 0x27550, 0x27554,
 1240                 0x27600, 0x27600,
 1241                 0x27608, 0x2761c,
 1242                 0x27624, 0x27628,
 1243                 0x27630, 0x27634,
 1244                 0x2763c, 0x2763c,
 1245                 0x27700, 0x2771c,
 1246                 0x27780, 0x2778c,
 1247                 0x27800, 0x27818,
 1248                 0x27820, 0x27828,
 1249                 0x27830, 0x27848,
 1250                 0x27850, 0x27854,
 1251                 0x27860, 0x27868,
 1252                 0x27870, 0x27870,
 1253                 0x27878, 0x27898,
 1254                 0x278a0, 0x278a8,
 1255                 0x278b0, 0x278c8,
 1256                 0x278d0, 0x278d4,
 1257                 0x278e0, 0x278e8,
 1258                 0x278f0, 0x278f0,
 1259                 0x278f8, 0x27a18,
 1260                 0x27a20, 0x27a28,
 1261                 0x27a30, 0x27a48,
 1262                 0x27a50, 0x27a54,
 1263                 0x27a60, 0x27a68,
 1264                 0x27a70, 0x27a70,
 1265                 0x27a78, 0x27a98,
 1266                 0x27aa0, 0x27aa8,
 1267                 0x27ab0, 0x27ac8,
 1268                 0x27ad0, 0x27ad4,
 1269                 0x27ae0, 0x27ae8,
 1270                 0x27af0, 0x27af0,
 1271                 0x27af8, 0x27c18,
 1272                 0x27c20, 0x27c20,
 1273                 0x27c28, 0x27c30,
 1274                 0x27c38, 0x27c38,
 1275                 0x27c80, 0x27c98,
 1276                 0x27ca0, 0x27ca8,
 1277                 0x27cb0, 0x27cc8,
 1278                 0x27cd0, 0x27cd4,
 1279                 0x27ce0, 0x27ce8,
 1280                 0x27cf0, 0x27cf0,
 1281                 0x27cf8, 0x27d7c,
 1282                 0x27e00, 0x27e04,
 1283         };
 1284 
 1285         static const unsigned int t4vf_reg_ranges[] = {
 1286                 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
 1287                 VF_MPS_REG(A_MPS_VF_CTL),
 1288                 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
 1289                 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
 1290                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
 1291                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
 1292                 FW_T4VF_MBDATA_BASE_ADDR,
 1293                 FW_T4VF_MBDATA_BASE_ADDR +
 1294                 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
 1295         };
 1296 
 1297         static const unsigned int t5_reg_ranges[] = {
 1298                 0x1008, 0x10c0,
 1299                 0x10cc, 0x10f8,
 1300                 0x1100, 0x1100,
 1301                 0x110c, 0x1148,
 1302                 0x1180, 0x1184,
 1303                 0x1190, 0x1194,
 1304                 0x11a0, 0x11a4,
 1305                 0x11b0, 0x11b4,
 1306                 0x11fc, 0x123c,
 1307                 0x1280, 0x173c,
 1308                 0x1800, 0x18fc,
 1309                 0x3000, 0x3028,
 1310                 0x3060, 0x30b0,
 1311                 0x30b8, 0x30d8,
 1312                 0x30e0, 0x30fc,
 1313                 0x3140, 0x357c,
 1314                 0x35a8, 0x35cc,
 1315                 0x35ec, 0x35ec,
 1316                 0x3600, 0x5624,
 1317                 0x56cc, 0x56ec,
 1318                 0x56f4, 0x5720,
 1319                 0x5728, 0x575c,
 1320                 0x580c, 0x5814,
 1321                 0x5890, 0x589c,
 1322                 0x58a4, 0x58ac,
 1323                 0x58b8, 0x58bc,
 1324                 0x5940, 0x59c8,
 1325                 0x59d0, 0x59dc,
 1326                 0x59fc, 0x5a18,
 1327                 0x5a60, 0x5a70,
 1328                 0x5a80, 0x5a9c,
 1329                 0x5b94, 0x5bfc,
 1330                 0x6000, 0x6020,
 1331                 0x6028, 0x6040,
 1332                 0x6058, 0x609c,
 1333                 0x60a8, 0x614c,
 1334                 0x7700, 0x7798,
 1335                 0x77c0, 0x78fc,
 1336                 0x7b00, 0x7b58,
 1337                 0x7b60, 0x7b84,
 1338                 0x7b8c, 0x7c54,
 1339                 0x7d00, 0x7d38,
 1340                 0x7d40, 0x7d80,
 1341                 0x7d8c, 0x7ddc,
 1342                 0x7de4, 0x7e04,
 1343                 0x7e10, 0x7e1c,
 1344                 0x7e24, 0x7e38,
 1345                 0x7e40, 0x7e44,
 1346                 0x7e4c, 0x7e78,
 1347                 0x7e80, 0x7edc,
 1348                 0x7ee8, 0x7efc,
 1349                 0x8dc0, 0x8de0,
 1350                 0x8df8, 0x8e04,
 1351                 0x8e10, 0x8e84,
 1352                 0x8ea0, 0x8f84,
 1353                 0x8fc0, 0x9058,
 1354                 0x9060, 0x9060,
 1355                 0x9068, 0x90f8,
 1356                 0x9400, 0x9408,
 1357                 0x9410, 0x9470,
 1358                 0x9600, 0x9600,
 1359                 0x9608, 0x9638,
 1360                 0x9640, 0x96f4,
 1361                 0x9800, 0x9808,
 1362                 0x9810, 0x9864,
 1363                 0x9c00, 0x9c6c,
 1364                 0x9c80, 0x9cec,
 1365                 0x9d00, 0x9d6c,
 1366                 0x9d80, 0x9dec,
 1367                 0x9e00, 0x9e6c,
 1368                 0x9e80, 0x9eec,
 1369                 0x9f00, 0x9f6c,
 1370                 0x9f80, 0xa020,
 1371                 0xd000, 0xd004,
 1372                 0xd010, 0xd03c,
 1373                 0xdfc0, 0xdfe0,
 1374                 0xe000, 0x1106c,
 1375                 0x11074, 0x11088,
 1376                 0x1109c, 0x11110,
 1377                 0x11118, 0x1117c,
 1378                 0x11190, 0x11204,
 1379                 0x19040, 0x1906c,
 1380                 0x19078, 0x19080,
 1381                 0x1908c, 0x190e8,
 1382                 0x190f0, 0x190f8,
 1383                 0x19100, 0x19110,
 1384                 0x19120, 0x19124,
 1385                 0x19150, 0x19194,
 1386                 0x1919c, 0x191b0,
 1387                 0x191d0, 0x191e8,
 1388                 0x19238, 0x19290,
 1389                 0x193f8, 0x19428,
 1390                 0x19430, 0x19444,
 1391                 0x1944c, 0x1946c,
 1392                 0x19474, 0x19474,
 1393                 0x19490, 0x194cc,
 1394                 0x194f0, 0x194f8,
 1395                 0x19c00, 0x19c08,
 1396                 0x19c10, 0x19c60,
 1397                 0x19c94, 0x19ce4,
 1398                 0x19cf0, 0x19d40,
 1399                 0x19d50, 0x19d94,
 1400                 0x19da0, 0x19de8,
 1401                 0x19df0, 0x19e10,
 1402                 0x19e50, 0x19e90,
 1403                 0x19ea0, 0x19f24,
 1404                 0x19f34, 0x19f34,
 1405                 0x19f40, 0x19f50,
 1406                 0x19f90, 0x19fb4,
 1407                 0x19fc4, 0x19fe4,
 1408                 0x1a000, 0x1a004,
 1409                 0x1a010, 0x1a06c,
 1410                 0x1a0b0, 0x1a0e4,
 1411                 0x1a0ec, 0x1a0f8,
 1412                 0x1a100, 0x1a108,
 1413                 0x1a114, 0x1a130,
 1414                 0x1a138, 0x1a1c4,
 1415                 0x1a1fc, 0x1a1fc,
 1416                 0x1e008, 0x1e00c,
 1417                 0x1e040, 0x1e044,
 1418                 0x1e04c, 0x1e04c,
 1419                 0x1e284, 0x1e290,
 1420                 0x1e2c0, 0x1e2c0,
 1421                 0x1e2e0, 0x1e2e0,
 1422                 0x1e300, 0x1e384,
 1423                 0x1e3c0, 0x1e3c8,
 1424                 0x1e408, 0x1e40c,
 1425                 0x1e440, 0x1e444,
 1426                 0x1e44c, 0x1e44c,
 1427                 0x1e684, 0x1e690,
 1428                 0x1e6c0, 0x1e6c0,
 1429                 0x1e6e0, 0x1e6e0,
 1430                 0x1e700, 0x1e784,
 1431                 0x1e7c0, 0x1e7c8,
 1432                 0x1e808, 0x1e80c,
 1433                 0x1e840, 0x1e844,
 1434                 0x1e84c, 0x1e84c,
 1435                 0x1ea84, 0x1ea90,
 1436                 0x1eac0, 0x1eac0,
 1437                 0x1eae0, 0x1eae0,
 1438                 0x1eb00, 0x1eb84,
 1439                 0x1ebc0, 0x1ebc8,
 1440                 0x1ec08, 0x1ec0c,
 1441                 0x1ec40, 0x1ec44,
 1442                 0x1ec4c, 0x1ec4c,
 1443                 0x1ee84, 0x1ee90,
 1444                 0x1eec0, 0x1eec0,
 1445                 0x1eee0, 0x1eee0,
 1446                 0x1ef00, 0x1ef84,
 1447                 0x1efc0, 0x1efc8,
 1448                 0x1f008, 0x1f00c,
 1449                 0x1f040, 0x1f044,
 1450                 0x1f04c, 0x1f04c,
 1451                 0x1f284, 0x1f290,
 1452                 0x1f2c0, 0x1f2c0,
 1453                 0x1f2e0, 0x1f2e0,
 1454                 0x1f300, 0x1f384,
 1455                 0x1f3c0, 0x1f3c8,
 1456                 0x1f408, 0x1f40c,
 1457                 0x1f440, 0x1f444,
 1458                 0x1f44c, 0x1f44c,
 1459                 0x1f684, 0x1f690,
 1460                 0x1f6c0, 0x1f6c0,
 1461                 0x1f6e0, 0x1f6e0,
 1462                 0x1f700, 0x1f784,
 1463                 0x1f7c0, 0x1f7c8,
 1464                 0x1f808, 0x1f80c,
 1465                 0x1f840, 0x1f844,
 1466                 0x1f84c, 0x1f84c,
 1467                 0x1fa84, 0x1fa90,
 1468                 0x1fac0, 0x1fac0,
 1469                 0x1fae0, 0x1fae0,
 1470                 0x1fb00, 0x1fb84,
 1471                 0x1fbc0, 0x1fbc8,
 1472                 0x1fc08, 0x1fc0c,
 1473                 0x1fc40, 0x1fc44,
 1474                 0x1fc4c, 0x1fc4c,
 1475                 0x1fe84, 0x1fe90,
 1476                 0x1fec0, 0x1fec0,
 1477                 0x1fee0, 0x1fee0,
 1478                 0x1ff00, 0x1ff84,
 1479                 0x1ffc0, 0x1ffc8,
 1480                 0x30000, 0x30030,
 1481                 0x30100, 0x30144,
 1482                 0x30190, 0x301a0,
 1483                 0x301a8, 0x301b8,
 1484                 0x301c4, 0x301c8,
 1485                 0x301d0, 0x301d0,
 1486                 0x30200, 0x30318,
 1487                 0x30400, 0x304b4,
 1488                 0x304c0, 0x3052c,
 1489                 0x30540, 0x3061c,
 1490                 0x30800, 0x30828,
 1491                 0x30834, 0x30834,
 1492                 0x308c0, 0x30908,
 1493                 0x30910, 0x309ac,
 1494                 0x30a00, 0x30a14,
 1495                 0x30a1c, 0x30a2c,
 1496                 0x30a44, 0x30a50,
 1497                 0x30a74, 0x30a74,
 1498                 0x30a7c, 0x30afc,
 1499                 0x30b08, 0x30c24,
 1500                 0x30d00, 0x30d00,
 1501                 0x30d08, 0x30d14,
 1502                 0x30d1c, 0x30d20,
 1503                 0x30d3c, 0x30d3c,
 1504                 0x30d48, 0x30d50,
 1505                 0x31200, 0x3120c,
 1506                 0x31220, 0x31220,
 1507                 0x31240, 0x31240,
 1508                 0x31600, 0x3160c,
 1509                 0x31a00, 0x31a1c,
 1510                 0x31e00, 0x31e20,
 1511                 0x31e38, 0x31e3c,
 1512                 0x31e80, 0x31e80,
 1513                 0x31e88, 0x31ea8,
 1514                 0x31eb0, 0x31eb4,
 1515                 0x31ec8, 0x31ed4,
 1516                 0x31fb8, 0x32004,
 1517                 0x32200, 0x32200,
 1518                 0x32208, 0x32240,
 1519                 0x32248, 0x32280,
 1520                 0x32288, 0x322c0,
 1521                 0x322c8, 0x322fc,
 1522                 0x32600, 0x32630,
 1523                 0x32a00, 0x32abc,
 1524                 0x32b00, 0x32b10,
 1525                 0x32b20, 0x32b30,
 1526                 0x32b40, 0x32b50,
 1527                 0x32b60, 0x32b70,
 1528                 0x33000, 0x33028,
 1529                 0x33030, 0x33048,
 1530                 0x33060, 0x33068,
 1531                 0x33070, 0x3309c,
 1532                 0x330f0, 0x33128,
 1533                 0x33130, 0x33148,
 1534                 0x33160, 0x33168,
 1535                 0x33170, 0x3319c,
 1536                 0x331f0, 0x33238,
 1537                 0x33240, 0x33240,
 1538                 0x33248, 0x33250,
 1539                 0x3325c, 0x33264,
 1540                 0x33270, 0x332b8,
 1541                 0x332c0, 0x332e4,
 1542                 0x332f8, 0x33338,
 1543                 0x33340, 0x33340,
 1544                 0x33348, 0x33350,
 1545                 0x3335c, 0x33364,
 1546                 0x33370, 0x333b8,
 1547                 0x333c0, 0x333e4,
 1548                 0x333f8, 0x33428,
 1549                 0x33430, 0x33448,
 1550                 0x33460, 0x33468,
 1551                 0x33470, 0x3349c,
 1552                 0x334f0, 0x33528,
 1553                 0x33530, 0x33548,
 1554                 0x33560, 0x33568,
 1555                 0x33570, 0x3359c,
 1556                 0x335f0, 0x33638,
 1557                 0x33640, 0x33640,
 1558                 0x33648, 0x33650,
 1559                 0x3365c, 0x33664,
 1560                 0x33670, 0x336b8,
 1561                 0x336c0, 0x336e4,
 1562                 0x336f8, 0x33738,
 1563                 0x33740, 0x33740,
 1564                 0x33748, 0x33750,
 1565                 0x3375c, 0x33764,
 1566                 0x33770, 0x337b8,
 1567                 0x337c0, 0x337e4,
 1568                 0x337f8, 0x337fc,
 1569                 0x33814, 0x33814,
 1570                 0x3382c, 0x3382c,
 1571                 0x33880, 0x3388c,
 1572                 0x338e8, 0x338ec,
 1573                 0x33900, 0x33928,
 1574                 0x33930, 0x33948,
 1575                 0x33960, 0x33968,
 1576                 0x33970, 0x3399c,
 1577                 0x339f0, 0x33a38,
 1578                 0x33a40, 0x33a40,
 1579                 0x33a48, 0x33a50,
 1580                 0x33a5c, 0x33a64,
 1581                 0x33a70, 0x33ab8,
 1582                 0x33ac0, 0x33ae4,
 1583                 0x33af8, 0x33b10,
 1584                 0x33b28, 0x33b28,
 1585                 0x33b3c, 0x33b50,
 1586                 0x33bf0, 0x33c10,
 1587                 0x33c28, 0x33c28,
 1588                 0x33c3c, 0x33c50,
 1589                 0x33cf0, 0x33cfc,
 1590                 0x34000, 0x34030,
 1591                 0x34100, 0x34144,
 1592                 0x34190, 0x341a0,
 1593                 0x341a8, 0x341b8,
 1594                 0x341c4, 0x341c8,
 1595                 0x341d0, 0x341d0,
 1596                 0x34200, 0x34318,
 1597                 0x34400, 0x344b4,
 1598                 0x344c0, 0x3452c,
 1599                 0x34540, 0x3461c,
 1600                 0x34800, 0x34828,
 1601                 0x34834, 0x34834,
 1602                 0x348c0, 0x34908,
 1603                 0x34910, 0x349ac,
 1604                 0x34a00, 0x34a14,
 1605                 0x34a1c, 0x34a2c,
 1606                 0x34a44, 0x34a50,
 1607                 0x34a74, 0x34a74,
 1608                 0x34a7c, 0x34afc,
 1609                 0x34b08, 0x34c24,
 1610                 0x34d00, 0x34d00,
 1611                 0x34d08, 0x34d14,
 1612                 0x34d1c, 0x34d20,
 1613                 0x34d3c, 0x34d3c,
 1614                 0x34d48, 0x34d50,
 1615                 0x35200, 0x3520c,
 1616                 0x35220, 0x35220,
 1617                 0x35240, 0x35240,
 1618                 0x35600, 0x3560c,
 1619                 0x35a00, 0x35a1c,
 1620                 0x35e00, 0x35e20,
 1621                 0x35e38, 0x35e3c,
 1622                 0x35e80, 0x35e80,
 1623                 0x35e88, 0x35ea8,
 1624                 0x35eb0, 0x35eb4,
 1625                 0x35ec8, 0x35ed4,
 1626                 0x35fb8, 0x36004,
 1627                 0x36200, 0x36200,
 1628                 0x36208, 0x36240,
 1629                 0x36248, 0x36280,
 1630                 0x36288, 0x362c0,
 1631                 0x362c8, 0x362fc,
 1632                 0x36600, 0x36630,
 1633                 0x36a00, 0x36abc,
 1634                 0x36b00, 0x36b10,
 1635                 0x36b20, 0x36b30,
 1636                 0x36b40, 0x36b50,
 1637                 0x36b60, 0x36b70,
 1638                 0x37000, 0x37028,
 1639                 0x37030, 0x37048,
 1640                 0x37060, 0x37068,
 1641                 0x37070, 0x3709c,
 1642                 0x370f0, 0x37128,
 1643                 0x37130, 0x37148,
 1644                 0x37160, 0x37168,
 1645                 0x37170, 0x3719c,
 1646                 0x371f0, 0x37238,
 1647                 0x37240, 0x37240,
 1648                 0x37248, 0x37250,
 1649                 0x3725c, 0x37264,
 1650                 0x37270, 0x372b8,
 1651                 0x372c0, 0x372e4,
 1652                 0x372f8, 0x37338,
 1653                 0x37340, 0x37340,
 1654                 0x37348, 0x37350,
 1655                 0x3735c, 0x37364,
 1656                 0x37370, 0x373b8,
 1657                 0x373c0, 0x373e4,
 1658                 0x373f8, 0x37428,
 1659                 0x37430, 0x37448,
 1660                 0x37460, 0x37468,
 1661                 0x37470, 0x3749c,
 1662                 0x374f0, 0x37528,
 1663                 0x37530, 0x37548,
 1664                 0x37560, 0x37568,
 1665                 0x37570, 0x3759c,
 1666                 0x375f0, 0x37638,
 1667                 0x37640, 0x37640,
 1668                 0x37648, 0x37650,
 1669                 0x3765c, 0x37664,
 1670                 0x37670, 0x376b8,
 1671                 0x376c0, 0x376e4,
 1672                 0x376f8, 0x37738,
 1673                 0x37740, 0x37740,
 1674                 0x37748, 0x37750,
 1675                 0x3775c, 0x37764,
 1676                 0x37770, 0x377b8,
 1677                 0x377c0, 0x377e4,
 1678                 0x377f8, 0x377fc,
 1679                 0x37814, 0x37814,
 1680                 0x3782c, 0x3782c,
 1681                 0x37880, 0x3788c,
 1682                 0x378e8, 0x378ec,
 1683                 0x37900, 0x37928,
 1684                 0x37930, 0x37948,
 1685                 0x37960, 0x37968,
 1686                 0x37970, 0x3799c,
 1687                 0x379f0, 0x37a38,
 1688                 0x37a40, 0x37a40,
 1689                 0x37a48, 0x37a50,
 1690                 0x37a5c, 0x37a64,
 1691                 0x37a70, 0x37ab8,
 1692                 0x37ac0, 0x37ae4,
 1693                 0x37af8, 0x37b10,
 1694                 0x37b28, 0x37b28,
 1695                 0x37b3c, 0x37b50,
 1696                 0x37bf0, 0x37c10,
 1697                 0x37c28, 0x37c28,
 1698                 0x37c3c, 0x37c50,
 1699                 0x37cf0, 0x37cfc,
 1700                 0x38000, 0x38030,
 1701                 0x38100, 0x38144,
 1702                 0x38190, 0x381a0,
 1703                 0x381a8, 0x381b8,
 1704                 0x381c4, 0x381c8,
 1705                 0x381d0, 0x381d0,
 1706                 0x38200, 0x38318,
 1707                 0x38400, 0x384b4,
 1708                 0x384c0, 0x3852c,
 1709                 0x38540, 0x3861c,
 1710                 0x38800, 0x38828,
 1711                 0x38834, 0x38834,
 1712                 0x388c0, 0x38908,
 1713                 0x38910, 0x389ac,
 1714                 0x38a00, 0x38a14,
 1715                 0x38a1c, 0x38a2c,
 1716                 0x38a44, 0x38a50,
 1717                 0x38a74, 0x38a74,
 1718                 0x38a7c, 0x38afc,
 1719                 0x38b08, 0x38c24,
 1720                 0x38d00, 0x38d00,
 1721                 0x38d08, 0x38d14,
 1722                 0x38d1c, 0x38d20,
 1723                 0x38d3c, 0x38d3c,
 1724                 0x38d48, 0x38d50,
 1725                 0x39200, 0x3920c,
 1726                 0x39220, 0x39220,
 1727                 0x39240, 0x39240,
 1728                 0x39600, 0x3960c,
 1729                 0x39a00, 0x39a1c,
 1730                 0x39e00, 0x39e20,
 1731                 0x39e38, 0x39e3c,
 1732                 0x39e80, 0x39e80,
 1733                 0x39e88, 0x39ea8,
 1734                 0x39eb0, 0x39eb4,
 1735                 0x39ec8, 0x39ed4,
 1736                 0x39fb8, 0x3a004,
 1737                 0x3a200, 0x3a200,
 1738                 0x3a208, 0x3a240,
 1739                 0x3a248, 0x3a280,
 1740                 0x3a288, 0x3a2c0,
 1741                 0x3a2c8, 0x3a2fc,
 1742                 0x3a600, 0x3a630,
 1743                 0x3aa00, 0x3aabc,
 1744                 0x3ab00, 0x3ab10,
 1745                 0x3ab20, 0x3ab30,
 1746                 0x3ab40, 0x3ab50,
 1747                 0x3ab60, 0x3ab70,
 1748                 0x3b000, 0x3b028,
 1749                 0x3b030, 0x3b048,
 1750                 0x3b060, 0x3b068,
 1751                 0x3b070, 0x3b09c,
 1752                 0x3b0f0, 0x3b128,
 1753                 0x3b130, 0x3b148,
 1754                 0x3b160, 0x3b168,
 1755                 0x3b170, 0x3b19c,
 1756                 0x3b1f0, 0x3b238,
 1757                 0x3b240, 0x3b240,
 1758                 0x3b248, 0x3b250,
 1759                 0x3b25c, 0x3b264,
 1760                 0x3b270, 0x3b2b8,
 1761                 0x3b2c0, 0x3b2e4,
 1762                 0x3b2f8, 0x3b338,
 1763                 0x3b340, 0x3b340,
 1764                 0x3b348, 0x3b350,
 1765                 0x3b35c, 0x3b364,
 1766                 0x3b370, 0x3b3b8,
 1767                 0x3b3c0, 0x3b3e4,
 1768                 0x3b3f8, 0x3b428,
 1769                 0x3b430, 0x3b448,
 1770                 0x3b460, 0x3b468,
 1771                 0x3b470, 0x3b49c,
 1772                 0x3b4f0, 0x3b528,
 1773                 0x3b530, 0x3b548,
 1774                 0x3b560, 0x3b568,
 1775                 0x3b570, 0x3b59c,
 1776                 0x3b5f0, 0x3b638,
 1777                 0x3b640, 0x3b640,
 1778                 0x3b648, 0x3b650,
 1779                 0x3b65c, 0x3b664,
 1780                 0x3b670, 0x3b6b8,
 1781                 0x3b6c0, 0x3b6e4,
 1782                 0x3b6f8, 0x3b738,
 1783                 0x3b740, 0x3b740,
 1784                 0x3b748, 0x3b750,
 1785                 0x3b75c, 0x3b764,
 1786                 0x3b770, 0x3b7b8,
 1787                 0x3b7c0, 0x3b7e4,
 1788                 0x3b7f8, 0x3b7fc,
 1789                 0x3b814, 0x3b814,
 1790                 0x3b82c, 0x3b82c,
 1791                 0x3b880, 0x3b88c,
 1792                 0x3b8e8, 0x3b8ec,
 1793                 0x3b900, 0x3b928,
 1794                 0x3b930, 0x3b948,
 1795                 0x3b960, 0x3b968,
 1796                 0x3b970, 0x3b99c,
 1797                 0x3b9f0, 0x3ba38,
 1798                 0x3ba40, 0x3ba40,
 1799                 0x3ba48, 0x3ba50,
 1800                 0x3ba5c, 0x3ba64,
 1801                 0x3ba70, 0x3bab8,
 1802                 0x3bac0, 0x3bae4,
 1803                 0x3baf8, 0x3bb10,
 1804                 0x3bb28, 0x3bb28,
 1805                 0x3bb3c, 0x3bb50,
 1806                 0x3bbf0, 0x3bc10,
 1807                 0x3bc28, 0x3bc28,
 1808                 0x3bc3c, 0x3bc50,
 1809                 0x3bcf0, 0x3bcfc,
 1810                 0x3c000, 0x3c030,
 1811                 0x3c100, 0x3c144,
 1812                 0x3c190, 0x3c1a0,
 1813                 0x3c1a8, 0x3c1b8,
 1814                 0x3c1c4, 0x3c1c8,
 1815                 0x3c1d0, 0x3c1d0,
 1816                 0x3c200, 0x3c318,
 1817                 0x3c400, 0x3c4b4,
 1818                 0x3c4c0, 0x3c52c,
 1819                 0x3c540, 0x3c61c,
 1820                 0x3c800, 0x3c828,
 1821                 0x3c834, 0x3c834,
 1822                 0x3c8c0, 0x3c908,
 1823                 0x3c910, 0x3c9ac,
 1824                 0x3ca00, 0x3ca14,
 1825                 0x3ca1c, 0x3ca2c,
 1826                 0x3ca44, 0x3ca50,
 1827                 0x3ca74, 0x3ca74,
 1828                 0x3ca7c, 0x3cafc,
 1829                 0x3cb08, 0x3cc24,
 1830                 0x3cd00, 0x3cd00,
 1831                 0x3cd08, 0x3cd14,
 1832                 0x3cd1c, 0x3cd20,
 1833                 0x3cd3c, 0x3cd3c,
 1834                 0x3cd48, 0x3cd50,
 1835                 0x3d200, 0x3d20c,
 1836                 0x3d220, 0x3d220,
 1837                 0x3d240, 0x3d240,
 1838                 0x3d600, 0x3d60c,
 1839                 0x3da00, 0x3da1c,
 1840                 0x3de00, 0x3de20,
 1841                 0x3de38, 0x3de3c,
 1842                 0x3de80, 0x3de80,
 1843                 0x3de88, 0x3dea8,
 1844                 0x3deb0, 0x3deb4,
 1845                 0x3dec8, 0x3ded4,
 1846                 0x3dfb8, 0x3e004,
 1847                 0x3e200, 0x3e200,
 1848                 0x3e208, 0x3e240,
 1849                 0x3e248, 0x3e280,
 1850                 0x3e288, 0x3e2c0,
 1851                 0x3e2c8, 0x3e2fc,
 1852                 0x3e600, 0x3e630,
 1853                 0x3ea00, 0x3eabc,
 1854                 0x3eb00, 0x3eb10,
 1855                 0x3eb20, 0x3eb30,
 1856                 0x3eb40, 0x3eb50,
 1857                 0x3eb60, 0x3eb70,
 1858                 0x3f000, 0x3f028,
 1859                 0x3f030, 0x3f048,
 1860                 0x3f060, 0x3f068,
 1861                 0x3f070, 0x3f09c,
 1862                 0x3f0f0, 0x3f128,
 1863                 0x3f130, 0x3f148,
 1864                 0x3f160, 0x3f168,
 1865                 0x3f170, 0x3f19c,
 1866                 0x3f1f0, 0x3f238,
 1867                 0x3f240, 0x3f240,
 1868                 0x3f248, 0x3f250,
 1869                 0x3f25c, 0x3f264,
 1870                 0x3f270, 0x3f2b8,
 1871                 0x3f2c0, 0x3f2e4,
 1872                 0x3f2f8, 0x3f338,
 1873                 0x3f340, 0x3f340,
 1874                 0x3f348, 0x3f350,
 1875                 0x3f35c, 0x3f364,
 1876                 0x3f370, 0x3f3b8,
 1877                 0x3f3c0, 0x3f3e4,
 1878                 0x3f3f8, 0x3f428,
 1879                 0x3f430, 0x3f448,
 1880                 0x3f460, 0x3f468,
 1881                 0x3f470, 0x3f49c,
 1882                 0x3f4f0, 0x3f528,
 1883                 0x3f530, 0x3f548,
 1884                 0x3f560, 0x3f568,
 1885                 0x3f570, 0x3f59c,
 1886                 0x3f5f0, 0x3f638,
 1887                 0x3f640, 0x3f640,
 1888                 0x3f648, 0x3f650,
 1889                 0x3f65c, 0x3f664,
 1890                 0x3f670, 0x3f6b8,
 1891                 0x3f6c0, 0x3f6e4,
 1892                 0x3f6f8, 0x3f738,
 1893                 0x3f740, 0x3f740,
 1894                 0x3f748, 0x3f750,
 1895                 0x3f75c, 0x3f764,
 1896                 0x3f770, 0x3f7b8,
 1897                 0x3f7c0, 0x3f7e4,
 1898                 0x3f7f8, 0x3f7fc,
 1899                 0x3f814, 0x3f814,
 1900                 0x3f82c, 0x3f82c,
 1901                 0x3f880, 0x3f88c,
 1902                 0x3f8e8, 0x3f8ec,
 1903                 0x3f900, 0x3f928,
 1904                 0x3f930, 0x3f948,
 1905                 0x3f960, 0x3f968,
 1906                 0x3f970, 0x3f99c,
 1907                 0x3f9f0, 0x3fa38,
 1908                 0x3fa40, 0x3fa40,
 1909                 0x3fa48, 0x3fa50,
 1910                 0x3fa5c, 0x3fa64,
 1911                 0x3fa70, 0x3fab8,
 1912                 0x3fac0, 0x3fae4,
 1913                 0x3faf8, 0x3fb10,
 1914                 0x3fb28, 0x3fb28,
 1915                 0x3fb3c, 0x3fb50,
 1916                 0x3fbf0, 0x3fc10,
 1917                 0x3fc28, 0x3fc28,
 1918                 0x3fc3c, 0x3fc50,
 1919                 0x3fcf0, 0x3fcfc,
 1920                 0x40000, 0x4000c,
 1921                 0x40040, 0x40050,
 1922                 0x40060, 0x40068,
 1923                 0x4007c, 0x4008c,
 1924                 0x40094, 0x400b0,
 1925                 0x400c0, 0x40144,
 1926                 0x40180, 0x4018c,
 1927                 0x40200, 0x40254,
 1928                 0x40260, 0x40264,
 1929                 0x40270, 0x40288,
 1930                 0x40290, 0x40298,
 1931                 0x402ac, 0x402c8,
 1932                 0x402d0, 0x402e0,
 1933                 0x402f0, 0x402f0,
 1934                 0x40300, 0x4033c,
 1935                 0x403f8, 0x403fc,
 1936                 0x41304, 0x413c4,
 1937                 0x41400, 0x4140c,
 1938                 0x41414, 0x4141c,
 1939                 0x41480, 0x414d0,
 1940                 0x44000, 0x44054,
 1941                 0x4405c, 0x44078,
 1942                 0x440c0, 0x44174,
 1943                 0x44180, 0x441ac,
 1944                 0x441b4, 0x441b8,
 1945                 0x441c0, 0x44254,
 1946                 0x4425c, 0x44278,
 1947                 0x442c0, 0x44374,
 1948                 0x44380, 0x443ac,
 1949                 0x443b4, 0x443b8,
 1950                 0x443c0, 0x44454,
 1951                 0x4445c, 0x44478,
 1952                 0x444c0, 0x44574,
 1953                 0x44580, 0x445ac,
 1954                 0x445b4, 0x445b8,
 1955                 0x445c0, 0x44654,
 1956                 0x4465c, 0x44678,
 1957                 0x446c0, 0x44774,
 1958                 0x44780, 0x447ac,
 1959                 0x447b4, 0x447b8,
 1960                 0x447c0, 0x44854,
 1961                 0x4485c, 0x44878,
 1962                 0x448c0, 0x44974,
 1963                 0x44980, 0x449ac,
 1964                 0x449b4, 0x449b8,
 1965                 0x449c0, 0x449fc,
 1966                 0x45000, 0x45004,
 1967                 0x45010, 0x45030,
 1968                 0x45040, 0x45060,
 1969                 0x45068, 0x45068,
 1970                 0x45080, 0x45084,
 1971                 0x450a0, 0x450b0,
 1972                 0x45200, 0x45204,
 1973                 0x45210, 0x45230,
 1974                 0x45240, 0x45260,
 1975                 0x45268, 0x45268,
 1976                 0x45280, 0x45284,
 1977                 0x452a0, 0x452b0,
 1978                 0x460c0, 0x460e4,
 1979                 0x47000, 0x4703c,
 1980                 0x47044, 0x4708c,
 1981                 0x47200, 0x47250,
 1982                 0x47400, 0x47408,
 1983                 0x47414, 0x47420,
 1984                 0x47600, 0x47618,
 1985                 0x47800, 0x47814,
 1986                 0x48000, 0x4800c,
 1987                 0x48040, 0x48050,
 1988                 0x48060, 0x48068,
 1989                 0x4807c, 0x4808c,
 1990                 0x48094, 0x480b0,
 1991                 0x480c0, 0x48144,
 1992                 0x48180, 0x4818c,
 1993                 0x48200, 0x48254,
 1994                 0x48260, 0x48264,
 1995                 0x48270, 0x48288,
 1996                 0x48290, 0x48298,
 1997                 0x482ac, 0x482c8,
 1998                 0x482d0, 0x482e0,
 1999                 0x482f0, 0x482f0,
 2000                 0x48300, 0x4833c,
 2001                 0x483f8, 0x483fc,
 2002                 0x49304, 0x493c4,
 2003                 0x49400, 0x4940c,
 2004                 0x49414, 0x4941c,
 2005                 0x49480, 0x494d0,
 2006                 0x4c000, 0x4c054,
 2007                 0x4c05c, 0x4c078,
 2008                 0x4c0c0, 0x4c174,
 2009                 0x4c180, 0x4c1ac,
 2010                 0x4c1b4, 0x4c1b8,
 2011                 0x4c1c0, 0x4c254,
 2012                 0x4c25c, 0x4c278,
 2013                 0x4c2c0, 0x4c374,
 2014                 0x4c380, 0x4c3ac,
 2015                 0x4c3b4, 0x4c3b8,
 2016                 0x4c3c0, 0x4c454,
 2017                 0x4c45c, 0x4c478,
 2018                 0x4c4c0, 0x4c574,
 2019                 0x4c580, 0x4c5ac,
 2020                 0x4c5b4, 0x4c5b8,
 2021                 0x4c5c0, 0x4c654,
 2022                 0x4c65c, 0x4c678,
 2023                 0x4c6c0, 0x4c774,
 2024                 0x4c780, 0x4c7ac,
 2025                 0x4c7b4, 0x4c7b8,
 2026                 0x4c7c0, 0x4c854,
 2027                 0x4c85c, 0x4c878,
 2028                 0x4c8c0, 0x4c974,
 2029                 0x4c980, 0x4c9ac,
 2030                 0x4c9b4, 0x4c9b8,
 2031                 0x4c9c0, 0x4c9fc,
 2032                 0x4d000, 0x4d004,
 2033                 0x4d010, 0x4d030,
 2034                 0x4d040, 0x4d060,
 2035                 0x4d068, 0x4d068,
 2036                 0x4d080, 0x4d084,
 2037                 0x4d0a0, 0x4d0b0,
 2038                 0x4d200, 0x4d204,
 2039                 0x4d210, 0x4d230,
 2040                 0x4d240, 0x4d260,
 2041                 0x4d268, 0x4d268,
 2042                 0x4d280, 0x4d284,
 2043                 0x4d2a0, 0x4d2b0,
 2044                 0x4e0c0, 0x4e0e4,
 2045                 0x4f000, 0x4f03c,
 2046                 0x4f044, 0x4f08c,
 2047                 0x4f200, 0x4f250,
 2048                 0x4f400, 0x4f408,
 2049                 0x4f414, 0x4f420,
 2050                 0x4f600, 0x4f618,
 2051                 0x4f800, 0x4f814,
 2052                 0x50000, 0x50084,
 2053                 0x50090, 0x500cc,
 2054                 0x50400, 0x50400,
 2055                 0x50800, 0x50884,
 2056                 0x50890, 0x508cc,
 2057                 0x50c00, 0x50c00,
 2058                 0x51000, 0x5101c,
 2059                 0x51300, 0x51308,
 2060         };
 2061 
 2062         static const unsigned int t5vf_reg_ranges[] = {
 2063                 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
 2064                 VF_MPS_REG(A_MPS_VF_CTL),
 2065                 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
 2066                 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
 2067                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
 2068                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
 2069                 FW_T4VF_MBDATA_BASE_ADDR,
 2070                 FW_T4VF_MBDATA_BASE_ADDR +
 2071                 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
 2072         };
 2073 
 2074         static const unsigned int t6_reg_ranges[] = {
 2075                 0x1008, 0x101c,
 2076                 0x1024, 0x10a8,
 2077                 0x10b4, 0x10f8,
 2078                 0x1100, 0x1114,
 2079                 0x111c, 0x112c,
 2080                 0x1138, 0x113c,
 2081                 0x1144, 0x114c,
 2082                 0x1180, 0x1184,
 2083                 0x1190, 0x1194,
 2084                 0x11a0, 0x11a4,
 2085                 0x11b0, 0x11c4,
 2086                 0x11fc, 0x123c,
 2087                 0x1254, 0x1274,
 2088                 0x1280, 0x133c,
 2089                 0x1800, 0x18fc,
 2090                 0x3000, 0x302c,
 2091                 0x3060, 0x30b0,
 2092                 0x30b8, 0x30d8,
 2093                 0x30e0, 0x30fc,
 2094                 0x3140, 0x357c,
 2095                 0x35a8, 0x35cc,
 2096                 0x35ec, 0x35ec,
 2097                 0x3600, 0x5624,
 2098                 0x56cc, 0x56ec,
 2099                 0x56f4, 0x5720,
 2100                 0x5728, 0x575c,
 2101                 0x580c, 0x5814,
 2102                 0x5890, 0x589c,
 2103                 0x58a4, 0x58ac,
 2104                 0x58b8, 0x58bc,
 2105                 0x5940, 0x595c,
 2106                 0x5980, 0x598c,
 2107                 0x59b0, 0x59c8,
 2108                 0x59d0, 0x59dc,
 2109                 0x59fc, 0x5a18,
 2110                 0x5a60, 0x5a6c,
 2111                 0x5a80, 0x5a8c,
 2112                 0x5a94, 0x5a9c,
 2113                 0x5b94, 0x5bfc,
 2114                 0x5c10, 0x5e48,
 2115                 0x5e50, 0x5e94,
 2116                 0x5ea0, 0x5eb0,
 2117                 0x5ec0, 0x5ec0,
 2118                 0x5ec8, 0x5ed0,
 2119                 0x5ee0, 0x5ee0,
 2120                 0x5ef0, 0x5ef0,
 2121                 0x5f00, 0x5f00,
 2122                 0x6000, 0x6020,
 2123                 0x6028, 0x6040,
 2124                 0x6058, 0x609c,
 2125                 0x60a8, 0x619c,
 2126                 0x7700, 0x7798,
 2127                 0x77c0, 0x7880,
 2128                 0x78cc, 0x78fc,
 2129                 0x7b00, 0x7b58,
 2130                 0x7b60, 0x7b84,
 2131                 0x7b8c, 0x7c54,
 2132                 0x7d00, 0x7d38,
 2133                 0x7d40, 0x7d84,
 2134                 0x7d8c, 0x7ddc,
 2135                 0x7de4, 0x7e04,
 2136                 0x7e10, 0x7e1c,
 2137                 0x7e24, 0x7e38,
 2138                 0x7e40, 0x7e44,
 2139                 0x7e4c, 0x7e78,
 2140                 0x7e80, 0x7edc,
 2141                 0x7ee8, 0x7efc,
 2142                 0x8dc0, 0x8de0,
 2143                 0x8df8, 0x8e04,
 2144                 0x8e10, 0x8e84,
 2145                 0x8ea0, 0x8f88,
 2146                 0x8fb8, 0x9058,
 2147                 0x9060, 0x9060,
 2148                 0x9068, 0x90f8,
 2149                 0x9100, 0x9124,
 2150                 0x9400, 0x9470,
 2151                 0x9600, 0x9600,
 2152                 0x9608, 0x9638,
 2153                 0x9640, 0x9704,
 2154                 0x9710, 0x971c,
 2155                 0x9800, 0x9808,
 2156                 0x9810, 0x9864,
 2157                 0x9c00, 0x9c6c,
 2158                 0x9c80, 0x9cec,
 2159                 0x9d00, 0x9d6c,
 2160                 0x9d80, 0x9dec,
 2161                 0x9e00, 0x9e6c,
 2162                 0x9e80, 0x9eec,
 2163                 0x9f00, 0x9f6c,
 2164                 0x9f80, 0xa020,
 2165                 0xd000, 0xd03c,
 2166                 0xd100, 0xd118,
 2167                 0xd200, 0xd214,
 2168                 0xd220, 0xd234,
 2169                 0xd240, 0xd254,
 2170                 0xd260, 0xd274,
 2171                 0xd280, 0xd294,
 2172                 0xd2a0, 0xd2b4,
 2173                 0xd2c0, 0xd2d4,
 2174                 0xd2e0, 0xd2f4,
 2175                 0xd300, 0xd31c,
 2176                 0xdfc0, 0xdfe0,
 2177                 0xe000, 0xf008,
 2178                 0xf010, 0xf018,
 2179                 0xf020, 0xf028,
 2180                 0x11000, 0x11014,
 2181                 0x11048, 0x1106c,
 2182                 0x11074, 0x11088,
 2183                 0x11098, 0x11120,
 2184                 0x1112c, 0x1117c,
 2185                 0x11190, 0x112e0,
 2186                 0x11300, 0x1130c,
 2187                 0x12000, 0x1206c,
 2188                 0x19040, 0x1906c,
 2189                 0x19078, 0x19080,
 2190                 0x1908c, 0x190e8,
 2191                 0x190f0, 0x190f8,
 2192                 0x19100, 0x19110,
 2193                 0x19120, 0x19124,
 2194                 0x19150, 0x19194,
 2195                 0x1919c, 0x191b0,
 2196                 0x191d0, 0x191e8,
 2197                 0x19238, 0x19290,
 2198                 0x192a4, 0x192b0,
 2199                 0x19348, 0x1934c,
 2200                 0x193f8, 0x19418,
 2201                 0x19420, 0x19428,
 2202                 0x19430, 0x19444,
 2203                 0x1944c, 0x1946c,
 2204                 0x19474, 0x19474,
 2205                 0x19490, 0x194cc,
 2206                 0x194f0, 0x194f8,
 2207                 0x19c00, 0x19c48,
 2208                 0x19c50, 0x19c80,
 2209                 0x19c94, 0x19c98,
 2210                 0x19ca0, 0x19cbc,
 2211                 0x19ce4, 0x19ce4,
 2212                 0x19cf0, 0x19cf8,
 2213                 0x19d00, 0x19d28,
 2214                 0x19d50, 0x19d78,
 2215                 0x19d94, 0x19d98,
 2216                 0x19da0, 0x19de0,
 2217                 0x19df0, 0x19e10,
 2218                 0x19e50, 0x19e6c,
 2219                 0x19ea0, 0x19ebc,
 2220                 0x19ec4, 0x19ef4,
 2221                 0x19f04, 0x19f2c,
 2222                 0x19f34, 0x19f34,
 2223                 0x19f40, 0x19f50,
 2224                 0x19f90, 0x19fac,
 2225                 0x19fc4, 0x19fc8,
 2226                 0x19fd0, 0x19fe4,
 2227                 0x1a000, 0x1a004,
 2228                 0x1a010, 0x1a06c,
 2229                 0x1a0b0, 0x1a0e4,
 2230                 0x1a0ec, 0x1a0f8,
 2231                 0x1a100, 0x1a108,
 2232                 0x1a114, 0x1a130,
 2233                 0x1a138, 0x1a1c4,
 2234                 0x1a1fc, 0x1a1fc,
 2235                 0x1e008, 0x1e00c,
 2236                 0x1e040, 0x1e044,
 2237                 0x1e04c, 0x1e04c,
 2238                 0x1e284, 0x1e290,
 2239                 0x1e2c0, 0x1e2c0,
 2240                 0x1e2e0, 0x1e2e0,
 2241                 0x1e300, 0x1e384,
 2242                 0x1e3c0, 0x1e3c8,
 2243                 0x1e408, 0x1e40c,
 2244                 0x1e440, 0x1e444,
 2245                 0x1e44c, 0x1e44c,
 2246                 0x1e684, 0x1e690,
 2247                 0x1e6c0, 0x1e6c0,
 2248                 0x1e6e0, 0x1e6e0,
 2249                 0x1e700, 0x1e784,
 2250                 0x1e7c0, 0x1e7c8,
 2251                 0x1e808, 0x1e80c,
 2252                 0x1e840, 0x1e844,
 2253                 0x1e84c, 0x1e84c,
 2254                 0x1ea84, 0x1ea90,
 2255                 0x1eac0, 0x1eac0,
 2256                 0x1eae0, 0x1eae0,
 2257                 0x1eb00, 0x1eb84,
 2258                 0x1ebc0, 0x1ebc8,
 2259                 0x1ec08, 0x1ec0c,
 2260                 0x1ec40, 0x1ec44,
 2261                 0x1ec4c, 0x1ec4c,
 2262                 0x1ee84, 0x1ee90,
 2263                 0x1eec0, 0x1eec0,
 2264                 0x1eee0, 0x1eee0,
 2265                 0x1ef00, 0x1ef84,
 2266                 0x1efc0, 0x1efc8,
 2267                 0x1f008, 0x1f00c,
 2268                 0x1f040, 0x1f044,
 2269                 0x1f04c, 0x1f04c,
 2270                 0x1f284, 0x1f290,
 2271                 0x1f2c0, 0x1f2c0,
 2272                 0x1f2e0, 0x1f2e0,
 2273                 0x1f300, 0x1f384,
 2274                 0x1f3c0, 0x1f3c8,
 2275                 0x1f408, 0x1f40c,
 2276                 0x1f440, 0x1f444,
 2277                 0x1f44c, 0x1f44c,
 2278                 0x1f684, 0x1f690,
 2279                 0x1f6c0, 0x1f6c0,
 2280                 0x1f6e0, 0x1f6e0,
 2281                 0x1f700, 0x1f784,
 2282                 0x1f7c0, 0x1f7c8,
 2283                 0x1f808, 0x1f80c,
 2284                 0x1f840, 0x1f844,
 2285                 0x1f84c, 0x1f84c,
 2286                 0x1fa84, 0x1fa90,
 2287                 0x1fac0, 0x1fac0,
 2288                 0x1fae0, 0x1fae0,
 2289                 0x1fb00, 0x1fb84,
 2290                 0x1fbc0, 0x1fbc8,
 2291                 0x1fc08, 0x1fc0c,
 2292                 0x1fc40, 0x1fc44,
 2293                 0x1fc4c, 0x1fc4c,
 2294                 0x1fe84, 0x1fe90,
 2295                 0x1fec0, 0x1fec0,
 2296                 0x1fee0, 0x1fee0,
 2297                 0x1ff00, 0x1ff84,
 2298                 0x1ffc0, 0x1ffc8,
 2299                 0x30000, 0x30030,
 2300                 0x30100, 0x30168,
 2301                 0x30190, 0x301a0,
 2302                 0x301a8, 0x301b8,
 2303                 0x301c4, 0x301c8,
 2304                 0x301d0, 0x301d0,
 2305                 0x30200, 0x30320,
 2306                 0x30400, 0x304b4,
 2307                 0x304c0, 0x3052c,
 2308                 0x30540, 0x3061c,
 2309                 0x30800, 0x308a0,
 2310                 0x308c0, 0x30908,
 2311                 0x30910, 0x309b8,
 2312                 0x30a00, 0x30a04,
 2313                 0x30a0c, 0x30a14,
 2314                 0x30a1c, 0x30a2c,
 2315                 0x30a44, 0x30a50,
 2316                 0x30a74, 0x30a74,
 2317                 0x30a7c, 0x30afc,
 2318                 0x30b08, 0x30c24,
 2319                 0x30d00, 0x30d14,
 2320                 0x30d1c, 0x30d3c,
 2321                 0x30d44, 0x30d4c,
 2322                 0x30d54, 0x30d74,
 2323                 0x30d7c, 0x30d7c,
 2324                 0x30de0, 0x30de0,
 2325                 0x30e00, 0x30ed4,
 2326                 0x30f00, 0x30fa4,
 2327                 0x30fc0, 0x30fc4,
 2328                 0x31000, 0x31004,
 2329                 0x31080, 0x310fc,
 2330                 0x31208, 0x31220,
 2331                 0x3123c, 0x31254,
 2332                 0x31300, 0x31300,
 2333                 0x31308, 0x3131c,
 2334                 0x31338, 0x3133c,
 2335                 0x31380, 0x31380,
 2336                 0x31388, 0x313a8,
 2337                 0x313b4, 0x313b4,
 2338                 0x31400, 0x31420,
 2339                 0x31438, 0x3143c,
 2340                 0x31480, 0x31480,
 2341                 0x314a8, 0x314a8,
 2342                 0x314b0, 0x314b4,
 2343                 0x314c8, 0x314d4,
 2344                 0x31a40, 0x31a4c,
 2345                 0x31af0, 0x31b20,
 2346                 0x31b38, 0x31b3c,
 2347                 0x31b80, 0x31b80,
 2348                 0x31ba8, 0x31ba8,
 2349                 0x31bb0, 0x31bb4,
 2350                 0x31bc8, 0x31bd4,
 2351                 0x32140, 0x3218c,
 2352                 0x321f0, 0x321f4,
 2353                 0x32200, 0x32200,
 2354                 0x32218, 0x32218,
 2355                 0x32400, 0x32400,
 2356                 0x32408, 0x3241c,
 2357                 0x32618, 0x32620,
 2358                 0x32664, 0x32664,
 2359                 0x326a8, 0x326a8,
 2360                 0x326ec, 0x326ec,
 2361                 0x32a00, 0x32abc,
 2362                 0x32b00, 0x32b18,
 2363                 0x32b20, 0x32b38,
 2364                 0x32b40, 0x32b58,
 2365                 0x32b60, 0x32b78,
 2366                 0x32c00, 0x32c00,
 2367                 0x32c08, 0x32c3c,
 2368                 0x33000, 0x3302c,
 2369                 0x33034, 0x33050,
 2370                 0x33058, 0x33058,
 2371                 0x33060, 0x3308c,
 2372                 0x3309c, 0x330ac,
 2373                 0x330c0, 0x330c0,
 2374                 0x330c8, 0x330d0,
 2375                 0x330d8, 0x330e0,
 2376                 0x330ec, 0x3312c,
 2377                 0x33134, 0x33150,
 2378                 0x33158, 0x33158,
 2379                 0x33160, 0x3318c,
 2380                 0x3319c, 0x331ac,
 2381                 0x331c0, 0x331c0,
 2382                 0x331c8, 0x331d0,
 2383                 0x331d8, 0x331e0,
 2384                 0x331ec, 0x33290,
 2385                 0x33298, 0x332c4,
 2386                 0x332e4, 0x33390,
 2387                 0x33398, 0x333c4,
 2388                 0x333e4, 0x3342c,
 2389                 0x33434, 0x33450,
 2390                 0x33458, 0x33458,
 2391                 0x33460, 0x3348c,
 2392                 0x3349c, 0x334ac,
 2393                 0x334c0, 0x334c0,
 2394                 0x334c8, 0x334d0,
 2395                 0x334d8, 0x334e0,
 2396                 0x334ec, 0x3352c,
 2397                 0x33534, 0x33550,
 2398                 0x33558, 0x33558,
 2399                 0x33560, 0x3358c,
 2400                 0x3359c, 0x335ac,
 2401                 0x335c0, 0x335c0,
 2402                 0x335c8, 0x335d0,
 2403                 0x335d8, 0x335e0,
 2404                 0x335ec, 0x33690,
 2405                 0x33698, 0x336c4,
 2406                 0x336e4, 0x33790,
 2407                 0x33798, 0x337c4,
 2408                 0x337e4, 0x337fc,
 2409                 0x33814, 0x33814,
 2410                 0x33854, 0x33868,
 2411                 0x33880, 0x3388c,
 2412                 0x338c0, 0x338d0,
 2413                 0x338e8, 0x338ec,
 2414                 0x33900, 0x3392c,
 2415                 0x33934, 0x33950,
 2416                 0x33958, 0x33958,
 2417                 0x33960, 0x3398c,
 2418                 0x3399c, 0x339ac,
 2419                 0x339c0, 0x339c0,
 2420                 0x339c8, 0x339d0,
 2421                 0x339d8, 0x339e0,
 2422                 0x339ec, 0x33a90,
 2423                 0x33a98, 0x33ac4,
 2424                 0x33ae4, 0x33b10,
 2425                 0x33b24, 0x33b28,
 2426                 0x33b38, 0x33b50,
 2427                 0x33bf0, 0x33c10,
 2428                 0x33c24, 0x33c28,
 2429                 0x33c38, 0x33c50,
 2430                 0x33cf0, 0x33cfc,
 2431                 0x34000, 0x34030,
 2432                 0x34100, 0x34168,
 2433                 0x34190, 0x341a0,
 2434                 0x341a8, 0x341b8,
 2435                 0x341c4, 0x341c8,
 2436                 0x341d0, 0x341d0,
 2437                 0x34200, 0x34320,
 2438                 0x34400, 0x344b4,
 2439                 0x344c0, 0x3452c,
 2440                 0x34540, 0x3461c,
 2441                 0x34800, 0x348a0,
 2442                 0x348c0, 0x34908,
 2443                 0x34910, 0x349b8,
 2444                 0x34a00, 0x34a04,
 2445                 0x34a0c, 0x34a14,
 2446                 0x34a1c, 0x34a2c,
 2447                 0x34a44, 0x34a50,
 2448                 0x34a74, 0x34a74,
 2449                 0x34a7c, 0x34afc,
 2450                 0x34b08, 0x34c24,
 2451                 0x34d00, 0x34d14,
 2452                 0x34d1c, 0x34d3c,
 2453                 0x34d44, 0x34d4c,
 2454                 0x34d54, 0x34d74,
 2455                 0x34d7c, 0x34d7c,
 2456                 0x34de0, 0x34de0,
 2457                 0x34e00, 0x34ed4,
 2458                 0x34f00, 0x34fa4,
 2459                 0x34fc0, 0x34fc4,
 2460                 0x35000, 0x35004,
 2461                 0x35080, 0x350fc,
 2462                 0x35208, 0x35220,
 2463                 0x3523c, 0x35254,
 2464                 0x35300, 0x35300,
 2465                 0x35308, 0x3531c,
 2466                 0x35338, 0x3533c,
 2467                 0x35380, 0x35380,
 2468                 0x35388, 0x353a8,
 2469                 0x353b4, 0x353b4,
 2470                 0x35400, 0x35420,
 2471                 0x35438, 0x3543c,
 2472                 0x35480, 0x35480,
 2473                 0x354a8, 0x354a8,
 2474                 0x354b0, 0x354b4,
 2475                 0x354c8, 0x354d4,
 2476                 0x35a40, 0x35a4c,
 2477                 0x35af0, 0x35b20,
 2478                 0x35b38, 0x35b3c,
 2479                 0x35b80, 0x35b80,
 2480                 0x35ba8, 0x35ba8,
 2481                 0x35bb0, 0x35bb4,
 2482                 0x35bc8, 0x35bd4,
 2483                 0x36140, 0x3618c,
 2484                 0x361f0, 0x361f4,
 2485                 0x36200, 0x36200,
 2486                 0x36218, 0x36218,
 2487                 0x36400, 0x36400,
 2488                 0x36408, 0x3641c,
 2489                 0x36618, 0x36620,
 2490                 0x36664, 0x36664,
 2491                 0x366a8, 0x366a8,
 2492                 0x366ec, 0x366ec,
 2493                 0x36a00, 0x36abc,
 2494                 0x36b00, 0x36b18,
 2495                 0x36b20, 0x36b38,
 2496                 0x36b40, 0x36b58,
 2497                 0x36b60, 0x36b78,
 2498                 0x36c00, 0x36c00,
 2499                 0x36c08, 0x36c3c,
 2500                 0x37000, 0x3702c,
 2501                 0x37034, 0x37050,
 2502                 0x37058, 0x37058,
 2503                 0x37060, 0x3708c,
 2504                 0x3709c, 0x370ac,
 2505                 0x370c0, 0x370c0,
 2506                 0x370c8, 0x370d0,
 2507                 0x370d8, 0x370e0,
 2508                 0x370ec, 0x3712c,
 2509                 0x37134, 0x37150,
 2510                 0x37158, 0x37158,
 2511                 0x37160, 0x3718c,
 2512                 0x3719c, 0x371ac,
 2513                 0x371c0, 0x371c0,
 2514                 0x371c8, 0x371d0,
 2515                 0x371d8, 0x371e0,
 2516                 0x371ec, 0x37290,
 2517                 0x37298, 0x372c4,
 2518                 0x372e4, 0x37390,
 2519                 0x37398, 0x373c4,
 2520                 0x373e4, 0x3742c,
 2521                 0x37434, 0x37450,
 2522                 0x37458, 0x37458,
 2523                 0x37460, 0x3748c,
 2524                 0x3749c, 0x374ac,
 2525                 0x374c0, 0x374c0,
 2526                 0x374c8, 0x374d0,
 2527                 0x374d8, 0x374e0,
 2528                 0x374ec, 0x3752c,
 2529                 0x37534, 0x37550,
 2530                 0x37558, 0x37558,
 2531                 0x37560, 0x3758c,
 2532                 0x3759c, 0x375ac,
 2533                 0x375c0, 0x375c0,
 2534                 0x375c8, 0x375d0,
 2535                 0x375d8, 0x375e0,
 2536                 0x375ec, 0x37690,
 2537                 0x37698, 0x376c4,
 2538                 0x376e4, 0x37790,
 2539                 0x37798, 0x377c4,
 2540                 0x377e4, 0x377fc,
 2541                 0x37814, 0x37814,
 2542                 0x37854, 0x37868,
 2543                 0x37880, 0x3788c,
 2544                 0x378c0, 0x378d0,
 2545                 0x378e8, 0x378ec,
 2546                 0x37900, 0x3792c,
 2547                 0x37934, 0x37950,
 2548                 0x37958, 0x37958,
 2549                 0x37960, 0x3798c,
 2550                 0x3799c, 0x379ac,
 2551                 0x379c0, 0x379c0,
 2552                 0x379c8, 0x379d0,
 2553                 0x379d8, 0x379e0,
 2554                 0x379ec, 0x37a90,
 2555                 0x37a98, 0x37ac4,
 2556                 0x37ae4, 0x37b10,
 2557                 0x37b24, 0x37b28,
 2558                 0x37b38, 0x37b50,
 2559                 0x37bf0, 0x37c10,
 2560                 0x37c24, 0x37c28,
 2561                 0x37c38, 0x37c50,
 2562                 0x37cf0, 0x37cfc,
 2563                 0x40040, 0x40040,
 2564                 0x40080, 0x40084,
 2565                 0x40100, 0x40100,
 2566                 0x40140, 0x401bc,
 2567                 0x40200, 0x40214,
 2568                 0x40228, 0x40228,
 2569                 0x40240, 0x40258,
 2570                 0x40280, 0x40280,
 2571                 0x40304, 0x40304,
 2572                 0x40330, 0x4033c,
 2573                 0x41304, 0x413c8,
 2574                 0x413d0, 0x413dc,
 2575                 0x413f0, 0x413f0,
 2576                 0x41400, 0x4140c,
 2577                 0x41414, 0x4141c,
 2578                 0x41480, 0x414d0,
 2579                 0x44000, 0x4407c,
 2580                 0x440c0, 0x441ac,
 2581                 0x441b4, 0x4427c,
 2582                 0x442c0, 0x443ac,
 2583                 0x443b4, 0x4447c,
 2584                 0x444c0, 0x445ac,
 2585                 0x445b4, 0x4467c,
 2586                 0x446c0, 0x447ac,
 2587                 0x447b4, 0x4487c,
 2588                 0x448c0, 0x449ac,
 2589                 0x449b4, 0x44a7c,
 2590                 0x44ac0, 0x44bac,
 2591                 0x44bb4, 0x44c7c,
 2592                 0x44cc0, 0x44dac,
 2593                 0x44db4, 0x44e7c,
 2594                 0x44ec0, 0x44fac,
 2595                 0x44fb4, 0x4507c,
 2596                 0x450c0, 0x451ac,
 2597                 0x451b4, 0x451fc,
 2598                 0x45800, 0x45804,
 2599                 0x45810, 0x45830,
 2600                 0x45840, 0x45860,
 2601                 0x45868, 0x45868,
 2602                 0x45880, 0x45884,
 2603                 0x458a0, 0x458b0,
 2604                 0x45a00, 0x45a04,
 2605                 0x45a10, 0x45a30,
 2606                 0x45a40, 0x45a60,
 2607                 0x45a68, 0x45a68,
 2608                 0x45a80, 0x45a84,
 2609                 0x45aa0, 0x45ab0,
 2610                 0x460c0, 0x460e4,
 2611                 0x47000, 0x4703c,
 2612                 0x47044, 0x4708c,
 2613                 0x47200, 0x47250,
 2614                 0x47400, 0x47408,
 2615                 0x47414, 0x47420,
 2616                 0x47600, 0x47618,
 2617                 0x47800, 0x47814,
 2618                 0x47820, 0x4782c,
 2619                 0x50000, 0x50084,
 2620                 0x50090, 0x500cc,
 2621                 0x50300, 0x50384,
 2622                 0x50400, 0x50400,
 2623                 0x50800, 0x50884,
 2624                 0x50890, 0x508cc,
 2625                 0x50b00, 0x50b84,
 2626                 0x50c00, 0x50c00,
 2627                 0x51000, 0x51020,
 2628                 0x51028, 0x510b0,
 2629                 0x51300, 0x51324,
 2630         };
 2631 
 2632         static const unsigned int t6vf_reg_ranges[] = {
 2633                 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
 2634                 VF_MPS_REG(A_MPS_VF_CTL),
 2635                 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
 2636                 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
 2637                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
 2638                 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
 2639                 FW_T6VF_MBDATA_BASE_ADDR,
 2640                 FW_T6VF_MBDATA_BASE_ADDR +
 2641                 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
 2642         };
 2643 
 2644         u32 *buf_end = (u32 *)(buf + buf_size);
 2645         const unsigned int *reg_ranges;
 2646         int reg_ranges_size, range;
 2647         unsigned int chip_version = chip_id(adap);
 2648 
 2649         /*
 2650          * Select the right set of register ranges to dump depending on the
 2651          * adapter chip type.
 2652          */
 2653         switch (chip_version) {
 2654         case CHELSIO_T4:
 2655                 if (adap->flags & IS_VF) {
 2656                         reg_ranges = t4vf_reg_ranges;
 2657                         reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
 2658                 } else {
 2659                         reg_ranges = t4_reg_ranges;
 2660                         reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
 2661                 }
 2662                 break;
 2663 
 2664         case CHELSIO_T5:
 2665                 if (adap->flags & IS_VF) {
 2666                         reg_ranges = t5vf_reg_ranges;
 2667                         reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
 2668                 } else {
 2669                         reg_ranges = t5_reg_ranges;
 2670                         reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
 2671                 }
 2672                 break;
 2673 
 2674         case CHELSIO_T6:
 2675                 if (adap->flags & IS_VF) {
 2676                         reg_ranges = t6vf_reg_ranges;
 2677                         reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
 2678                 } else {
 2679                         reg_ranges = t6_reg_ranges;
 2680                         reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
 2681                 }
 2682                 break;
 2683 
 2684         default:
 2685                 CH_ERR(adap,
 2686                         "Unsupported chip version %d\n", chip_version);
 2687                 return;
 2688         }
 2689 
 2690         /*
 2691          * Clear the register buffer and insert the appropriate register
 2692          * values selected by the above register ranges.
 2693          */
 2694         memset(buf, 0, buf_size);
 2695         for (range = 0; range < reg_ranges_size; range += 2) {
 2696                 unsigned int reg = reg_ranges[range];
 2697                 unsigned int last_reg = reg_ranges[range + 1];
 2698                 u32 *bufp = (u32 *)(buf + reg);
 2699 
 2700                 /*
 2701                  * Iterate across the register range, filling in the register
 2702                  * buffer but never writing past its end.
 2703                  */
 2704                 while (reg <= last_reg && bufp < buf_end) {
 2705                         *bufp++ = t4_read_reg(adap, reg);
 2706                         reg += sizeof(u32);
 2707                 }
 2708         }
 2709 }
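
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * every register-range table above is consumed as (first, last) pairs of
 * 4-byte register addresses, so one pair describes (last - first) / 4 + 1
 * registers; e.g. the first T5 pair 0x1008, 0x10c0 covers
 * (0x10c0 - 0x1008) / 4 + 1 == 47 registers.  A hypothetical helper that
 * totals the registers described by such a table might look like this.
 */
#if 0   /* example only */
static unsigned int
count_regs_in_ranges(const unsigned int *ranges, int nranges)
{
        unsigned int total = 0;
        int i;

        /* ranges[i] is the first register of a span, ranges[i + 1] the
         * last one (inclusive); entries always come in pairs. */
        for (i = 0; i < nranges; i += 2)
                total += (ranges[i + 1] - ranges[i]) / 4 + 1;

        return (total);
}
#endif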
 2710 
 2711 /*
 2712  * Partial EEPROM Vital Product Data structure.  The VPD starts with one ID
 2713  * header followed by one or more VPD-R sections, each with its own header.
 2714  */
 2715 struct t4_vpd_hdr {
 2716         u8  id_tag;
 2717         u8  id_len[2];
 2718         u8  id_data[ID_LEN];
 2719 };
 2720 
 2721 struct t4_vpdr_hdr {
 2722         u8  vpdr_tag;
 2723         u8  vpdr_len[2];
 2724 };
 2725 
 2726 /*
 2727  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 2728  */
 2729 #define EEPROM_DELAY            10              /* 10us per poll spin */
 2730 #define EEPROM_MAX_POLL         5000            /* x 5000 == 50ms */
 2731 
 2732 #define EEPROM_STAT_ADDR        0x7bfc
 2733 #define VPD_SIZE                0x800
 2734 #define VPD_BASE                0x400
 2735 #define VPD_BASE_OLD            0
 2736 #define VPD_LEN                 1024
 2737 #define VPD_INFO_FLD_HDR_SIZE   3
 2738 #define CHELSIO_VPD_UNIQUE_ID   0x82
 2739 
 2740 /*
 2741  * Small utility function to wait till any outstanding VPD Access is complete.
 2742  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 2743  * VPD Access in flight.  This allows us to handle the problem of having a
 2744  * previous VPD Access time out and prevent an attempt to inject a new VPD
 2745  * Request before any in-flight VPD request has completed.
 2746  */
 2747 static int t4_seeprom_wait(struct adapter *adapter)
 2748 {
 2749         unsigned int base = adapter->params.pci.vpd_cap_addr;
 2750         int max_poll;
 2751 
 2752         /*
 2753          * If no VPD Access is in flight, we can just return success right
 2754          * away.
 2755          */
 2756         if (!adapter->vpd_busy)
 2757                 return 0;
 2758 
 2759         /*
 2760          * Poll the VPD Capability Address/Flag register waiting for it
 2761          * to indicate that the operation is complete.
 2762          */
 2763         max_poll = EEPROM_MAX_POLL;
 2764         do {
 2765                 u16 val;
 2766 
 2767                 udelay(EEPROM_DELAY);
 2768                 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
 2769 
 2770                 /*
 2771                  * If the operation is complete, mark the VPD as no longer
 2772                  * busy and return success.
 2773                  */
 2774                 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
 2775                         adapter->vpd_busy = 0;
 2776                         return 0;
 2777                 }
 2778         } while (--max_poll);
 2779 
 2780         /*
 2781          * Failure!  Note that we leave the VPD Busy status set in order to
 2782          * avoid pushing a new VPD Access request into the VPD Capability till
 2783          * the current operation eventually succeeds.  It's a bug to issue a
 2784          * new request when an existing request is in flight and will result
 2785          * in corrupt hardware state.
 2786          */
 2787         return -ETIMEDOUT;
 2788 }
 2789 
 2790 /**
 2791  *      t4_seeprom_read - read a serial EEPROM location
 2792  *      @adapter: adapter to read
 2793  *      @addr: EEPROM virtual address
 2794  *      @data: where to store the read data
 2795  *
 2796  *      Read a 32-bit word from a location in serial EEPROM using the card's PCI
 2797  *      VPD capability.  Note that this function must be called with a virtual
 2798  *      address.
 2799  */
 2800 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
 2801 {
 2802         unsigned int base = adapter->params.pci.vpd_cap_addr;
 2803         int ret;
 2804 
 2805         /*
 2806          * VPD Accesses must always be 4-byte aligned!
 2807          */
 2808         if (addr >= EEPROMVSIZE || (addr & 3))
 2809                 return -EINVAL;
 2810 
 2811         /*
 2812          * Wait for any previous operation which may still be in flight to
 2813          * complete.
 2814          */
 2815         ret = t4_seeprom_wait(adapter);
 2816         if (ret) {
 2817                 CH_ERR(adapter, "VPD still busy from previous operation\n");
 2818                 return ret;
 2819         }
 2820 
 2821         /*
 2822          * Issue our new VPD Read request, mark the VPD as being busy and wait
 2823          * for our request to complete.  If it doesn't complete, note the
 2824          * error and return it to our caller.  Note that we do not reset the
 2825          * VPD Busy status!
 2826          */
 2827         t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
 2828         adapter->vpd_busy = 1;
 2829         adapter->vpd_flag = PCI_VPD_ADDR_F;
 2830         ret = t4_seeprom_wait(adapter);
 2831         if (ret) {
 2832                 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
 2833                 return ret;
 2834         }
 2835 
 2836         /*
 2837          * Grab the returned data, swizzle it into our endianness and
 2838          * return success.
 2839          */
 2840         t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
 2841         *data = le32_to_cpu(*data);
 2842         return 0;
 2843 }
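
/*
 * Illustrative sketch, not part of the driver: t4_seeprom_read() operates on
 * 4-byte aligned virtual EEPROM addresses, so a caller that wants a single
 * byte reads the containing word and extracts the byte from the host-order
 * value.  The helper below is an assumption made for this example only and
 * is compiled out.
 */
#if 0
static int example_read_vpd_byte(struct adapter *adapter, u32 addr, u8 *bp)
{
        u32 word;
        int ret;

        /* Read the aligned 32-bit word that contains the requested byte. */
        ret = t4_seeprom_read(adapter, addr & ~3, &word);
        if (ret == 0)
                *bp = (word >> (8 * (addr & 3))) & 0xff;
        return ret;
}
#endif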
 2844 
 2845 /**
 2846  *      t4_seeprom_write - write a serial EEPROM location
 2847  *      @adapter: adapter to write
 2848  *      @addr: virtual EEPROM address
 2849  *      @data: value to write
 2850  *
 2851  *      Write a 32-bit word to a location in serial EEPROM using the card's PCI
 2852  *      VPD capability.  Note that this function must be called with a virtual
 2853  *      address.
 2854  */
 2855 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
 2856 {
 2857         unsigned int base = adapter->params.pci.vpd_cap_addr;
 2858         int ret;
 2859         u32 stats_reg;
 2860         int max_poll;
 2861 
 2862         /*
 2863          * VPD Accesses must always be 4-byte aligned!
 2864          */
 2865         if (addr >= EEPROMVSIZE || (addr & 3))
 2866                 return -EINVAL;
 2867 
 2868         /*
 2869          * Wait for any previous operation which may still be in flight to
 2870          * complete.
 2871          */
 2872         ret = t4_seeprom_wait(adapter);
 2873         if (ret) {
 2874                 CH_ERR(adapter, "VPD still busy from previous operation\n");
 2875                 return ret;
 2876         }
 2877 
 2878         /*
 2879          * Issue our new VPD Write request, mark the VPD as being busy and wait
 2880          * for our request to complete.  If it doesn't complete, note the
 2881          * error and return it to our caller.  Note that we do not reset the
 2882          * VPD Busy status!
 2883          */
 2884         t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
 2885                                  cpu_to_le32(data));
 2886         t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
 2887                                  (u16)addr | PCI_VPD_ADDR_F);
 2888         adapter->vpd_busy = 1;
 2889         adapter->vpd_flag = 0;
 2890         ret = t4_seeprom_wait(adapter);
 2891         if (ret) {
 2892                 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
 2893                 return ret;
 2894         }
 2895 
 2896         /*
 2897          * Reset PCI_VPD_DATA register after a transaction and wait for our
 2898          * request to complete. If it doesn't complete, return error.
 2899          */
 2900         t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
 2901         max_poll = EEPROM_MAX_POLL;
 2902         do {
 2903                 udelay(EEPROM_DELAY);
 2904                 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
 2905         } while ((stats_reg & 0x1) && --max_poll);
 2906         if (!max_poll)
 2907                 return -ETIMEDOUT;
 2908 
 2909         /* Return success! */
 2910         return 0;
 2911 }
 2912 
 2913 /**
 2914  *      t4_eeprom_ptov - translate a physical EEPROM address to virtual
 2915  *      @phys_addr: the physical EEPROM address
 2916  *      @fn: the PCI function number
 2917  *      @sz: size of function-specific area
 2918  *
 2919  *      Translate a physical EEPROM address to virtual.  The first 1K is
 2920  *      accessed through virtual addresses starting at 31K, the rest is
 2921  *      accessed through virtual addresses starting at 0.
 2922  *
 2923  *      The mapping is as follows:
 2924  *      [0..1K) -> [31K..32K)
 2925  *      [1K..1K+A) -> [ES-A..ES)
 2926  *      [1K+A..ES) -> [0..ES-A-1K)
 2927  *
 2928  *      where A = @fn * @sz, and ES = EEPROM size.
 2929  */
 2930 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
 2931 {
 2932         fn *= sz;
 2933         if (phys_addr < 1024)
 2934                 return phys_addr + (31 << 10);
 2935         if (phys_addr < 1024 + fn)
 2936                 return EEPROMSIZE - fn + phys_addr - 1024;
 2937         if (phys_addr < EEPROMSIZE)
 2938                 return phys_addr - 1024 - fn;
 2939         return -EINVAL;
 2940 }
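
/*
 * Worked example of the mapping above (illustrative only): with @fn = 1 and
 * @sz = 1024 we have A = 1024, so, writing ES for the physical EEPROM size:
 *
 *      t4_eeprom_ptov(0x000, 1, 1024) = 0x7c00         (0x000 + 31K)
 *      t4_eeprom_ptov(0x400, 1, 1024) = ES - 0x400     (ES - A + 0x400 - 1K)
 *      t4_eeprom_ptov(0x800, 1, 1024) = 0x000          (0x800 - 1K - A)
 *      t4_eeprom_ptov(ES, 1, 1024)    = -EINVAL
 */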
 2941 
 2942 /**
 2943  *      t4_seeprom_wp - enable/disable EEPROM write protection
 2944  *      @adapter: the adapter
 2945  *      @enable: whether to enable or disable write protection
 2946  *
 2947  *      Enables or disables write protection on the serial EEPROM.
 2948  */
 2949 int t4_seeprom_wp(struct adapter *adapter, int enable)
 2950 {
 2951         return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
 2952 }
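
/*
 * Illustrative sketch, not part of the driver: a typical EEPROM update drops
 * write protection, performs the write, and then restores protection.  The
 * helper name and error handling below are assumptions for this example only
 * and the block is compiled out.
 */
#if 0
static int example_update_eeprom_word(struct adapter *adapter, u32 addr,
                                      u32 data)
{
        int ret;

        ret = t4_seeprom_wp(adapter, 0);        /* disable write protection */
        if (ret)
                return ret;
        ret = t4_seeprom_write(adapter, addr, data);
        (void)t4_seeprom_wp(adapter, 1);        /* re-enable write protection */
        return ret;
}
#endif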
 2953 
 2954 /**
 2955  *      get_vpd_keyword_val - Locates an information field keyword in the VPD
 2956  *      @vpd: Pointer to buffered vpd data structure
 2957  *      @kw: The keyword to search for
 2958  *      @region: VPD region to search (starting from 0)
 2959  *
 2960  *      Returns the offset within @vpd of the requested keyword's value
 2961  *      field, or -ENOENT if the keyword is not found.
 2962  */
 2963 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
 2964 {
 2965         int i, tag;
 2966         unsigned int offset, len;
 2967         const struct t4_vpdr_hdr *vpdr;
 2968 
 2969         offset = sizeof(struct t4_vpd_hdr);
 2970         vpdr = (const void *)(vpd + offset);
 2971         tag = vpdr->vpdr_tag;
 2972         len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
 2973         while (region--) {
 2974                 offset += sizeof(struct t4_vpdr_hdr) + len;
 2975                 vpdr = (const void *)(vpd + offset);
 2976                 if (++tag != vpdr->vpdr_tag)
 2977                         return -ENOENT;
 2978                 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
 2979         }
 2980         offset += sizeof(struct t4_vpdr_hdr);
 2981 
 2982         if (offset + len > VPD_LEN) {
 2983                 return -ENOENT;
 2984         }
 2985 
 2986         for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
 2987                 if (memcmp(vpd + i, kw, 2) == 0) {
 2988                         i += VPD_INFO_FLD_HDR_SIZE;
 2989                         return i;
 2990                 }
 2991 
 2992                 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
 2993         }
 2994 
 2995         return -ENOENT;
 2996 }
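
/*
 * Illustrative note: the offset returned by get_vpd_keyword_val() points at
 * the keyword's value bytes; the value's length is the third byte of the
 * 3-byte info-field header that precedes it, i.e.
 * vpd[ret - VPD_INFO_FLD_HDR_SIZE + 2].  get_vpd_params() below uses this
 * pattern when copying the "SN", "PN" and "NA" fields.
 */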
 2997 
 2998 
 2999 /**
 3000  *      get_vpd_params - read VPD parameters from VPD EEPROM
 3001  *      @adapter: adapter to read
 3002  *      @p: where to store the parameters
 3003  *      @buf: caller-provided temporary space to read the VPD into
 3004  *
 3005  *      Reads card parameters stored in VPD EEPROM.
 3006  */
 3007 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
 3008     uint16_t device_id, u32 *buf)
 3009 {
 3010         int i, ret, addr;
 3011         int ec, sn, pn, na, md;
 3012         u8 csum;
 3013         const u8 *vpd = (const u8 *)buf;
 3014 
 3015         /*
 3016          * Card information normally starts at VPD_BASE but early cards had
 3017          * it at 0.
 3018          */
 3019         ret = t4_seeprom_read(adapter, VPD_BASE, buf);
 3020         if (ret)
 3021                 return (ret);
 3022 
 3023         /*
 3024          * The VPD must begin with the unique identifier tag defined by the
 3025          * PCI SIG; for Chelsio adapters the first byte is
 3026          * CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming software is
 3027          * expected to place this entry automatically at the beginning of
 3028          * the VPD.
 3029          */
 3030         addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
 3031 
 3032         for (i = 0; i < VPD_LEN; i += 4) {
 3033                 ret = t4_seeprom_read(adapter, addr + i, buf++);
 3034                 if (ret)
 3035                         return ret;
 3036         }
 3037 
 3038 #define FIND_VPD_KW(var,name) do { \
 3039         var = get_vpd_keyword_val(vpd, name, 0); \
 3040         if (var < 0) { \
 3041                 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
 3042                 return -EINVAL; \
 3043         } \
 3044 } while (0)
 3045 
 3046         FIND_VPD_KW(i, "RV");
 3047         for (csum = 0; i >= 0; i--)
 3048                 csum += vpd[i];
 3049 
 3050         if (csum) {
 3051                 CH_ERR(adapter,
 3052                         "corrupted VPD EEPROM, actual csum %u\n", csum);
 3053                 return -EINVAL;
 3054         }
 3055 
 3056         FIND_VPD_KW(ec, "EC");
 3057         FIND_VPD_KW(sn, "SN");
 3058         FIND_VPD_KW(pn, "PN");
 3059         FIND_VPD_KW(na, "NA");
 3060 #undef FIND_VPD_KW
 3061 
 3062         memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
 3063         strstrip(p->id);
 3064         memcpy(p->ec, vpd + ec, EC_LEN);
 3065         strstrip(p->ec);
 3066         i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
 3067         memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
 3068         strstrip(p->sn);
 3069         i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
 3070         memcpy(p->pn, vpd + pn, min(i, PN_LEN));
 3071         strstrip((char *)p->pn);
 3072         i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
 3073         memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
 3074         strstrip((char *)p->na);
 3075 
 3076         if (device_id & 0x80)
 3077                 return 0;       /* Custom card */
 3078 
 3079         md = get_vpd_keyword_val(vpd, "VF", 1);
 3080         if (md < 0) {
 3081                 snprintf(p->md, sizeof(p->md), "unknown");
 3082         } else {
 3083                 i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
 3084                 memcpy(p->md, vpd + md, min(i, MD_LEN));
 3085                 strstrip((char *)p->md);
 3086         }
 3087 
 3088         return 0;
 3089 }
 3090 
 3091 /* serial flash, firmware, and flash config file constants */
 3092 enum {
 3093         SF_ATTEMPTS = 10,       /* max retries for SF operations */
 3094 
 3095         /* flash command opcodes */
 3096         SF_PROG_PAGE    = 2,    /* program 256B page */
 3097         SF_WR_DISABLE   = 4,    /* disable writes */
 3098         SF_RD_STATUS    = 5,    /* read status register */
 3099         SF_WR_ENABLE    = 6,    /* enable writes */
 3100         SF_RD_DATA_FAST = 0xb,  /* read flash */
 3101         SF_RD_ID        = 0x9f, /* read ID */
 3102         SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */
 3103 };
 3104 
 3105 /**
 3106  *      sf1_read - read data from the serial flash
 3107  *      @adapter: the adapter
 3108  *      @byte_cnt: number of bytes to read
 3109  *      @cont: whether another operation will be chained
 3110  *      @lock: whether to lock SF for PL access only
 3111  *      @valp: where to store the read data
 3112  *
 3113  *      Reads up to 4 bytes of data from the serial flash.  The location of
 3114  *      the read needs to be specified prior to calling this by issuing the
 3115  *      appropriate commands to the serial flash.
 3116  */
 3117 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
 3118                     int lock, u32 *valp)
 3119 {
 3120         int ret;
 3121 
 3122         if (!byte_cnt || byte_cnt > 4)
 3123                 return -EINVAL;
 3124         if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
 3125                 return -EBUSY;
 3126         t4_write_reg(adapter, A_SF_OP,
 3127                      V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
 3128         ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
 3129         if (!ret)
 3130                 *valp = t4_read_reg(adapter, A_SF_DATA);
 3131         return ret;
 3132 }
 3133 
 3134 /**
 3135  *      sf1_write - write data to the serial flash
 3136  *      @adapter: the adapter
 3137  *      @byte_cnt: number of bytes to write
 3138  *      @cont: whether another operation will be chained
 3139  *      @lock: whether to lock SF for PL access only
 3140  *      @val: value to write
 3141  *
 3142  *      Writes up to 4 bytes of data to the serial flash.  The location of
 3143  *      the write needs to be specified prior to calling this by issuing the
 3144  *      appropriate commands to the serial flash.
 3145  */
 3146 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
 3147                      int lock, u32 val)
 3148 {
 3149         if (!byte_cnt || byte_cnt > 4)
 3150                 return -EINVAL;
 3151         if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
 3152                 return -EBUSY;
 3153         t4_write_reg(adapter, A_SF_DATA, val);
 3154         t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
 3155                      V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
 3156         return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
 3157 }
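
/*
 * Illustrative note: sf1_write() and sf1_read() are the two halves of a
 * chained serial-flash transaction.  t4_read_flash() below shows the usual
 * sequence: write the opcode and address with @cont set, issue chained reads
 * for the payload, and finally write 0 to A_SF_OP to unlock the flash.
 */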
 3158 
 3159 /**
 3160  *      flash_wait_op - wait for a flash operation to complete
 3161  *      @adapter: the adapter
 3162  *      @attempts: max number of polls of the status register
 3163  *      @delay: delay between polls in ms
 3164  *
 3165  *      Wait for a flash operation to complete by polling the status register.
 3166  */
 3167 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
 3168 {
 3169         int ret;
 3170         u32 status;
 3171 
 3172         while (1) {
 3173                 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
 3174                     (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
 3175                         return ret;
 3176                 if (!(status & 1))
 3177                         return 0;
 3178                 if (--attempts == 0)
 3179                         return -EAGAIN;
 3180                 if (delay)
 3181                         msleep(delay);
 3182         }
 3183 }
 3184 
 3185 /**
 3186  *      t4_read_flash - read words from serial flash
 3187  *      @adapter: the adapter
 3188  *      @addr: the start address for the read
 3189  *      @nwords: how many 32-bit words to read
 3190  *      @data: where to store the read data
 3191  *      @byte_oriented: whether to store data as bytes or as words
 3192  *
 3193  *      Read the specified number of 32-bit words from the serial flash.
 3194  *      If @byte_oriented is set the read data is stored as a byte array
 3195  *      (i.e., big-endian), otherwise as 32-bit words in the platform's
 3196  *      natural endianness.
 3197  */
 3198 int t4_read_flash(struct adapter *adapter, unsigned int addr,
 3199                   unsigned int nwords, u32 *data, int byte_oriented)
 3200 {
 3201         int ret;
 3202 
 3203         if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
 3204                 return -EINVAL;
 3205 
 3206         addr = swab32(addr) | SF_RD_DATA_FAST;
 3207 
 3208         if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
 3209             (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
 3210                 return ret;
 3211 
 3212         for ( ; nwords; nwords--, data++) {
 3213                 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
 3214                 if (nwords == 1)
 3215                         t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
 3216                 if (ret)
 3217                         return ret;
 3218                 if (byte_oriented)
 3219                         *data = (__force __u32)(cpu_to_be32(*data));
 3220         }
 3221         return 0;
 3222 }
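
/*
 * Illustrative note: the flash version readers below pass @byte_oriented = 0
 * because they consume the result as a 32-bit number in the host's natural
 * endianness, while t4_get_fw_hdr() passes 1 because its caller treats the
 * buffer as the on-flash byte image of struct fw_hdr.
 */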
 3223 
 3224 /**
 3225  *      t4_write_flash - write up to a page of data to the serial flash
 3226  *      @adapter: the adapter
 3227  *      @addr: the start address to write
 3228  *      @n: length of data to write in bytes
 3229  *      @data: the data to write
 3230  *      @byte_oriented: whether to store data as bytes or as words
 3231  *
 3232  *      Writes up to a page of data (256 bytes) to the serial flash starting
 3233  *      at the given address.  All the data must be written to the same page.
 3234  *      If @byte_oriented is set the write data is stored as a byte stream
 3235  *      (i.e., it matches what is on disk), otherwise in big-endian.
 3236  */
 3237 int t4_write_flash(struct adapter *adapter, unsigned int addr,
 3238                           unsigned int n, const u8 *data, int byte_oriented)
 3239 {
 3240         int ret;
 3241         u32 buf[SF_PAGE_SIZE / 4];
 3242         unsigned int i, c, left, val, offset = addr & 0xff;
 3243 
 3244         if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
 3245                 return -EINVAL;
 3246 
 3247         val = swab32(addr) | SF_PROG_PAGE;
 3248 
 3249         if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
 3250             (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
 3251                 goto unlock;
 3252 
 3253         for (left = n; left; left -= c) {
 3254                 c = min(left, 4U);
 3255                 for (val = 0, i = 0; i < c; ++i)
 3256                         val = (val << 8) + *data++;
 3257 
 3258                 if (!byte_oriented)
 3259                         val = cpu_to_be32(val);
 3260 
 3261                 ret = sf1_write(adapter, c, c != left, 1, val);
 3262                 if (ret)
 3263                         goto unlock;
 3264         }
 3265         ret = flash_wait_op(adapter, 8, 1);
 3266         if (ret)
 3267                 goto unlock;
 3268 
 3269         t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
 3270 
 3271         /* Read the page to verify the write succeeded */
 3272         ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
 3273                             byte_oriented);
 3274         if (ret)
 3275                 return ret;
 3276 
 3277         if (memcmp(data - n, (u8 *)buf + offset, n)) {
 3278                 CH_ERR(adapter,
 3279                         "failed to correctly write the flash page at %#x\n",
 3280                         addr);
 3281                 return -EIO;
 3282         }
 3283         return 0;
 3284 
 3285 unlock:
 3286         t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
 3287         return ret;
 3288 }
 3289 
 3290 /**
 3291  *      t4_get_fw_version - read the firmware version
 3292  *      @adapter: the adapter
 3293  *      @vers: where to place the version
 3294  *
 3295  *      Reads the FW version from flash.
 3296  */
 3297 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
 3298 {
 3299         return t4_read_flash(adapter, FLASH_FW_START +
 3300                              offsetof(struct fw_hdr, fw_ver), 1,
 3301                              vers, 0);
 3302 }
 3303 
 3304 /**
 3305  *      t4_get_fw_hdr - read the firmware header
 3306  *      @adapter: the adapter
 3307  *      @hdr: where to place the firmware header
 3308  *
 3309  *      Reads the FW header from flash into caller provided buffer.
 3310  */
 3311 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
 3312 {
 3313         return t4_read_flash(adapter, FLASH_FW_START,
 3314             sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
 3315 }
 3316 
 3317 /**
 3318  *      t4_get_bs_version - read the firmware bootstrap version
 3319  *      @adapter: the adapter
 3320  *      @vers: where to place the version
 3321  *
 3322  *      Reads the FW Bootstrap version from flash.
 3323  */
 3324 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
 3325 {
 3326         return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
 3327                              offsetof(struct fw_hdr, fw_ver), 1,
 3328                              vers, 0);
 3329 }
 3330 
 3331 /**
 3332  *      t4_get_tp_version - read the TP microcode version
 3333  *      @adapter: the adapter
 3334  *      @vers: where to place the version
 3335  *
 3336  *      Reads the TP microcode version from flash.
 3337  */
 3338 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 3339 {
 3340         return t4_read_flash(adapter, FLASH_FW_START +
 3341                              offsetof(struct fw_hdr, tp_microcode_ver),
 3342                              1, vers, 0);
 3343 }
 3344 
 3345 /**
 3346  *      t4_get_exprom_version - return the Expansion ROM version (if any)
 3347  *      @adapter: the adapter
 3348  *      @vers: where to place the version
 3349  *
 3350  *      Reads the Expansion ROM header from FLASH and returns the version
 3351  *      number (if present) through the @vers return value pointer.  We return
 3352  *      this in the Firmware Version Format since it's convenient.  Return
 3353  *      0 on success, -ENOENT if no Expansion ROM is present.
 3354  */
 3355 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
 3356 {
 3357         struct exprom_header {
 3358                 unsigned char hdr_arr[16];      /* must start with 0x55aa */
 3359                 unsigned char hdr_ver[4];       /* Expansion ROM version */
 3360         } *hdr;
 3361         u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
 3362                                            sizeof(u32))];
 3363         int ret;
 3364 
 3365         ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
 3366                             ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
 3367                             0);
 3368         if (ret)
 3369                 return ret;
 3370 
 3371         hdr = (struct exprom_header *)exprom_header_buf;
 3372         if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
 3373                 return -ENOENT;
 3374 
 3375         *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
 3376                  V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
 3377                  V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
 3378                  V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
 3379         return 0;
 3380 }
 3381 
 3382 /**
 3383  *      t4_get_scfg_version - return the Serial Configuration version
 3384  *      @adapter: the adapter
 3385  *      @vers: where to place the version
 3386  *
 3387  *      Reads the Serial Configuration Version via the Firmware interface
 3388  *      (thus this can only be called once we're ready to issue Firmware
 3389  *      commands).  The format of the Serial Configuration version is
 3390  *      adapter specific.  Returns 0 on success, an error on failure.
 3391  *
 3392  *      Note that early versions of the Firmware didn't include the ability
 3393  *      to retrieve the Serial Configuration version, so we zero-out the
 3394  *      return-value parameter in that case to avoid leaving it with
 3395  *      garbage in it.
 3396  *
 3397  *      Also note that the Firmware will return its cached copy of the Serial
 3398  *      Initialization Revision ID, not the actual Revision ID as written in
 3399  *      the Serial EEPROM.  This is only an issue if a new VPD has been written
 3400  *      and the Firmware/Chip haven't yet gone through a RESET sequence.  So
 3401  *      it's best to defer calling this routine till after a FW_RESET_CMD has
 3402  *      been issued if the Host Driver will be performing a full adapter
 3403  *      initialization.
 3404  */
 3405 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
 3406 {
 3407         u32 scfgrev_param;
 3408         int ret;
 3409 
 3410         scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 3411                          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
 3412         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
 3413                               1, &scfgrev_param, vers);
 3414         if (ret)
 3415                 *vers = 0;
 3416         return ret;
 3417 }
 3418 
 3419 /**
 3420  *      t4_get_vpd_version - return the VPD version
 3421  *      @adapter: the adapter
 3422  *      @vers: where to place the version
 3423  *
 3424  *      Reads the VPD via the Firmware interface (thus this can only be called
 3425  *      once we're ready to issue Firmware commands).  The format of the
 3426  *      VPD version is adapter specific.  Returns 0 on success, an error on
 3427  *      failure.
 3428  *
 3429  *      Note that early versions of the Firmware didn't include the ability
 3430  *      to retrieve the VPD version, so we zero-out the return-value parameter
 3431  *      in that case to avoid leaving it with garbage in it.
 3432  *
 3433  *      Also note that the Firmware will return its cached copy of the VPD
 3434  *      Revision ID, not the actual Revision ID as written in the Serial
 3435  *      EEPROM.  This is only an issue if a new VPD has been written and the
 3436  *      Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
 3437  *      to defer calling this routine till after a FW_RESET_CMD has been issued
 3438  *      if the Host Driver will be performing a full adapter initialization.
 3439  */
 3440 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
 3441 {
 3442         u32 vpdrev_param;
 3443         int ret;
 3444 
 3445         vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 3446                         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
 3447         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
 3448                               1, &vpdrev_param, vers);
 3449         if (ret)
 3450                 *vers = 0;
 3451         return ret;
 3452 }
 3453 
 3454 /**
 3455  *      t4_get_version_info - extract various chip/firmware version information
 3456  *      @adapter: the adapter
 3457  *
 3458  *      Reads various chip/firmware version numbers and stores them into the
 3459  *      adapter Adapter Parameters structure.  If any of the efforts fails
 3460  *      adapter's Adapter Parameters structure.  If any of the reads fails,
 3461  *      will be read.
 3462  */
 3463 int t4_get_version_info(struct adapter *adapter)
 3464 {
 3465         int ret = 0;
 3466 
 3467         #define FIRST_RET(__getvinfo) \
 3468         do { \
 3469                 int __ret = __getvinfo; \
 3470                 if (__ret && !ret) \
 3471                         ret = __ret; \
 3472         } while (0)
 3473 
 3474         FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
 3475         FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
 3476         FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
 3477         FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
 3478         FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
 3479         FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
 3480 
 3481         #undef FIRST_RET
 3482 
 3483         return ret;
 3484 }
 3485 
 3486 /**
 3487  *      t4_flash_erase_sectors - erase a range of flash sectors
 3488  *      @adapter: the adapter
 3489  *      @start: the first sector to erase
 3490  *      @end: the last sector to erase
 3491  *
 3492  *      Erases the sectors in the given inclusive range.
 3493  */
 3494 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
 3495 {
 3496         int ret = 0;
 3497 
 3498         if (end >= adapter->params.sf_nsec)
 3499                 return -EINVAL;
 3500 
 3501         while (start <= end) {
 3502                 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
 3503                     (ret = sf1_write(adapter, 4, 0, 1,
 3504                                      SF_ERASE_SECTOR | (start << 8))) != 0 ||
 3505                     (ret = flash_wait_op(adapter, 14, 500)) != 0) {
 3506                         CH_ERR(adapter,
 3507                                 "erase of flash sector %d failed, error %d\n",
 3508                                 start, ret);
 3509                         break;
 3510                 }
 3511                 start++;
 3512         }
 3513         t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
 3514         return ret;
 3515 }
 3516 
 3517 /**
 3518  *      t4_flash_cfg_addr - return the address of the flash configuration file
 3519  *      @adapter: the adapter
 3520  *
 3521  *      Return the address within the flash where the Firmware Configuration
 3522  *      File is stored, or an error if the device FLASH is too small to contain
 3523  *      a Firmware Configuration File.
 3524  */
 3525 int t4_flash_cfg_addr(struct adapter *adapter)
 3526 {
 3527         /*
 3528          * If the device FLASH isn't large enough to hold a Firmware
 3529          * Configuration File, return an error.
 3530          */
 3531         if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
 3532                 return -ENOSPC;
 3533 
 3534         return FLASH_CFG_START;
 3535 }
 3536 
 3537 /*
 3538  * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 3539  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 3540  * and emit an error message for mismatched firmware to save our caller the
 3541  * effort ...
 3542  */
 3543 static int t4_fw_matches_chip(struct adapter *adap,
 3544                               const struct fw_hdr *hdr)
 3545 {
 3546         /*
 3547          * The expression below will return FALSE for any unsupported adapter
 3548          * which will keep us "honest" in the future ...
 3549          */
 3550         if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
 3551             (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
 3552             (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
 3553                 return 1;
 3554 
 3555         CH_ERR(adap,
 3556                 "FW image (%d) is not suitable for this adapter (%d)\n",
 3557                 hdr->chip, chip_id(adap));
 3558         return 0;
 3559 }
 3560 
 3561 /**
 3562  *      t4_load_fw - download firmware
 3563  *      @adap: the adapter
 3564  *      @fw_data: the firmware image to write
 3565  *      @size: image size
 3566  *
 3567  *      Write the supplied firmware image to the card's serial flash.
 3568  */
 3569 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
 3570 {
 3571         u32 csum;
 3572         int ret, addr;
 3573         unsigned int i;
 3574         u8 first_page[SF_PAGE_SIZE];
 3575         const u32 *p = (const u32 *)fw_data;
 3576         const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
 3577         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
 3578         unsigned int fw_start_sec;
 3579         unsigned int fw_start;
 3580         unsigned int fw_size;
 3581 
 3582         if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
 3583                 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
 3584                 fw_start = FLASH_FWBOOTSTRAP_START;
 3585                 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
 3586         } else {
 3587                 fw_start_sec = FLASH_FW_START_SEC;
 3588                 fw_start = FLASH_FW_START;
 3589                 fw_size = FLASH_FW_MAX_SIZE;
 3590         }
 3591 
 3592         if (!size) {
 3593                 CH_ERR(adap, "FW image has no data\n");
 3594                 return -EINVAL;
 3595         }
 3596         if (size & 511) {
 3597                 CH_ERR(adap,
 3598                         "FW image size not multiple of 512 bytes\n");
 3599                 return -EINVAL;
 3600         }
 3601         if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
 3602                 CH_ERR(adap,
 3603                         "FW image size differs from size in FW header\n");
 3604                 return -EINVAL;
 3605         }
 3606         if (size > fw_size) {
 3607                 CH_ERR(adap, "FW image too large, max is %u bytes\n",
 3608                         fw_size);
 3609                 return -EFBIG;
 3610         }
 3611         if (!t4_fw_matches_chip(adap, hdr))
 3612                 return -EINVAL;
 3613 
 3614         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
 3615                 csum += be32_to_cpu(p[i]);
 3616 
 3617         if (csum != 0xffffffff) {
 3618                 CH_ERR(adap,
 3619                         "corrupted firmware image, checksum %#x\n", csum);
 3620                 return -EINVAL;
 3621         }
 3622 
 3623         i = DIV_ROUND_UP(size, sf_sec_size);    /* # of sectors spanned */
 3624         ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
 3625         if (ret)
 3626                 goto out;
 3627 
 3628         /*
 3629          * We write the correct version at the end so the driver can see a bad
 3630          * version if the FW write fails.  Start by writing a copy of the
 3631          * first page with a bad version.
 3632          */
 3633         memcpy(first_page, fw_data, SF_PAGE_SIZE);
 3634         ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
 3635         ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
 3636         if (ret)
 3637                 goto out;
 3638 
 3639         addr = fw_start;
 3640         for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
 3641                 addr += SF_PAGE_SIZE;
 3642                 fw_data += SF_PAGE_SIZE;
 3643                 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
 3644                 if (ret)
 3645                         goto out;
 3646         }
 3647 
 3648         ret = t4_write_flash(adap,
 3649                              fw_start + offsetof(struct fw_hdr, fw_ver),
 3650                              sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
 3651 out:
 3652         if (ret)
 3653                 CH_ERR(adap, "firmware download failed, error %d\n",
 3654                         ret);
 3655         return ret;
 3656 }
 3657 
 3658 /**
 3659  *      t4_fwcache - firmware cache operation
 3660  *      @adap: the adapter
 3661  *      @op: the operation (flush or flush and invalidate)
 3662  */
 3663 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
 3664 {
 3665         struct fw_params_cmd c;
 3666 
 3667         memset(&c, 0, sizeof(c));
 3668         c.op_to_vfn =
 3669             cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
 3670                             F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 3671                                 V_FW_PARAMS_CMD_PFN(adap->pf) |
 3672                                 V_FW_PARAMS_CMD_VFN(0));
 3673         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 3674         c.param[0].mnem =
 3675             cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 3676                             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
 3677         c.param[0].val = (__force __be32)op;
 3678 
 3679         return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 3680 }
 3681 
 3682 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
 3683                         unsigned int *pif_req_wrptr,
 3684                         unsigned int *pif_rsp_wrptr)
 3685 {
 3686         int i, j;
 3687         u32 cfg, val, req, rsp;
 3688 
 3689         cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
 3690         if (cfg & F_LADBGEN)
 3691                 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
 3692 
 3693         val = t4_read_reg(adap, A_CIM_DEBUGSTS);
 3694         req = G_POLADBGWRPTR(val);
 3695         rsp = G_PILADBGWRPTR(val);
 3696         if (pif_req_wrptr)
 3697                 *pif_req_wrptr = req;
 3698         if (pif_rsp_wrptr)
 3699                 *pif_rsp_wrptr = rsp;
 3700 
 3701         for (i = 0; i < CIM_PIFLA_SIZE; i++) {
 3702                 for (j = 0; j < 6; j++) {
 3703                         t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
 3704                                      V_PILADBGRDPTR(rsp));
 3705                         *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
 3706                         *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
 3707                         req++;
 3708                         rsp++;
 3709                 }
 3710                 req = (req + 2) & M_POLADBGRDPTR;
 3711                 rsp = (rsp + 2) & M_PILADBGRDPTR;
 3712         }
 3713         t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
 3714 }
 3715 
 3716 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
 3717 {
 3718         u32 cfg;
 3719         int i, j, idx;
 3720 
 3721         cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
 3722         if (cfg & F_LADBGEN)
 3723                 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
 3724 
 3725         for (i = 0; i < CIM_MALA_SIZE; i++) {
 3726                 for (j = 0; j < 5; j++) {
 3727                         idx = 8 * i + j;
 3728                         t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
 3729                                      V_PILADBGRDPTR(idx));
 3730                         *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
 3731                         *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
 3732                 }
 3733         }
 3734         t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
 3735 }
 3736 
 3737 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
 3738 {
 3739         unsigned int i, j;
 3740 
 3741         for (i = 0; i < 8; i++) {
 3742                 u32 *p = la_buf + i;
 3743 
 3744                 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
 3745                 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
 3746                 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
 3747                 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
 3748                         *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
 3749         }
 3750 }
 3751 
 3752 /**
 3753  *      fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 3754  *      @caps16: a 16-bit Port Capabilities value
 3755  *
 3756  *      Returns the equivalent 32-bit Port Capabilities value.
 3757  */
 3758 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
 3759 {
 3760         uint32_t caps32 = 0;
 3761 
 3762         #define CAP16_TO_CAP32(__cap) \
 3763                 do { \
 3764                         if (caps16 & FW_PORT_CAP_##__cap) \
 3765                                 caps32 |= FW_PORT_CAP32_##__cap; \
 3766                 } while (0)
 3767 
 3768         CAP16_TO_CAP32(SPEED_100M);
 3769         CAP16_TO_CAP32(SPEED_1G);
 3770         CAP16_TO_CAP32(SPEED_25G);
 3771         CAP16_TO_CAP32(SPEED_10G);
 3772         CAP16_TO_CAP32(SPEED_40G);
 3773         CAP16_TO_CAP32(SPEED_100G);
 3774         CAP16_TO_CAP32(FC_RX);
 3775         CAP16_TO_CAP32(FC_TX);
 3776         CAP16_TO_CAP32(ANEG);
 3777         CAP16_TO_CAP32(FORCE_PAUSE);
 3778         CAP16_TO_CAP32(MDIAUTO);
 3779         CAP16_TO_CAP32(MDISTRAIGHT);
 3780         CAP16_TO_CAP32(FEC_RS);
 3781         CAP16_TO_CAP32(FEC_BASER_RS);
 3782         CAP16_TO_CAP32(802_3_PAUSE);
 3783         CAP16_TO_CAP32(802_3_ASM_DIR);
 3784 
 3785         #undef CAP16_TO_CAP32
 3786 
 3787         return caps32;
 3788 }
 3789 
 3790 /**
 3791  *      fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 3792  *      @caps32: a 32-bit Port Capabilities value
 3793  *
 3794  *      Returns the equivalent 16-bit Port Capabilities value.  Note that
 3795  *      not all 32-bit Port Capabilities can be represented in the 16-bit
 3796  *      Port Capabilities, so some fields/values may be lost in the conversion.
 3797  */
 3798 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
 3799 {
 3800         uint16_t caps16 = 0;
 3801 
 3802         #define CAP32_TO_CAP16(__cap) \
 3803                 do { \
 3804                         if (caps32 & FW_PORT_CAP32_##__cap) \
 3805                                 caps16 |= FW_PORT_CAP_##__cap; \
 3806                 } while (0)
 3807 
 3808         CAP32_TO_CAP16(SPEED_100M);
 3809         CAP32_TO_CAP16(SPEED_1G);
 3810         CAP32_TO_CAP16(SPEED_10G);
 3811         CAP32_TO_CAP16(SPEED_25G);
 3812         CAP32_TO_CAP16(SPEED_40G);
 3813         CAP32_TO_CAP16(SPEED_100G);
 3814         CAP32_TO_CAP16(FC_RX);
 3815         CAP32_TO_CAP16(FC_TX);
 3816         CAP32_TO_CAP16(802_3_PAUSE);
 3817         CAP32_TO_CAP16(802_3_ASM_DIR);
 3818         CAP32_TO_CAP16(ANEG);
 3819         CAP32_TO_CAP16(FORCE_PAUSE);
 3820         CAP32_TO_CAP16(MDIAUTO);
 3821         CAP32_TO_CAP16(MDISTRAIGHT);
 3822         CAP32_TO_CAP16(FEC_RS);
 3823         CAP32_TO_CAP16(FEC_BASER_RS);
 3824 
 3825         #undef CAP32_TO_CAP16
 3826 
 3827         return caps16;
 3828 }
 3829 
 3830 static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none)
 3831 {
 3832         int8_t fec = 0;
 3833 
 3834         if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0)
 3835                 return (unset_means_none ? FEC_NONE : 0);
 3836 
 3837         if (caps & FW_PORT_CAP32_FEC_RS)
 3838                 fec |= FEC_RS;
 3839         if (caps & FW_PORT_CAP32_FEC_BASER_RS)
 3840                 fec |= FEC_BASER_RS;
 3841         if (caps & FW_PORT_CAP32_FEC_NO_FEC)
 3842                 fec |= FEC_NONE;
 3843 
 3844         return (fec);
 3845 }
 3846 
 3847 /*
 3848  * Note that 0 is not translated to NO_FEC.
 3849  */
 3850 static uint32_t fec_to_fwcap(int8_t fec)
 3851 {
 3852         uint32_t caps = 0;
 3853 
 3854         /* Only real FECs allowed. */
 3855         MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0);
 3856 
 3857         if (fec & FEC_RS)
 3858                 caps |= FW_PORT_CAP32_FEC_RS;
 3859         if (fec & FEC_BASER_RS)
 3860                 caps |= FW_PORT_CAP32_FEC_BASER_RS;
 3861         if (fec & FEC_NONE)
 3862                 caps |= FW_PORT_CAP32_FEC_NO_FEC;
 3863 
 3864         return (caps);
 3865 }
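
/*
 * Illustrative sketch, not part of the driver: for the individual FEC bits
 * the two helpers above are inverses of each other, e.g. FEC_RS maps to
 * FW_PORT_CAP32_FEC_RS and back.  The compiled-out assertions below only
 * document that expectation for this example.
 */
#if 0
static void example_fec_roundtrip(void)
{
        MPASS(fwcap_to_fec(fec_to_fwcap(FEC_RS), false) == FEC_RS);
        MPASS(fwcap_to_fec(fec_to_fwcap(FEC_BASER_RS), false) == FEC_BASER_RS);
        MPASS(fwcap_to_fec(fec_to_fwcap(FEC_NONE), false) == FEC_NONE);
}
#endif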
 3866 
 3867 /**
 3868  *      t4_link_l1cfg - apply link configuration to MAC/PHY
 3869  *      @adap: the adapter; @mbox: mbox to use for the FW command
 3870  *      @port: the port id
 3871  *      @lc: the requested link configuration
 3872  *
 3873  *      Set up a port's MAC and PHY according to a desired link configuration.
 3874  *      - If the PHY can auto-negotiate first decide what to advertise, then
 3875  *        enable/disable auto-negotiation as desired, and reset.
 3876  *      - If the PHY does not auto-negotiate just reset it.
 3877  *      - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 3878  *        otherwise do it later based on the outcome of auto-negotiation.
 3879  */
 3880 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 3881                   struct link_config *lc)
 3882 {
 3883         struct fw_port_cmd c;
 3884         unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
 3885         unsigned int aneg, fc, fec, speed, rcap;
 3886 
 3887         fc = 0;
 3888         if (lc->requested_fc & PAUSE_RX)
 3889                 fc |= FW_PORT_CAP32_FC_RX;
 3890         if (lc->requested_fc & PAUSE_TX)
 3891                 fc |= FW_PORT_CAP32_FC_TX;
 3892         if (!(lc->requested_fc & PAUSE_AUTONEG))
 3893                 fc |= FW_PORT_CAP32_FORCE_PAUSE;
 3894 
 3895         if (lc->requested_aneg == AUTONEG_DISABLE)
 3896                 aneg = 0;
 3897         else if (lc->requested_aneg == AUTONEG_ENABLE)
 3898                 aneg = FW_PORT_CAP32_ANEG;
 3899         else
 3900                 aneg = lc->pcaps & FW_PORT_CAP32_ANEG;
 3901 
 3902         if (aneg) {
 3903                 speed = lc->pcaps &
 3904                     V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
 3905         } else if (lc->requested_speed != 0)
 3906                 speed = speed_to_fwcap(lc->requested_speed);
 3907         else
 3908                 speed = fwcap_top_speed(lc->pcaps);
 3909 
 3910         fec = 0;
 3911         if (fec_supported(speed)) {
 3912                 int force_fec;
 3913 
 3914                 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
 3915                         force_fec = lc->force_fec;
 3916                 else
 3917                         force_fec = 0;
 3918 
 3919                 if (lc->requested_fec == FEC_AUTO) {
 3920                         if (force_fec > 0) {
 3921                                 /*
 3922                                  * Must use FORCE_FEC even though requested FEC
 3923                                  * is AUTO. Set all the FEC bits valid for the
 3924                                  * speed and let the firmware pick one.
 3925                                  */
 3926                                 fec |= FW_PORT_CAP32_FORCE_FEC;
 3927                                 if (speed & FW_PORT_CAP32_SPEED_100G) {
 3928                                         fec |= FW_PORT_CAP32_FEC_RS;
 3929                                         fec |= FW_PORT_CAP32_FEC_NO_FEC;
 3930                                 } else if (speed & FW_PORT_CAP32_SPEED_50G) {
 3931                                         fec |= FW_PORT_CAP32_FEC_BASER_RS;
 3932                                         fec |= FW_PORT_CAP32_FEC_NO_FEC;
 3933                                 } else {
 3934                                         fec |= FW_PORT_CAP32_FEC_RS;
 3935                                         fec |= FW_PORT_CAP32_FEC_BASER_RS;
 3936                                         fec |= FW_PORT_CAP32_FEC_NO_FEC;
 3937                                 }
 3938                         } else {
 3939                                 /*
 3940                                  * Set only one bit. Old firmwares can't deal with
 3941                                  * multiple bits and new firmwares are free to
 3942                                  * ignore this and try whatever FECs they want
 3943                                  * because we aren't setting FORCE_FEC here.
 3944                                  */
 3945                                 fec |= fec_to_fwcap(lc->fec_hint);
 3946                                 MPASS(powerof2(fec));
 3947 
 3948                                 /*
 3949                                  * Override the hint if the FEC is not valid for
 3950                                  * the potential top speed.  Request the best
 3951                                  * FEC at that speed instead.
 3952                                  */
 3953                                 if (speed & FW_PORT_CAP32_SPEED_100G) {
 3954                                         if (fec == FW_PORT_CAP32_FEC_BASER_RS)
 3955                                                 fec = FW_PORT_CAP32_FEC_RS;
 3956                                 } else if (speed & FW_PORT_CAP32_SPEED_50G) {
 3957                                         if (fec == FW_PORT_CAP32_FEC_RS)
 3958                                                 fec = FW_PORT_CAP32_FEC_BASER_RS;
 3959                                 }
 3960                         }
 3961                 } else {
 3962                         /*
 3963                          * User has explicitly requested some FEC(s). Set
 3964                          * FORCE_FEC unless prohibited from using it.
 3965                          */
 3966                         if (force_fec != 0)
 3967                                 fec |= FW_PORT_CAP32_FORCE_FEC;
 3968                         fec |= fec_to_fwcap(lc->requested_fec &
 3969                             M_FW_PORT_CAP32_FEC);
 3970                         if (lc->requested_fec & FEC_MODULE)
 3971                                 fec |= fec_to_fwcap(lc->fec_hint);
 3972                 }
 3973 
 3974                 /*
 3975                  * This is for compatibility with old firmwares. The original
 3976                  * way to request NO_FEC was to not set any of the FEC bits. New
 3977                  * firmwares understand this too.
 3978                  */
 3979                 if (fec == FW_PORT_CAP32_FEC_NO_FEC)
 3980                         fec = 0;
 3981         }
 3982 
 3983         /* Force autonegotiation on for BASE-T (BT) cards. */
 3984         if (isset(&adap->bt_map, port))
 3985                 aneg = lc->pcaps & FW_PORT_CAP32_ANEG;
 3986 
 3987         rcap = aneg | speed | fc | fec;
 3988         if ((rcap | lc->pcaps) != lc->pcaps) {
 3989 #ifdef INVARIANTS
 3990                 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
 3991                     lc->pcaps, rcap & (rcap ^ lc->pcaps));
 3992 #endif
 3993                 rcap &= lc->pcaps;
 3994         }
 3995         rcap |= mdi;
 3996 
 3997         memset(&c, 0, sizeof(c));
 3998         c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
 3999                                      F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 4000                                      V_FW_PORT_CMD_PORTID(port));
 4001         if (adap->params.port_caps32) {
 4002                 c.action_to_len16 =
 4003                     cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
 4004                         FW_LEN16(c));
 4005                 c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
 4006         } else {
 4007                 c.action_to_len16 =
 4008                     cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 4009                             FW_LEN16(c));
 4010                 c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
 4011         }
 4012 
 4013         lc->requested_caps = rcap;
 4014         return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
 4015 }
 4016 
 4017 /**
 4018  *      t4_restart_aneg - restart autonegotiation
 4019  *      @adap: the adapter
 4020  *      @mbox: mbox to use for the FW command
 4021  *      @port: the port id
 4022  *
 4023  *      Restarts autonegotiation for the selected port.
 4024  */
 4025 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
 4026 {
 4027         struct fw_port_cmd c;
 4028 
 4029         memset(&c, 0, sizeof(c));
 4030         c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
 4031                                      F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 4032                                      V_FW_PORT_CMD_PORTID(port));
 4033         c.action_to_len16 =
 4034                 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 4035                             FW_LEN16(c));
 4036         c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
 4037         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 4038 }
 4039 
 4040 struct intr_details {
 4041         u32 mask;
 4042         const char *msg;
 4043 };
 4044 
 4045 struct intr_action {
 4046         u32 mask;
 4047         int arg;
 4048         bool (*action)(struct adapter *, int, bool);
 4049 };
 4050 
 4051 #define NONFATAL_IF_DISABLED 1
 4052 struct intr_info {
 4053         const char *name;       /* name of the INT_CAUSE register */
 4054         int cause_reg;          /* INT_CAUSE register */
 4055         int enable_reg;         /* INT_ENABLE register */
 4056         u32 fatal;              /* bits that are fatal */
 4057         int flags;              /* hints */
 4058         const struct intr_details *details;
 4059         const struct intr_action *actions;
 4060 };
 4061 
 4062 static inline char
 4063 intr_alert_char(u32 cause, u32 enable, u32 fatal)
 4064 {
 4065 
 4066         if (cause & fatal)
 4067                 return ('!');
 4068         if (cause & enable)
 4069                 return ('*');
 4070         return ('-');
 4071 }
 4072 
 4073 static void
 4074 t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
 4075 {
 4076         u32 enable, fatal, leftover;
 4077         const struct intr_details *details;
 4078         char alert;
 4079 
 4080         enable = t4_read_reg(adap, ii->enable_reg);
 4081         if (ii->flags & NONFATAL_IF_DISABLED)
 4082                 fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
 4083         else
 4084                 fatal = ii->fatal;
 4085         alert = intr_alert_char(cause, enable, fatal);
 4086         CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
 4087             alert, ii->name, ii->cause_reg, cause, enable, fatal);
 4088 
 4089         leftover = cause;
 4090         for (details = ii->details; details && details->mask != 0; details++) {
 4091                 u32 msgbits = details->mask & cause;
 4092                 if (msgbits == 0)
 4093                         continue;
 4094                 alert = intr_alert_char(msgbits, enable, ii->fatal);
 4095                 CH_ALERT(adap, "  %c [0x%08x] %s\n", alert, msgbits,
 4096                     details->msg);
 4097                 leftover &= ~msgbits;
 4098         }
 4099         if (leftover != 0 && leftover != cause)
 4100                 CH_ALERT(adap, "  ? [0x%08x]\n", leftover);
 4101 }
 4102 
 4103 /*
 4104  * Returns true for fatal error.
 4105  */
 4106 static bool
 4107 t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
 4108     u32 additional_cause, bool verbose)
 4109 {
 4110         u32 cause, fatal;
 4111         bool rc;
 4112         const struct intr_action *action;
 4113 
 4114         /*
 4115          * Read and display cause.  Note that the top level PL_INT_CAUSE is a
 4116          * bit special and we need to completely ignore the bits that are not in
 4117          * PL_INT_ENABLE.
 4118          */
 4119         cause = t4_read_reg(adap, ii->cause_reg);
 4120         if (ii->cause_reg == A_PL_INT_CAUSE)
 4121                 cause &= t4_read_reg(adap, ii->enable_reg);
 4122         if (verbose || cause != 0)
 4123                 t4_show_intr_info(adap, ii, cause);
 4124         fatal = cause & ii->fatal;
 4125         if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
 4126                 fatal &= t4_read_reg(adap, ii->enable_reg);
 4127         cause |= additional_cause;
 4128         if (cause == 0)
 4129                 return (false);
 4130 
 4131         rc = fatal != 0;
 4132         for (action = ii->actions; action && action->mask != 0; action++) {
 4133                 if (!(action->mask & cause))
 4134                         continue;
 4135                 rc |= (action->action)(adap, action->arg, verbose);
 4136         }
 4137 
 4138         /* clear */
 4139         t4_write_reg(adap, ii->cause_reg, cause);
 4140         (void)t4_read_reg(adap, ii->cause_reg);
 4141 
 4142         return (rc);
 4143 }
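      /*
       * Illustrative sketch of a minimal per-module handler built on
       * t4_handle_intr(); the real handlers below follow this shape.  The
       * FOO register and bit names are placeholders, not actual hardware.
       *
       *      static bool foo_intr_handler(struct adapter *adap, int arg, bool verbose)
       *      {
       *              static const struct intr_details foo_intr_details[] = {
       *                      { F_FOO_PARERR, "FOO parity error" },
       *                      { 0 }
       *              };
       *              static const struct intr_info foo_intr_info = {
       *                      .name = "FOO_INT_CAUSE",
       *                      .cause_reg = A_FOO_INT_CAUSE,
       *                      .enable_reg = A_FOO_INT_ENABLE,
       *                      .fatal = F_FOO_PARERR,
       *                      .flags = NONFATAL_IF_DISABLED,
       *                      .details = foo_intr_details,
       *                      .actions = NULL,
       *              };
       *
       *              return (t4_handle_intr(adap, &foo_intr_info, 0, verbose));
       *      }
       */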
 4144 
 4145 /*
 4146  * Interrupt handler for the PCIE module.
 4147  */
 4148 static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
 4149 {
 4150         static const struct intr_details sysbus_intr_details[] = {
 4151                 { F_RNPP, "RXNP array parity error" },
 4152                 { F_RPCP, "RXPC array parity error" },
 4153                 { F_RCIP, "RXCIF array parity error" },
 4154                 { F_RCCP, "Rx completions control array parity error" },
 4155                 { F_RFTP, "RXFT array parity error" },
 4156                 { 0 }
 4157         };
 4158         static const struct intr_info sysbus_intr_info = {
 4159                 .name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
 4160                 .cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
 4161                 .enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
 4162                 .fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
 4163                 .flags = 0,
 4164                 .details = sysbus_intr_details,
 4165                 .actions = NULL,
 4166         };
 4167         static const struct intr_details pcie_port_intr_details[] = {
 4168                 { F_TPCP, "TXPC array parity error" },
 4169                 { F_TNPP, "TXNP array parity error" },
 4170                 { F_TFTP, "TXFT array parity error" },
 4171                 { F_TCAP, "TXCA array parity error" },
 4172                 { F_TCIP, "TXCIF array parity error" },
 4173                 { F_RCAP, "RXCA array parity error" },
 4174                 { F_OTDD, "outbound request TLP discarded" },
 4175                 { F_RDPE, "Rx data parity error" },
 4176                 { F_TDUE, "Tx uncorrectable data error" },
 4177                 { 0 }
 4178         };
 4179         static const struct intr_info pcie_port_intr_info = {
 4180                 .name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
 4181                 .cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
 4182                 .enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
 4183                 .fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
 4184                     F_OTDD | F_RDPE | F_TDUE,
 4185                 .flags = 0,
 4186                 .details = pcie_port_intr_details,
 4187                 .actions = NULL,
 4188         };
 4189         static const struct intr_details pcie_intr_details[] = {
 4190                 { F_MSIADDRLPERR, "MSI AddrL parity error" },
 4191                 { F_MSIADDRHPERR, "MSI AddrH parity error" },
 4192                 { F_MSIDATAPERR, "MSI data parity error" },
 4193                 { F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
 4194                 { F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
 4195                 { F_MSIXDATAPERR, "MSI-X data parity error" },
 4196                 { F_MSIXDIPERR, "MSI-X DI parity error" },
 4197                 { F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
 4198                 { F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
 4199                 { F_TARTAGPERR, "PCIe target tag FIFO parity error" },
 4200                 { F_CCNTPERR, "PCIe CMD channel count parity error" },
 4201                 { F_CREQPERR, "PCIe CMD channel request parity error" },
 4202                 { F_CRSPPERR, "PCIe CMD channel response parity error" },
 4203                 { F_DCNTPERR, "PCIe DMA channel count parity error" },
 4204                 { F_DREQPERR, "PCIe DMA channel request parity error" },
 4205                 { F_DRSPPERR, "PCIe DMA channel response parity error" },
 4206                 { F_HCNTPERR, "PCIe HMA channel count parity error" },
 4207                 { F_HREQPERR, "PCIe HMA channel request parity error" },
 4208                 { F_HRSPPERR, "PCIe HMA channel response parity error" },
 4209                 { F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
 4210                 { F_FIDPERR, "PCIe FID parity error" },
 4211                 { F_INTXCLRPERR, "PCIe INTx clear parity error" },
 4212                 { F_MATAGPERR, "PCIe MA tag parity error" },
 4213                 { F_PIOTAGPERR, "PCIe PIO tag parity error" },
 4214                 { F_RXCPLPERR, "PCIe Rx completion parity error" },
 4215                 { F_RXWRPERR, "PCIe Rx write parity error" },
 4216                 { F_RPLPERR, "PCIe replay buffer parity error" },
 4217                 { F_PCIESINT, "PCIe core secondary fault" },
 4218                 { F_PCIEPINT, "PCIe core primary fault" },
 4219                 { F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
 4220                 { 0 }
 4221         };
 4222         static const struct intr_details t5_pcie_intr_details[] = {
 4223                 { F_IPGRPPERR, "Parity errors observed by IP" },
 4224                 { F_NONFATALERR, "PCIe non-fatal error" },
 4225                 { F_READRSPERR, "Outbound read error" },
 4226                 { F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
 4227                 { F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
 4228                 { F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
 4229                 { F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
 4230                 { F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
 4231                 { F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
 4232                 { F_MAGRPPERR, "MA group FIFO parity error" },
 4233                 { F_VFIDPERR, "VFID SRAM parity error" },
 4234                 { F_FIDPERR, "FID SRAM parity error" },
 4235                 { F_CFGSNPPERR, "config snoop FIFO parity error" },
 4236                 { F_HRSPPERR, "HMA channel response data SRAM parity error" },
 4237                 { F_HREQRDPERR, "HMA channel read request SRAM parity error" },
 4238                 { F_HREQWRPERR, "HMA channel write request SRAM parity error" },
 4239                 { F_DRSPPERR, "DMA channel response data SRAM parity error" },
 4240                 { F_DREQRDPERR, "DMA channel read request SRAM parity error" },
 4241                 { F_CRSPPERR, "CMD channel response data SRAM parity error" },
 4242                 { F_CREQRDPERR, "CMD channel read request SRAM parity error" },
 4243                 { F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
 4244                 { F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
 4245                 { F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
 4246                 { F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
 4247                 { F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
 4248                 { F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
 4249                 { F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
 4250                 { F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
 4251                 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
 4252                 { F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
 4253                 { F_MSTGRPPERR, "Master response read queue SRAM parity error" },
 4254                 { 0 }
 4255         };
 4256         struct intr_info pcie_intr_info = {
 4257                 .name = "PCIE_INT_CAUSE",
 4258                 .cause_reg = A_PCIE_INT_CAUSE,
 4259                 .enable_reg = A_PCIE_INT_ENABLE,
 4260                 .fatal = 0xffffffff,
 4261                 .flags = NONFATAL_IF_DISABLED,
 4262                 .details = NULL,
 4263                 .actions = NULL,
 4264         };
 4265         bool fatal = false;
 4266 
 4267         if (is_t4(adap)) {
 4268                 fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
 4269                 fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
 4270 
 4271                 pcie_intr_info.details = pcie_intr_details;
 4272         } else {
 4273                 pcie_intr_info.details = t5_pcie_intr_details;
 4274         }
 4275         fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
 4276 
 4277         return (fatal);
 4278 }
 4279 
 4280 /*
 4281  * TP interrupt handler.
 4282  */
 4283 static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
 4284 {
 4285         static const struct intr_details tp_intr_details[] = {
 4286                 { 0x3fffffff, "TP parity error" },
 4287                 { F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
 4288                 { 0 }
 4289         };
 4290         static const struct intr_info tp_intr_info = {
 4291                 .name = "TP_INT_CAUSE",
 4292                 .cause_reg = A_TP_INT_CAUSE,
 4293                 .enable_reg = A_TP_INT_ENABLE,
 4294                 .fatal = 0x7fffffff,
 4295                 .flags = NONFATAL_IF_DISABLED,
 4296                 .details = tp_intr_details,
 4297                 .actions = NULL,
 4298         };
 4299 
 4300         return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
 4301 }
 4302 
 4303 /*
 4304  * SGE interrupt handler.
 4305  */
 4306 static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
 4307 {
 4308         static const struct intr_info sge_int1_info = {
 4309                 .name = "SGE_INT_CAUSE1",
 4310                 .cause_reg = A_SGE_INT_CAUSE1,
 4311                 .enable_reg = A_SGE_INT_ENABLE1,
 4312                 .fatal = 0xffffffff,
 4313                 .flags = NONFATAL_IF_DISABLED,
 4314                 .details = NULL,
 4315                 .actions = NULL,
 4316         };
 4317         static const struct intr_info sge_int2_info = {
 4318                 .name = "SGE_INT_CAUSE2",
 4319                 .cause_reg = A_SGE_INT_CAUSE2,
 4320                 .enable_reg = A_SGE_INT_ENABLE2,
 4321                 .fatal = 0xffffffff,
 4322                 .flags = NONFATAL_IF_DISABLED,
 4323                 .details = NULL,
 4324                 .actions = NULL,
 4325         };
 4326         static const struct intr_details sge_int3_details[] = {
 4327                 { F_ERR_FLM_DBP,
 4328                         "DBP pointer delivery for invalid context or QID" },
 4329                 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
 4330                         "Invalid QID or header request by IDMA" },
 4331                 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
 4332                 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
 4333                 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
 4334                 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
 4335                 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
 4336                 { F_ERR_TIMER_ABOVE_MAX_QID,
 4337                         "SGE GTS with timer 0-5 for IQID > 1023" },
 4338                 { F_ERR_CPL_EXCEED_IQE_SIZE,
 4339                         "SGE received CPL exceeding IQE size" },
 4340                 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
 4341                 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
 4342                 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
 4343                 { F_ERR_DROPPED_DB, "SGE DB dropped" },
 4344                 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
 4345                   "SGE IQID > 1023 received CPL for FL" },
 4346                 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
 4347                         F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
 4348                 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
 4349                 { F_ERR_ING_CTXT_PRIO,
 4350                         "Ingress context manager priority user error" },
 4351                 { F_ERR_EGR_CTXT_PRIO,
 4352                         "Egress context manager priority user error" },
 4353                 { F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
 4354                 { F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
 4355                 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
 4356                 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
 4357                 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
 4358                 { 0x0000000f, "SGE context access for invalid queue" },
 4359                 { 0 }
 4360         };
 4361         static const struct intr_details t6_sge_int3_details[] = {
 4362                 { F_ERR_FLM_DBP,
 4363                         "DBP pointer delivery for invalid context or QID" },
 4364                 { F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
 4365                         "Invalid QID or header request by IDMA" },
 4366                 { F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
 4367                 { F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
 4368                 { F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
 4369                 { F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
 4370                 { F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
 4371                 { F_ERR_TIMER_ABOVE_MAX_QID,
 4372                         "SGE GTS with timer 0-5 for IQID > 1023" },
 4373                 { F_ERR_CPL_EXCEED_IQE_SIZE,
 4374                         "SGE received CPL exceeding IQE size" },
 4375                 { F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
 4376                 { F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
 4377                 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
 4378                 { F_ERR_DROPPED_DB, "SGE DB dropped" },
 4379                 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
 4380                         "SGE IQID > 1023 received CPL for FL" },
 4381                 { F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
 4382                         F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
 4383                 { F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
 4384                 { F_ERR_ING_CTXT_PRIO,
 4385                         "Ingress context manager priority user error" },
 4386                 { F_ERR_EGR_CTXT_PRIO,
 4387                         "Egress context manager priority user error" },
 4388                 { F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
 4389                 { F_FATAL_WRE_LEN,
 4390                         "SGE WRE packet less than advertised length" },
 4391                 { F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
 4392                 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
 4393                 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
 4394                 { 0x0000000f, "SGE context access for invalid queue" },
 4395                 { 0 }
 4396         };
 4397         struct intr_info sge_int3_info = {
 4398                 .name = "SGE_INT_CAUSE3",
 4399                 .cause_reg = A_SGE_INT_CAUSE3,
 4400                 .enable_reg = A_SGE_INT_ENABLE3,
 4401                 .fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
 4402                 .flags = 0,
 4403                 .details = NULL,
 4404                 .actions = NULL,
 4405         };
 4406         static const struct intr_info sge_int4_info = {
 4407                 .name = "SGE_INT_CAUSE4",
 4408                 .cause_reg = A_SGE_INT_CAUSE4,
 4409                 .enable_reg = A_SGE_INT_ENABLE4,
 4410                 .fatal = 0,
 4411                 .flags = 0,
 4412                 .details = NULL,
 4413                 .actions = NULL,
 4414         };
 4415         static const struct intr_info sge_int5_info = {
 4416                 .name = "SGE_INT_CAUSE5",
 4417                 .cause_reg = A_SGE_INT_CAUSE5,
 4418                 .enable_reg = A_SGE_INT_ENABLE5,
 4419                 .fatal = 0xffffffff,
 4420                 .flags = NONFATAL_IF_DISABLED,
 4421                 .details = NULL,
 4422                 .actions = NULL,
 4423         };
 4424         static const struct intr_info sge_int6_info = {
 4425                 .name = "SGE_INT_CAUSE6",
 4426                 .cause_reg = A_SGE_INT_CAUSE6,
 4427                 .enable_reg = A_SGE_INT_ENABLE6,
 4428                 .fatal = 0,
 4429                 .flags = 0,
 4430                 .details = NULL,
 4431                 .actions = NULL,
 4432         };
 4433 
 4434         bool fatal;
 4435         u32 v;
 4436 
 4437         if (chip_id(adap) <= CHELSIO_T5) {
 4438                 sge_int3_info.details = sge_int3_details;
 4439         } else {
 4440                 sge_int3_info.details = t6_sge_int3_details;
 4441         }
 4442 
 4443         fatal = false;
 4444         fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
 4445         fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
 4446         fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
 4447         fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
 4448         if (chip_id(adap) >= CHELSIO_T5)
 4449                 fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
 4450         if (chip_id(adap) >= CHELSIO_T6)
 4451                 fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
 4452 
 4453         v = t4_read_reg(adap, A_SGE_ERROR_STATS);
 4454         if (v & F_ERROR_QID_VALID) {
 4455                 CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
 4456                 if (v & F_UNCAPTURED_ERROR)
 4457                         CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
 4458                 t4_write_reg(adap, A_SGE_ERROR_STATS,
 4459                     F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
 4460         }
 4461 
 4462         return (fatal);
 4463 }
 4464 
 4465 /*
 4466  * CIM interrupt handler.
 4467  */
 4468 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
 4469 {
 4470         static const struct intr_details cim_host_intr_details[] = {
 4471                 /* T6+ */
 4472                 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
 4473 
 4474                 /* T5+ */
 4475                 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
 4476                 { F_PLCIM_MSTRSPDATAPARERR,
 4477                         "PL2CIM master response data parity error" },
 4478                 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
 4479                 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
 4480                 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
 4481                 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
 4482                 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
 4483                 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
 4484 
 4485                 /* T4+ */
 4486                 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
 4487                 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
 4488                 { F_MBHOSTPARERR, "CIM mailbox host read parity error" },
 4489                 { F_MBUPPARERR, "CIM mailbox uP parity error" },
 4490                 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
 4491                 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
 4492                 { F_IBQULPPARERR, "CIM IBQ ULP parity error" },
 4493                 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
 4494                 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR,   /* same bit */
 4495                         "CIM IBQ PCIe/SGE_HI parity error" },
 4496                 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
 4497                 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
 4498                 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
 4499                 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
 4500                 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
 4501                 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
 4502                 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
 4503                 { F_TIMER1INT, "CIM TIMER1 interrupt" },
 4504                 { F_TIMER0INT, "CIM TIMER0 interrupt" },
 4505                 { F_PREFDROPINT, "CIM control register prefetch drop" },
 4506                 { 0 }
 4507         };
 4508         static const struct intr_info cim_host_intr_info = {
 4509                 .name = "CIM_HOST_INT_CAUSE",
 4510                 .cause_reg = A_CIM_HOST_INT_CAUSE,
 4511                 .enable_reg = A_CIM_HOST_INT_ENABLE,
 4512                 .fatal = 0x007fffe6,
 4513                 .flags = NONFATAL_IF_DISABLED,
 4514                 .details = cim_host_intr_details,
 4515                 .actions = NULL,
 4516         };
 4517         static const struct intr_details cim_host_upacc_intr_details[] = {
 4518                 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
 4519                 { F_TIMEOUTMAINT, "CIM PIF MA timeout" },
 4520                 { F_TIMEOUTINT, "CIM PIF timeout" },
 4521                 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
 4522                 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
 4523                 { F_BLKWRPLINT, "CIM block write to PL space" },
 4524                 { F_BLKRDPLINT, "CIM block read from PL space" },
 4525                 { F_SGLWRPLINT,
 4526                         "CIM single write to PL space with illegal BEs" },
 4527                 { F_SGLRDPLINT,
 4528                         "CIM single read from PL space with illegal BEs" },
 4529                 { F_BLKWRCTLINT, "CIM block write to CTL space" },
 4530                 { F_BLKRDCTLINT, "CIM block read from CTL space" },
 4531                 { F_SGLWRCTLINT,
 4532                         "CIM single write to CTL space with illegal BEs" },
 4533                 { F_SGLRDCTLINT,
 4534                         "CIM single read from CTL space with illegal BEs" },
 4535                 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
 4536                 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
 4537                 { F_SGLWREEPROMINT,
 4538                         "CIM single write to EEPROM space with illegal BEs" },
 4539                 { F_SGLRDEEPROMINT,
 4540                         "CIM single read from EEPROM space with illegal BEs" },
 4541                 { F_BLKWRFLASHINT, "CIM block write to flash space" },
 4542                 { F_BLKRDFLASHINT, "CIM block read from flash space" },
 4543                 { F_SGLWRFLASHINT, "CIM single write to flash space" },
 4544                 { F_SGLRDFLASHINT,
 4545                         "CIM single read from flash space with illegal BEs" },
 4546                 { F_BLKWRBOOTINT, "CIM block write to boot space" },
 4547                 { F_BLKRDBOOTINT, "CIM block read from boot space" },
 4548                 { F_SGLWRBOOTINT, "CIM single write to boot space" },
 4549                 { F_SGLRDBOOTINT,
 4550                         "CIM single read from boot space with illegal BEs" },
 4551                 { F_ILLWRBEINT, "CIM illegal write BEs" },
 4552                 { F_ILLRDBEINT, "CIM illegal read BEs" },
 4553                 { F_ILLRDINT, "CIM illegal read" },
 4554                 { F_ILLWRINT, "CIM illegal write" },
 4555                 { F_ILLTRANSINT, "CIM illegal transaction" },
 4556                 { F_RSVDSPACEINT, "CIM reserved space access" },
 4557                 { 0 }
 4558         };
 4559         static const struct intr_info cim_host_upacc_intr_info = {
 4560                 .name = "CIM_HOST_UPACC_INT_CAUSE",
 4561                 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
 4562                 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
 4563                 .fatal = 0x3fffeeff,
 4564                 .flags = NONFATAL_IF_DISABLED,
 4565                 .details = cim_host_upacc_intr_details,
 4566                 .actions = NULL,
 4567         };
 4568         static const struct intr_info cim_pf_host_intr_info = {
 4569                 .name = "CIM_PF_HOST_INT_CAUSE",
 4570                 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
 4571                 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
 4572                 .fatal = 0,
 4573                 .flags = 0,
 4574                 .details = NULL,
 4575                 .actions = NULL,
 4576         };
 4577         u32 val, fw_err;
 4578         bool fatal;
 4579 
 4580         /*
 4581          * When the Firmware detects an internal error which normally wouldn't
 4582          * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
 4583          * to make sure the Host sees the Firmware Crash.  So if we have a
 4584          * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
 4585          * interrupt.
 4586          */
 4587         fw_err = t4_read_reg(adap, A_PCIE_FW);
 4588         val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
 4589         if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
 4590             G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
 4591                 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
 4592         }
 4593 
 4594         fatal = (fw_err & F_PCIE_FW_ERR) != 0;
 4595         fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
 4596         fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
 4597         fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
 4598         if (fatal)
 4599                 t4_os_cim_err(adap);
 4600 
 4601         return (fatal);
 4602 }
 4603 
 4604 /*
 4605  * ULP RX interrupt handler.
 4606  */
 4607 static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
 4608 {
 4609         static const struct intr_details ulprx_intr_details[] = {
 4610                 /* T5+ */
 4611                 { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
 4612                 { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },
 4613 
 4614                 /* T4+ */
 4615                 { F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
 4616                 { F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
 4617                 { 0x007fffff, "ULPRX parity error" },
 4618                 { 0 }
 4619         };
 4620         static const struct intr_info ulprx_intr_info = {
 4621                 .name = "ULP_RX_INT_CAUSE",
 4622                 .cause_reg = A_ULP_RX_INT_CAUSE,
 4623                 .enable_reg = A_ULP_RX_INT_ENABLE,
 4624                 .fatal = 0x07ffffff,
 4625                 .flags = NONFATAL_IF_DISABLED,
 4626                 .details = ulprx_intr_details,
 4627                 .actions = NULL,
 4628         };
 4629         static const struct intr_info ulprx_intr2_info = {
 4630                 .name = "ULP_RX_INT_CAUSE_2",
 4631                 .cause_reg = A_ULP_RX_INT_CAUSE_2,
 4632                 .enable_reg = A_ULP_RX_INT_ENABLE_2,
 4633                 .fatal = 0,
 4634                 .flags = 0,
 4635                 .details = NULL,
 4636                 .actions = NULL,
 4637         };
 4638         bool fatal = false;
 4639 
 4640         fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
 4641         fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
 4642 
 4643         return (fatal);
 4644 }
 4645 
 4646 /*
 4647  * ULP TX interrupt handler.
 4648  */
 4649 static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
 4650 {
 4651         static const struct intr_details ulptx_intr_details[] = {
 4652                 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
 4653                 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
 4654                 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
 4655                 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
 4656                 { 0x0fffffff, "ULPTX parity error" },
 4657                 { 0 }
 4658         };
 4659         static const struct intr_info ulptx_intr_info = {
 4660                 .name = "ULP_TX_INT_CAUSE",
 4661                 .cause_reg = A_ULP_TX_INT_CAUSE,
 4662                 .enable_reg = A_ULP_TX_INT_ENABLE,
 4663                 .fatal = 0x0fffffff,
 4664                 .flags = NONFATAL_IF_DISABLED,
 4665                 .details = ulptx_intr_details,
 4666                 .actions = NULL,
 4667         };
 4668         static const struct intr_info ulptx_intr2_info = {
 4669                 .name = "ULP_TX_INT_CAUSE_2",
 4670                 .cause_reg = A_ULP_TX_INT_CAUSE_2,
 4671                 .enable_reg = A_ULP_TX_INT_ENABLE_2,
 4672                 .fatal = 0xf0,
 4673                 .flags = NONFATAL_IF_DISABLED,
 4674                 .details = NULL,
 4675                 .actions = NULL,
 4676         };
 4677         bool fatal = false;
 4678 
 4679         fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
 4680         fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
 4681 
 4682         return (fatal);
 4683 }
 4684 
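      /*
       * Dumps the PM_TX debug stats registers.  Registered as the action for
       * every PM_TX interrupt cause bit in pmtx_intr_handler() below.
       */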
 4685 static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
 4686 {
 4687         int i;
 4688         u32 data[17];
 4689 
 4690         t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
 4691             ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
 4692         for (i = 0; i < ARRAY_SIZE(data); i++) {
 4693                 CH_ALERT(adap, "  - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
 4694                     A_PM_TX_DBG_STAT0 + i, data[i]);
 4695         }
 4696 
 4697         return (false);
 4698 }
 4699 
 4700 /*
 4701  * PM TX interrupt handler.
 4702  */
 4703 static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
 4704 {
 4705         static const struct intr_action pmtx_intr_actions[] = {
 4706                 { 0xffffffff, 0, pmtx_dump_dbg_stats },
 4707                 { 0 },
 4708         };
 4709         static const struct intr_details pmtx_intr_details[] = {
 4710                 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
 4711                 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
 4712                 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
 4713                 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
 4714                 { 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
 4715                 { 0x00f00000, "PMTX icspi FIFO Rx framing error" },
 4716                 { 0x000f0000, "PMTX icspi FIFO Tx framing error" },
 4717                 { 0x0000f000, "PMTX oespi FIFO Rx framing error" },
 4718                 { 0x00000f00, "PMTX oespi FIFO Tx framing error" },
 4719                 { 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
 4720                 { F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
 4721                 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
 4722                 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
 4723                 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
 4724                 { 0 }
 4725         };
 4726         static const struct intr_info pmtx_intr_info = {
 4727                 .name = "PM_TX_INT_CAUSE",
 4728                 .cause_reg = A_PM_TX_INT_CAUSE,
 4729                 .enable_reg = A_PM_TX_INT_ENABLE,
 4730                 .fatal = 0xffffffff,
 4731                 .flags = 0,
 4732                 .details = pmtx_intr_details,
 4733                 .actions = pmtx_intr_actions,
 4734         };
 4735 
 4736         return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
 4737 }
 4738 
 4739 /*
 4740  * PM RX interrupt handler.
 4741  */
 4742 static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
 4743 {
 4744         static const struct intr_details pmrx_intr_details[] = {
 4745                 /* T6+ */
 4746                 { 0x18000000, "PMRX ospi overflow" },
 4747                 { F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
 4748                 { F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
 4749                 { F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
 4750                 { F_SDC_ERR, "PMRX SDC error" },
 4751 
 4752                 /* T4+ */
 4753                 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
 4754                 { 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
 4755                 { 0x0003c000, "PMRX iespi Rx framing error" },
 4756                 { 0x00003c00, "PMRX iespi Tx framing error" },
 4757                 { 0x00000300, "PMRX ocspi Rx framing error" },
 4758                 { 0x000000c0, "PMRX ocspi Tx framing error" },
 4759                 { 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
 4760                 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
 4761                 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
 4762                 { F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
 4763                 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
 4764                 { 0 }
 4765         };
 4766         static const struct intr_info pmrx_intr_info = {
 4767                 .name = "PM_RX_INT_CAUSE",
 4768                 .cause_reg = A_PM_RX_INT_CAUSE,
 4769                 .enable_reg = A_PM_RX_INT_ENABLE,
 4770                 .fatal = 0x1fffffff,
 4771                 .flags = NONFATAL_IF_DISABLED,
 4772                 .details = pmrx_intr_details,
 4773                 .actions = NULL,
 4774         };
 4775 
 4776         return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
 4777 }
 4778 
 4779 /*
 4780  * CPL switch interrupt handler.
 4781  */
 4782 static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
 4783 {
 4784         static const struct intr_details cplsw_intr_details[] = {
 4785                 /* T5+ */
 4786                 { F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
 4787                 { F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
 4788 
 4789                 /* T4+ */
 4790                 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
 4791                 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
 4792                 { F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
 4793                 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
 4794                 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
 4795                 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
 4796                 { 0 }
 4797         };
 4798         static const struct intr_info cplsw_intr_info = {
 4799                 .name = "CPL_INTR_CAUSE",
 4800                 .cause_reg = A_CPL_INTR_CAUSE,
 4801                 .enable_reg = A_CPL_INTR_ENABLE,
 4802                 .fatal = 0xff,
 4803                 .flags = NONFATAL_IF_DISABLED,
 4804                 .details = cplsw_intr_details,
 4805                 .actions = NULL,
 4806         };
 4807 
 4808         return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
 4809 }
 4810 
 4811 #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
 4812 #define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
 4813 #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
 4814     F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
 4815     F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
 4816     F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
 4817 #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
 4818     F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
 4819     F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
 4820 
 4821 /*
 4822  * LE interrupt handler.
 4823  */
 4824 static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
 4825 {
 4826         static const struct intr_details le_intr_details[] = {
 4827                 { F_REQQPARERR, "LE request queue parity error" },
 4828                 { F_UNKNOWNCMD, "LE unknown command" },
 4829                 { F_ACTRGNFULL, "LE active region full" },
 4830                 { F_PARITYERR, "LE parity error" },
 4831                 { F_LIPMISS, "LE LIP miss" },
 4832                 { F_LIP0, "LE 0 LIP error" },
 4833                 { 0 }
 4834         };
 4835         static const struct intr_details t6_le_intr_details[] = {
 4836                 { F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
 4837                 { F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
 4838                 { F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
 4839                 { F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
 4840                 { F_TOTCNTERR, "LE total active < TCAM count" },
 4841                 { F_CMDPRSRINTERR, "LE internal error in parser" },
 4842                 { F_CMDTIDERR, "Incorrect tid in LE command" },
 4843                 { F_T6_ACTRGNFULL, "LE active region full" },
 4844                 { F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
 4845                 { F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
 4846                 { F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
 4847                 { F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
 4848                 { F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
 4849                 { F_TCAMACCFAIL, "LE TCAM access failure" },
 4850                 { F_T6_UNKNOWNCMD, "LE unknown command" },
 4851                 { F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
 4852                 { F_T6_LIPMISS, "LE CLIP lookup miss" },
 4853                 { T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
 4854                 { 0 }
 4855         };
 4856         struct intr_info le_intr_info = {
 4857                 .name = "LE_DB_INT_CAUSE",
 4858                 .cause_reg = A_LE_DB_INT_CAUSE,
 4859                 .enable_reg = A_LE_DB_INT_ENABLE,
 4860                 .fatal = 0,
 4861                 .flags = NONFATAL_IF_DISABLED,
 4862                 .details = NULL,
 4863                 .actions = NULL,
 4864         };
 4865 
 4866         if (chip_id(adap) <= CHELSIO_T5) {
 4867                 le_intr_info.details = le_intr_details;
 4868                 le_intr_info.fatal = T5_LE_FATAL_MASK;
 4869         } else {
 4870                 le_intr_info.details = t6_le_intr_details;
 4871                 le_intr_info.fatal = T6_LE_FATAL_MASK;
 4872         }
 4873 
 4874         return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
 4875 }
 4876 
 4877 /*
 4878  * MPS interrupt handler.
 4879  */
 4880 static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
 4881 {
 4882         static const struct intr_details mps_rx_perr_intr_details[] = {
 4883                 { 0xffffffff, "MPS Rx parity error" },
 4884                 { 0 }
 4885         };
 4886         static const struct intr_info mps_rx_perr_intr_info = {
 4887                 .name = "MPS_RX_PERR_INT_CAUSE",
 4888                 .cause_reg = A_MPS_RX_PERR_INT_CAUSE,
 4889                 .enable_reg = A_MPS_RX_PERR_INT_ENABLE,
 4890                 .fatal = 0xffffffff,
 4891                 .flags = NONFATAL_IF_DISABLED,
 4892                 .details = mps_rx_perr_intr_details,
 4893                 .actions = NULL,
 4894         };
 4895         static const struct intr_details mps_tx_intr_details[] = {
 4896                 { F_PORTERR, "MPS Tx destination port is disabled" },
 4897                 { F_FRMERR, "MPS Tx framing error" },
 4898                 { F_SECNTERR, "MPS Tx SOP/EOP error" },
 4899                 { F_BUBBLE, "MPS Tx underflow" },
 4900                 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
 4901                 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
 4902                 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
 4903                 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
 4904                 { 0 }
 4905         };
 4906         static const struct intr_info mps_tx_intr_info = {
 4907                 .name = "MPS_TX_INT_CAUSE",
 4908                 .cause_reg = A_MPS_TX_INT_CAUSE,
 4909                 .enable_reg = A_MPS_TX_INT_ENABLE,
 4910                 .fatal = 0x1ffff,
 4911                 .flags = NONFATAL_IF_DISABLED,
 4912                 .details = mps_tx_intr_details,
 4913                 .actions = NULL,
 4914         };
 4915         static const struct intr_details mps_trc_intr_details[] = {
 4916                 { F_MISCPERR, "MPS TRC misc parity error" },
 4917                 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
 4918                 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
 4919                 { 0 }
 4920         };
 4921         static const struct intr_info mps_trc_intr_info = {
 4922                 .name = "MPS_TRC_INT_CAUSE",
 4923                 .cause_reg = A_MPS_TRC_INT_CAUSE,
 4924                 .enable_reg = A_MPS_TRC_INT_ENABLE,
 4925                 .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
 4926                 .flags = 0,
 4927                 .details = mps_trc_intr_details,
 4928                 .actions = NULL,
 4929         };
 4930         static const struct intr_details mps_stat_sram_intr_details[] = {
 4931                 { 0xffffffff, "MPS statistics SRAM parity error" },
 4932                 { 0 }
 4933         };
 4934         static const struct intr_info mps_stat_sram_intr_info = {
 4935                 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
 4936                 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
 4937                 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
 4938                 .fatal = 0x1fffffff,
 4939                 .flags = NONFATAL_IF_DISABLED,
 4940                 .details = mps_stat_sram_intr_details,
 4941                 .actions = NULL,
 4942         };
 4943         static const struct intr_details mps_stat_tx_intr_details[] = {
 4944                 { 0xffffff, "MPS statistics Tx FIFO parity error" },
 4945                 { 0 }
 4946         };
 4947         static const struct intr_info mps_stat_tx_intr_info = {
 4948                 .name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
 4949                 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
 4950                 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
 4951                 .fatal = 0xffffff,
 4952                 .flags = NONFATAL_IF_DISABLED,
 4953                 .details = mps_stat_tx_intr_details,
 4954                 .actions = NULL,
 4955         };
 4956         static const struct intr_details mps_stat_rx_intr_details[] = {
 4957                 { 0xffffff, "MPS statistics Rx FIFO parity error" },
 4958                 { 0 }
 4959         };
 4960         static const struct intr_info mps_stat_rx_intr_info = {
 4961                 .name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
 4962                 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
 4963                 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
 4964                 .fatal = 0xffffff,
 4965                 .flags = 0,
 4966                 .details = mps_stat_rx_intr_details,
 4967                 .actions = NULL,
 4968         };
 4969         static const struct intr_details mps_cls_intr_details[] = {
 4970                 { F_HASHSRAM, "MPS hash SRAM parity error" },
 4971                 { F_MATCHTCAM, "MPS match TCAM parity error" },
 4972                 { F_MATCHSRAM, "MPS match SRAM parity error" },
 4973                 { 0 }
 4974         };
 4975         static const struct intr_info mps_cls_intr_info = {
 4976                 .name = "MPS_CLS_INT_CAUSE",
 4977                 .cause_reg = A_MPS_CLS_INT_CAUSE,
 4978                 .enable_reg = A_MPS_CLS_INT_ENABLE,
 4979                 .fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
 4980                 .flags = 0,
 4981                 .details = mps_cls_intr_details,
 4982                 .actions = NULL,
 4983         };
 4984         static const struct intr_details mps_stat_sram1_intr_details[] = {
 4985                 { 0xff, "MPS statistics SRAM1 parity error" },
 4986                 { 0 }
 4987         };
 4988         static const struct intr_info mps_stat_sram1_intr_info = {
 4989                 .name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
 4990                 .cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
 4991                 .enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
 4992                 .fatal = 0xff,
 4993                 .flags = 0,
 4994                 .details = mps_stat_sram1_intr_details,
 4995                 .actions = NULL,
 4996         };
 4997 
 4998         bool fatal;
 4999 
 5000         fatal = false;
 5001         fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
 5002         fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
 5003         fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
 5004         fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
 5005         fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
 5006         fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
 5007         fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
 5008         if (chip_id(adap) > CHELSIO_T4) {
 5009                 fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
 5010                     verbose);
 5011         }
 5012 
 5013         t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
 5014         t4_read_reg(adap, A_MPS_INT_CAUSE);     /* flush */
 5015 
 5016         return (fatal);
 5018 }
 5019 
 5020 /*
 5021  * EDC/MC interrupt handler.
 5022  */
 5023 static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
 5024 {
 5025         static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
 5026         unsigned int count_reg, v;
 5027         static const struct intr_details mem_intr_details[] = {
 5028                 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
 5029                 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
 5030                 { F_PERR_INT_CAUSE, "FIFO parity error" },
 5031                 { 0 }
 5032         };
 5033         struct intr_info ii = {
 5034                 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
 5035                 .details = mem_intr_details,
 5036                 .flags = 0,
 5037                 .actions = NULL,
 5038         };
 5039         bool fatal;
 5040 
 5041         switch (idx) {
 5042         case MEM_EDC0:
 5043                 ii.name = "EDC0_INT_CAUSE";
 5044                 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
 5045                 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
 5046                 count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
 5047                 break;
 5048         case MEM_EDC1:
 5049                 ii.name = "EDC1_INT_CAUSE";
 5050                 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
 5051                 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
 5052                 count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
 5053                 break;
 5054         case MEM_MC0:
 5055                 ii.name = "MC0_INT_CAUSE";
 5056                 if (is_t4(adap)) {
 5057                         ii.cause_reg = A_MC_INT_CAUSE;
 5058                         ii.enable_reg = A_MC_INT_ENABLE;
 5059                         count_reg = A_MC_ECC_STATUS;
 5060                 } else {
 5061                         ii.cause_reg = A_MC_P_INT_CAUSE;
 5062                         ii.enable_reg = A_MC_P_INT_ENABLE;
 5063                         count_reg = A_MC_P_ECC_STATUS;
 5064                 }
 5065                 break;
 5066         case MEM_MC1:
 5067                 ii.name = "MC1_INT_CAUSE";
 5068                 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
 5069                 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
 5070                 count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
 5071                 break;
 5072         }
 5073 
 5074         fatal = t4_handle_intr(adap, &ii, 0, verbose);
 5075 
 5076         v = t4_read_reg(adap, count_reg);
 5077         if (v != 0) {
 5078                 if (G_ECC_UECNT(v) != 0) {
 5079                         CH_ALERT(adap,
 5080                             "%s: %u uncorrectable ECC data error(s)\n",
 5081                             name[idx], G_ECC_UECNT(v));
 5082                 }
 5083                 if (G_ECC_CECNT(v) != 0) {
 5084                         if (idx <= MEM_EDC1)
 5085                                 t4_edc_err_read(adap, idx);
 5086                         CH_WARN_RATELIMIT(adap,
 5087                             "%s: %u correctable ECC data error(s)\n",
 5088                             name[idx], G_ECC_CECNT(v));
 5089                 }
 5090                 t4_write_reg(adap, count_reg, 0xffffffff);
 5091         }
 5092 
 5093         return (fatal);
 5094 }
 5095 
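      /*
       * Reports and clears an MA address wrap-around error.  Used as the
       * action for F_MEM_WRAP_INT_CAUSE in ma_intr_handler() below.
       */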
 5096 static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
 5097 {
 5098         u32 v;
 5099 
 5100         v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
 5101         CH_ALERT(adap,
 5102             "MA address wrap-around error by client %u to address %#x\n",
 5103             G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
 5104         t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
 5105 
 5106         return (false);
 5107 }
 5108 
 5110 /*
 5111  * MA interrupt handler.
 5112  */
 5113 static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
 5114 {
 5115         static const struct intr_action ma_intr_actions[] = {
 5116                 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
 5117                 { 0 },
 5118         };
 5119         static const struct intr_info ma_intr_info = {
 5120                 .name = "MA_INT_CAUSE",
 5121                 .cause_reg = A_MA_INT_CAUSE,
 5122                 .enable_reg = A_MA_INT_ENABLE,
 5123                 .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
 5124                 .flags = NONFATAL_IF_DISABLED,
 5125                 .details = NULL,
 5126                 .actions = ma_intr_actions,
 5127         };
 5128         static const struct intr_info ma_perr_status1 = {
 5129                 .name = "MA_PARITY_ERROR_STATUS1",
 5130                 .cause_reg = A_MA_PARITY_ERROR_STATUS1,
 5131                 .enable_reg = A_MA_PARITY_ERROR_ENABLE1,
 5132                 .fatal = 0xffffffff,
 5133                 .flags = 0,
 5134                 .details = NULL,
 5135                 .actions = NULL,
 5136         };
 5137         static const struct intr_info ma_perr_status2 = {
 5138                 .name = "MA_PARITY_ERROR_STATUS2",
 5139                 .cause_reg = A_MA_PARITY_ERROR_STATUS2,
 5140                 .enable_reg = A_MA_PARITY_ERROR_ENABLE2,
 5141                 .fatal = 0xffffffff,
 5142                 .flags = 0,
 5143                 .details = NULL,
 5144                 .actions = NULL,
 5145         };
 5146         bool fatal;
 5147 
 5148         fatal = false;
 5149         fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
 5150         fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
 5151         if (chip_id(adap) > CHELSIO_T4)
 5152                 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
 5153 
 5154         return (fatal);
 5155 }
 5156 
 5157 /*
 5158  * SMB interrupt handler.
 5159  */
 5160 static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
 5161 {
 5162         static const struct intr_details smb_intr_details[] = {
 5163                 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
 5164                 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
 5165                 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
 5166                 { 0 }
 5167         };
 5168         static const struct intr_info smb_intr_info = {
 5169                 .name = "SMB_INT_CAUSE",
 5170                 .cause_reg = A_SMB_INT_CAUSE,
 5171                 .enable_reg = A_SMB_INT_ENABLE,
 5172                 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
 5173                 .flags = 0,
 5174                 .details = smb_intr_details,
 5175                 .actions = NULL,
 5176         };
 5177 
 5178         return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
 5179 }
 5180 
 5181 /*
 5182  * NC-SI interrupt handler.
 5183  */
 5184 static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
 5185 {
 5186         static const struct intr_details ncsi_intr_details[] = {
 5187                 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
 5188                 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
 5189                 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
 5190                 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
 5191                 { 0 }
 5192         };
 5193         static const struct intr_info ncsi_intr_info = {
 5194                 .name = "NCSI_INT_CAUSE",
 5195                 .cause_reg = A_NCSI_INT_CAUSE,
 5196                 .enable_reg = A_NCSI_INT_ENABLE,
 5197                 .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
 5198                     F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
 5199                 .flags = 0,
 5200                 .details = ncsi_intr_details,
 5201                 .actions = NULL,
 5202         };
 5203 
 5204         return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
 5205 }
 5206 
 5207 /*
 5208  * MAC interrupt handler.
 5209  */
 5210 static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
 5211 {
 5212         static const struct intr_details mac_intr_details[] = {
 5213                 { F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
 5214                 { F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
 5215                 { 0 }
 5216         };
 5217         char name[32];
 5218         struct intr_info ii;
 5219         bool fatal = false;
 5220 
 5221         if (is_t4(adap)) {
 5222                 snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
 5223                 ii.name = &name[0];
 5224                 ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
 5225                 ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
 5226                 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
 5227                 ii.flags = 0;
 5228                 ii.details = mac_intr_details;
 5229                 ii.actions = NULL;
 5230         } else {
 5231                 snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
 5232                 ii.name = &name[0];
 5233                 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
 5234                 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
 5235                 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
 5236                 ii.flags = 0;
 5237                 ii.details = mac_intr_details;
 5238                 ii.actions = NULL;
 5239         }
 5240         fatal |= t4_handle_intr(adap, &ii, 0, verbose);
 5241 
 5242         if (chip_id(adap) >= CHELSIO_T5) {
 5243                 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
 5244                 ii.name = &name[0];
 5245                 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
 5246                 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
 5247                 ii.fatal = 0;
 5248                 ii.flags = 0;
 5249                 ii.details = NULL;
 5250                 ii.actions = NULL;
 5251                 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
 5252         }
 5253 
 5254         if (chip_id(adap) >= CHELSIO_T6) {
 5255                 snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
 5256                 ii.name = &name[0];
 5257                 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
 5258                 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
 5259                 ii.fatal = 0;
 5260                 ii.flags = 0;
 5261                 ii.details = NULL;
 5262                 ii.actions = NULL;
 5263                 fatal |= t4_handle_intr(adap, &ii, 0, verbose);
 5264         }
 5265 
 5266         return (fatal);
 5267 }
 5268 
 5269 static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
 5270 {
 5271 
 5272         CH_ALERT(adap, "    PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
 5273             t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
 5274             t4_read_reg(adap, A_PL_TIMEOUT_STATUS1));
 5275 
 5276         return (false);
 5277 }
 5278 
 5279 static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
 5280 {
 5281         static const struct intr_action plpl_intr_actions[] = {
 5282                 { F_TIMEOUT, 0, pl_timeout_status },
 5283                 { 0 },
 5284         };
 5285         static const struct intr_details plpl_intr_details[] = {
 5286                 { F_PL_BUSPERR, "Bus parity error" },
 5287                 { F_FATALPERR, "Fatal parity error" },
 5288                 { F_INVALIDACCESS, "Global reserved memory access" },
 5289                 { F_TIMEOUT,  "Bus timeout" },
 5290                 { F_PLERR, "Module reserved access" },
 5291                 { F_PERRVFID, "VFID_MAP parity error" },
 5292                 { 0 }
 5293         };
 5294         static const struct intr_info plpl_intr_info = {
 5295                 .name = "PL_PL_INT_CAUSE",
 5296                 .cause_reg = A_PL_PL_INT_CAUSE,
 5297                 .enable_reg = A_PL_PL_INT_ENABLE,
 5298                 .fatal = F_FATALPERR | F_PERRVFID,
 5299                 .flags = NONFATAL_IF_DISABLED,
 5300                 .details = plpl_intr_details,
 5301                 .actions = plpl_intr_actions,
 5302         };
 5303 
 5304         return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
 5305 }
 5306 
 5307 /**
 5308  *      t4_slow_intr_handler - control path interrupt handler
 5309  *      @adap: the adapter
 5310  *      @verbose: increased verbosity, for debug
 5311  *
 5312  *      T4 interrupt handler for non-data global interrupt events, e.g., errors.
 5313  *      The designation 'slow' is because it involves register reads, while
 5314  *      data interrupts typically don't involve any MMIOs.
 5315  */
 5316 bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
 5317 {
 5318         static const struct intr_details pl_intr_details[] = {
 5319                 { F_MC1, "MC1" },
 5320                 { F_UART, "UART" },
 5321                 { F_ULP_TX, "ULP TX" },
 5322                 { F_SGE, "SGE" },
 5323                 { F_HMA, "HMA" },
 5324                 { F_CPL_SWITCH, "CPL Switch" },
 5325                 { F_ULP_RX, "ULP RX" },
 5326                 { F_PM_RX, "PM RX" },
 5327                 { F_PM_TX, "PM TX" },
 5328                 { F_MA, "MA" },
 5329                 { F_TP, "TP" },
 5330                 { F_LE, "LE" },
 5331                 { F_EDC1, "EDC1" },
 5332                 { F_EDC0, "EDC0" },
 5333                 { F_MC, "MC0" },
 5334                 { F_PCIE, "PCIE" },
 5335                 { F_PMU, "PMU" },
 5336                 { F_MAC3, "MAC3" },
 5337                 { F_MAC2, "MAC2" },
 5338                 { F_MAC1, "MAC1" },
 5339                 { F_MAC0, "MAC0" },
 5340                 { F_SMB, "SMB" },
 5341                 { F_SF, "SF" },
 5342                 { F_PL, "PL" },
 5343                 { F_NCSI, "NC-SI" },
 5344                 { F_MPS, "MPS" },
 5345                 { F_MI, "MI" },
 5346                 { F_DBG, "DBG" },
 5347                 { F_I2CM, "I2CM" },
 5348                 { F_CIM, "CIM" },
 5349                 { 0 }
 5350         };
 5351         static const struct intr_info pl_perr_cause = {
 5352                 .name = "PL_PERR_CAUSE",
 5353                 .cause_reg = A_PL_PERR_CAUSE,
 5354                 .enable_reg = A_PL_PERR_ENABLE,
 5355                 .fatal = 0xffffffff,
 5356                 .flags = 0,
 5357                 .details = pl_intr_details,
 5358                 .actions = NULL,
 5359         };
 5360         static const struct intr_action pl_intr_action[] = {
 5361                 { F_MC1, MEM_MC1, mem_intr_handler },
 5362                 { F_ULP_TX, -1, ulptx_intr_handler },
 5363                 { F_SGE, -1, sge_intr_handler },
 5364                 { F_CPL_SWITCH, -1, cplsw_intr_handler },
 5365                 { F_ULP_RX, -1, ulprx_intr_handler },
 5366                 { F_PM_RX, -1, pmrx_intr_handler},
 5367                 { F_PM_TX, -1, pmtx_intr_handler},
 5368                 { F_MA, -1, ma_intr_handler },
 5369                 { F_TP, -1, tp_intr_handler },
 5370                 { F_LE, -1, le_intr_handler },
 5371                 { F_EDC1, MEM_EDC1, mem_intr_handler },
 5372                 { F_EDC0, MEM_EDC0, mem_intr_handler },
 5373                 { F_MC0, MEM_MC0, mem_intr_handler },
 5374                 { F_PCIE, -1, pcie_intr_handler },
 5375                 { F_MAC3, 3, mac_intr_handler},
 5376                 { F_MAC2, 2, mac_intr_handler},
 5377                 { F_MAC1, 1, mac_intr_handler},
 5378                 { F_MAC0, 0, mac_intr_handler},
 5379                 { F_SMB, -1, smb_intr_handler},
 5380                 { F_PL, -1, plpl_intr_handler },
 5381                 { F_NCSI, -1, ncsi_intr_handler},
 5382                 { F_MPS, -1, mps_intr_handler },
 5383                 { F_CIM, -1, cim_intr_handler },
 5384                 { 0 }
 5385         };
 5386         static const struct intr_info pl_intr_info = {
 5387                 .name = "PL_INT_CAUSE",
 5388                 .cause_reg = A_PL_INT_CAUSE,
 5389                 .enable_reg = A_PL_INT_ENABLE,
 5390                 .fatal = 0,
 5391                 .flags = 0,
 5392                 .details = pl_intr_details,
 5393                 .actions = pl_intr_action,
 5394         };
 5395         u32 perr;
 5396 
 5397         perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
 5398         if (verbose || perr != 0) {
 5399                 t4_show_intr_info(adap, &pl_perr_cause, perr);
 5400                 if (perr != 0)
 5401                         t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
 5402                 if (verbose)
 5403                         perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
 5404         }
 5405 
 5406         return (t4_handle_intr(adap, &pl_intr_info, perr, verbose));
 5407 }
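
/*
 * Usage sketch (an assumption about the caller, not taken from this file):
 * a driver interrupt routine would invoke this for non-data events flagged
 * in PL_INT_CAUSE and escalate when it returns true, e.g.:
 *
 *      if (t4_slow_intr_handler(sc, false))
 *              device_printf(sc->dev, "fatal hardware error\n");
 *
 * Each cause bit with an entry in pl_intr_action above is dispatched to the
 * matching module handler; bits without an action are only reported through
 * pl_intr_details.
 */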
 5408 
 5409 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
 5410 
 5411 /**
 5412  *      t4_intr_enable - enable interrupts
 5413  *      @adap: the adapter whose interrupts should be enabled
 5414  *
 5415  *      Enable PF-specific interrupts for the calling function and the top-level
 5416  *      interrupt concentrator for global interrupts.  Interrupts are already
 5417  *      enabled at each module; here we just enable the roots of the interrupt
 5418  *      hierarchies.
 5419  *
 5420  *      Note: this function should be called only when the driver manages
 5421  *      non PF-specific interrupts from the various HW modules.  Only one PCI
 5422  *      function at a time should be doing this.
 5423  */
 5424 void t4_intr_enable(struct adapter *adap)
 5425 {
 5426         u32 val = 0;
 5427 
 5428         if (chip_id(adap) <= CHELSIO_T5)
 5429                 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
 5430         else
 5431                 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
 5432         val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
 5433             F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
 5434             F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
 5435             F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
 5436             F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
 5437             F_EGRESS_SIZE_ERR;
 5438         t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
 5439         t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
 5440         t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
 5441         t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
 5442 }
 5443 
 5444 /**
 5445  *      t4_intr_disable - disable interrupts
 5446  *      @adap: the adapter whose interrupts should be disabled
 5447  *
 5448  *      Disable interrupts.  We only disable the top-level interrupt
 5449  *      concentrators.  The caller must be a PCI function managing global
 5450  *      interrupts.
 5451  */
 5452 void t4_intr_disable(struct adapter *adap)
 5453 {
 5454 
 5455         t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
 5456         t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
 5457 }
 5458 
 5459 /**
 5460  *      t4_intr_clear - clear all interrupts
 5461  *      @adap: the adapter whose interrupts should be cleared
 5462  *
 5463  *      Clears all interrupts.  The caller must be a PCI function managing
 5464  *      global interrupts.
 5465  */
 5466 void t4_intr_clear(struct adapter *adap)
 5467 {
 5468         static const u32 cause_reg[] = {
 5469                 A_CIM_HOST_INT_CAUSE,
 5470                 A_CIM_HOST_UPACC_INT_CAUSE,
 5471                 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
 5472                 A_CPL_INTR_CAUSE,
 5473                 EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
 5474                 A_LE_DB_INT_CAUSE,
 5475                 A_MA_INT_WRAP_STATUS,
 5476                 A_MA_PARITY_ERROR_STATUS1,
 5477                 A_MA_INT_CAUSE,
 5478                 A_MPS_CLS_INT_CAUSE,
 5479                 A_MPS_RX_PERR_INT_CAUSE,
 5480                 A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
 5481                 A_MPS_STAT_PERR_INT_CAUSE_SRAM,
 5482                 A_MPS_TRC_INT_CAUSE,
 5483                 A_MPS_TX_INT_CAUSE,
 5484                 A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
 5485                 A_NCSI_INT_CAUSE,
 5486                 A_PCIE_INT_CAUSE,
 5487                 A_PCIE_NONFAT_ERR,
 5488                 A_PL_PL_INT_CAUSE,
 5489                 A_PM_RX_INT_CAUSE,
 5490                 A_PM_TX_INT_CAUSE,
 5491                 A_SGE_INT_CAUSE1,
 5492                 A_SGE_INT_CAUSE2,
 5493                 A_SGE_INT_CAUSE3,
 5494                 A_SGE_INT_CAUSE4,
 5495                 A_SMB_INT_CAUSE,
 5496                 A_TP_INT_CAUSE,
 5497                 A_ULP_RX_INT_CAUSE,
 5498                 A_ULP_RX_INT_CAUSE_2,
 5499                 A_ULP_TX_INT_CAUSE,
 5500                 A_ULP_TX_INT_CAUSE_2,
 5501 
 5502                 MYPF_REG(A_PL_PF_INT_CAUSE),
 5503         };
 5504         int i;
 5505         const int nchan = adap->chip_params->nchan;
 5506 
 5507         for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
 5508                 t4_write_reg(adap, cause_reg[i], 0xffffffff);
 5509 
 5510         if (is_t4(adap)) {
 5511                 t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
 5512                     0xffffffff);
 5513                 t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
 5514                     0xffffffff);
 5515                 t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
 5516                 for (i = 0; i < nchan; i++) {
 5517                         t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
 5518                             0xffffffff);
 5519                 }
 5520         }
 5521         if (chip_id(adap) >= CHELSIO_T5) {
 5522                 t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
 5523                 t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
 5524                 t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
 5525                 t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
 5526                 if (is_t5(adap)) {
 5527                         t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
 5528                             0xffffffff);
 5529                 }
 5530                 for (i = 0; i < nchan; i++) {
 5531                         t4_write_reg(adap, T5_PORT_REG(i,
 5532                             A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
 5533                         if (chip_id(adap) > CHELSIO_T5) {
 5534                                 t4_write_reg(adap, T5_PORT_REG(i,
 5535                                     A_MAC_PORT_PERR_INT_CAUSE_100G),
 5536                                     0xffffffff);
 5537                         }
 5538                         t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
 5539                             0xffffffff);
 5540                 }
 5541         }
 5542         if (chip_id(adap) >= CHELSIO_T6) {
 5543                 t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
 5544         }
 5545 
 5546         t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
 5547         t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
 5548         t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
 5549         (void) t4_read_reg(adap, A_PL_INT_CAUSE);          /* flush */
 5550 }
 5551 
 5552 /**
 5553  *      hash_mac_addr - return the hash value of a MAC address
 5554  *      @addr: the 48-bit Ethernet MAC address
 5555  *
 5556  *      Hashes a MAC address according to the hash function used by HW inexact
 5557  *      (hash) address matching.
 5558  */
 5559 static int hash_mac_addr(const u8 *addr)
 5560 {
 5561         u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
 5562         u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
 5563         a ^= b;
 5564         a ^= (a >> 12);
 5565         a ^= (a >> 6);
 5566         return a & 0x3f;
 5567 }
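
/*
 * Worked example of the fold above: the two 24-bit halves of the address are
 * XORed and then folded down to 6 bits.  For ff:ff:ff:ff:ff:ff both halves
 * are 0xffffff, so a ^ b == 0 and the hash is bucket 0; for 00:00:00:00:00:01
 * the fold leaves a == 1 and the hash is bucket 1.  The 6-bit result selects
 * a bit in the 64-bit inexact (hash) filter vector used by the MAC filter
 * routines.
 */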
 5568 
 5569 /**
 5570  *      t4_config_rss_range - configure a portion of the RSS mapping table
 5571  *      @adapter: the adapter
 5572  *      @mbox: mbox to use for the FW command
 5573  *      @viid: virtual interface whose RSS subtable is to be written
 5574  *      @start: start entry in the table to write
 5575  *      @n: how many table entries to write
 5576  *      @rspq: values for the "response queue" (Ingress Queue) lookup table
 5577  *      @nrspq: number of values in @rspq
 5578  *
 5579  *      Programs the selected part of the VI's RSS mapping table with the
 5580  *      provided values.  If @nrspq < @n the supplied values are used repeatedly
 5581  *      until the full table range is populated.
 5582  *
 5583  *      The caller must ensure the values in @rspq are in the range allowed for
 5584  *      @viid.
 5585  */
 5586 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
 5587                         int start, int n, const u16 *rspq, unsigned int nrspq)
 5588 {
 5589         int ret;
 5590         const u16 *rsp = rspq;
 5591         const u16 *rsp_end = rspq + nrspq;
 5592         struct fw_rss_ind_tbl_cmd cmd;
 5593 
 5594         memset(&cmd, 0, sizeof(cmd));
 5595         cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
 5596                                      F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 5597                                      V_FW_RSS_IND_TBL_CMD_VIID(viid));
 5598         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 5599 
 5600         /*
 5601          * Each firmware RSS command can accommodate up to 32 RSS Ingress
 5602          * Queue Identifiers.  These Ingress Queue IDs are packed three to
 5603          * a 32-bit word as 10-bit values with the upper remaining 2 bits
 5604          * reserved.
 5605          */
 5606         while (n > 0) {
 5607                 int nq = min(n, 32);
 5608                 int nq_packed = 0;
 5609                 __be32 *qp = &cmd.iq0_to_iq2;
 5610 
 5611                 /*
 5612                  * Set up the firmware RSS command header to send the next
 5613                  * "nq" Ingress Queue IDs to the firmware.
 5614                  */
 5615                 cmd.niqid = cpu_to_be16(nq);
 5616                 cmd.startidx = cpu_to_be16(start);
 5617 
 5618                 /*
 5619                  * Adjust @start and @n now for the "nq" entries this command will carry.
 5620                  */
 5621                 start += nq;
 5622                 n -= nq;
 5623 
 5624                 /*
 5625                  * While there are still Ingress Queue IDs to stuff into the
 5626                  * current firmware RSS command, retrieve them from the
 5627                  * Ingress Queue ID array and insert them into the command.
 5628                  */
 5629                 while (nq > 0) {
 5630                         /*
 5631                          * Grab up to the next 3 Ingress Queue IDs (wrapping
 5632                          * around the Ingress Queue ID array if necessary) and
 5633                          * insert them into the firmware RSS command at the
 5634                          * current 3-tuple position within the command.
 5635                          */
 5636                         u16 qbuf[3];
 5637                         u16 *qbp = qbuf;
 5638                         int nqbuf = min(3, nq);
 5639 
 5640                         nq -= nqbuf;
 5641                         qbuf[0] = qbuf[1] = qbuf[2] = 0;
 5642                         while (nqbuf && nq_packed < 32) {
 5643                                 nqbuf--;
 5644                                 nq_packed++;
 5645                                 *qbp++ = *rsp++;
 5646                                 if (rsp >= rsp_end)
 5647                                         rsp = rspq;
 5648                         }
 5649                         *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
 5650                                             V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
 5651                                             V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
 5652                 }
 5653 
 5654                 /*
 5655                  * Send this portion of the RSS table update to the firmware;
 5656                  * bail out on any errors.
 5657                  */
 5658                 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
 5659                 if (ret)
 5660                         return ret;
 5661         }
 5662         return 0;
 5663 }
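
/*
 * Usage sketch (illustrative values only): to spread a VI's RSS slice across
 * four ingress queues, pass the four absolute IQ IDs and let the function
 * repeat them until the requested range is full:
 *
 *      u16 iq[4] = { iq0, iq1, iq2, iq3 };
 *      rc = t4_config_rss_range(sc, sc->mbox, viid, 0, 128, iq, 4);
 *
 * The slice length (128 here) and the queue IDs are placeholders; real values
 * come from the VI's RSS table size and the queues the caller has created.
 */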
 5664 
 5665 /**
 5666  *      t4_config_glbl_rss - configure the global RSS mode
 5667  *      @adapter: the adapter
 5668  *      @mbox: mbox to use for the FW command
 5669  *      @mode: global RSS mode
 5670  *      @flags: mode-specific flags
 5671  *
 5672  *      Sets the global RSS mode.
 5673  */
 5674 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 5675                        unsigned int flags)
 5676 {
 5677         struct fw_rss_glb_config_cmd c;
 5678 
 5679         memset(&c, 0, sizeof(c));
 5680         c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
 5681                                     F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 5682         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 5683         if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
 5684                 c.u.manual.mode_pkd =
 5685                         cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
 5686         } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
 5687                 c.u.basicvirtual.mode_keymode =
 5688                         cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
 5689                 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
 5690         } else
 5691                 return -EINVAL;
 5692         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 5693 }
 5694 
 5695 /**
 5696  *      t4_config_vi_rss - configure per VI RSS settings
 5697  *      @adapter: the adapter
 5698  *      @mbox: mbox to use for the FW command
 5699  *      @viid: the VI id
 5700  *      @flags: RSS flags
 5701  *      @defq: id of the default RSS queue for the VI.
 5702  *      @skeyidx: RSS secret key table index for non-global mode
 5703  *      @skey: RSS vf_scramble key for VI.
 5704  *
 5705  *      Configures VI-specific RSS properties.
 5706  */
 5707 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
 5708                      unsigned int flags, unsigned int defq, unsigned int skeyidx,
 5709                      unsigned int skey)
 5710 {
 5711         struct fw_rss_vi_config_cmd c;
 5712 
 5713         memset(&c, 0, sizeof(c));
 5714         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
 5715                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 5716                                    V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
 5717         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 5718         c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
 5719                                         V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
 5720         c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
 5721                                         V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
 5722         c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
 5723 
 5724         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 5725 }
 5726 
 5727 /* Read an RSS table row */
 5728 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
 5729 {
 5730         t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
 5731         return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
 5732                                    5, 0, val);
 5733 }
 5734 
 5735 /**
 5736  *      t4_read_rss - read the contents of the RSS mapping table
 5737  *      @adapter: the adapter
 5738  *      @map: holds the contents of the RSS mapping table
 5739  *
 5740  *      Reads the contents of the RSS hash->queue mapping table.
 5741  */
 5742 int t4_read_rss(struct adapter *adapter, u16 *map)
 5743 {
 5744         u32 val;
 5745         int i, ret;
 5746         int rss_nentries = adapter->chip_params->rss_nentries;
 5747 
 5748         for (i = 0; i < rss_nentries / 2; ++i) {
 5749                 ret = rd_rss_row(adapter, i, &val);
 5750                 if (ret)
 5751                         return ret;
 5752                 *map++ = G_LKPTBLQUEUE0(val);
 5753                 *map++ = G_LKPTBLQUEUE1(val);
 5754         }
 5755         return 0;
 5756 }
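
/*
 * Usage sketch: the caller provides one u16 per table entry; each lookup-table
 * row read by rd_rss_row() yields two queue numbers, which is why the loop
 * above walks rss_nentries / 2 rows.  A hypothetical caller:
 *
 *      u16 *map = malloc(sc->chip_params->rss_nentries * sizeof(u16),
 *          M_DEVBUF, M_WAITOK);
 *      rc = t4_read_rss(sc, map);
 */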
 5757 
 5758 /**
 5759  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 5760  * @adap: the adapter
 5761  * @cmd: TP fw ldst address space type
 5762  * @vals: where the indirect register values are stored/written
 5763  * @nregs: how many indirect registers to read/write
 5764  * @start_idx: index of first indirect register to read/write
 5765  * @rw: Read (1) or Write (0)
 5766  * @sleep_ok: if true we may sleep while awaiting command completion
 5767  *
 5768  * Access TP indirect registers through LDST
 5769  **/
 5770 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
 5771                             unsigned int nregs, unsigned int start_index,
 5772                             unsigned int rw, bool sleep_ok)
 5773 {
 5774         int ret = 0;
 5775         unsigned int i;
 5776         struct fw_ldst_cmd c;
 5777 
 5778         for (i = 0; i < nregs; i++) {
 5779                 memset(&c, 0, sizeof(c));
 5780                 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 5781                                                 F_FW_CMD_REQUEST |
 5782                                                 (rw ? F_FW_CMD_READ :
 5783                                                       F_FW_CMD_WRITE) |
 5784                                                 V_FW_LDST_CMD_ADDRSPACE(cmd));
 5785                 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 5786 
 5787                 c.u.addrval.addr = cpu_to_be32(start_index + i);
 5788                 c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
 5789                 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
 5790                                       sleep_ok);
 5791                 if (ret)
 5792                         return ret;
 5793 
 5794                 if (rw)
 5795                         vals[i] = be32_to_cpu(c.u.addrval.val);
 5796         }
 5797         return 0;
 5798 }
 5799 
 5800 /**
 5801  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 5802  * @adap: the adapter
 5803  * @reg_addr: Address Register
 5804  * @reg_data: Data register
 5805  * @buff: where the indirect register values are stored/written
 5806  * @nregs: how many indirect registers to read/write
 5807  * @start_index: index of first indirect register to read/write
 5808  * @rw: READ(1) or WRITE(0)
 5809  * @sleep_ok: if true we may sleep while awaiting command completion
 5810  *
 5811  * Read/Write TP indirect registers through LDST if possible.
 5812  * Else, use backdoor access
 5813  **/
 5814 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
 5815                               u32 *buff, u32 nregs, u32 start_index, int rw,
 5816                               bool sleep_ok)
 5817 {
 5818         int rc = -EINVAL;
 5819         int cmd;
 5820 
 5821         switch (reg_addr) {
 5822         case A_TP_PIO_ADDR:
 5823                 cmd = FW_LDST_ADDRSPC_TP_PIO;
 5824                 break;
 5825         case A_TP_TM_PIO_ADDR:
 5826                 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
 5827                 break;
 5828         case A_TP_MIB_INDEX:
 5829                 cmd = FW_LDST_ADDRSPC_TP_MIB;
 5830                 break;
 5831         default:
 5832                 goto indirect_access;
 5833         }
 5834 
 5835         if (t4_use_ldst(adap))
 5836                 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
 5837                                       sleep_ok);
 5838 
 5839 indirect_access:
 5840 
 5841         if (rc) {
 5842                 if (rw)
 5843                         t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
 5844                                          start_index);
 5845                 else
 5846                         t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
 5847                                           start_index);
 5848         }
 5849 }
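
/*
 * The backdoor path above is the usual indirect-register idiom: write the
 * target index to the address register, then read or write the data register.
 * Expanded for a single read it is roughly
 *
 *      t4_write_reg(adap, A_TP_PIO_ADDR, index);
 *      val = t4_read_reg(adap, A_TP_PIO_DATA);
 *
 * which is what t4_read_indirect()/t4_write_indirect() do in a loop.  The
 * LDST path asks the firmware to perform the same access so that it is
 * serialized with the firmware's own use of these registers.
 */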
 5850 
 5851 /**
 5852  * t4_tp_pio_read - Read TP PIO registers
 5853  * @adap: the adapter
 5854  * @buff: where the indirect register values are written
 5855  * @nregs: how many indirect registers to read
 5856  * @start_index: index of first indirect register to read
 5857  * @sleep_ok: if true we may sleep while awaiting command completion
 5858  *
 5859  * Read TP PIO Registers
 5860  **/
 5861 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
 5862                     u32 start_index, bool sleep_ok)
 5863 {
 5864         t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
 5865                           start_index, 1, sleep_ok);
 5866 }
 5867 
 5868 /**
 5869  * t4_tp_pio_write - Write TP PIO registers
 5870  * @adap: the adapter
 5871  * @buff: where the indirect register values are stored
 5872  * @nregs: how many indirect registers to write
 5873  * @start_index: index of first indirect register to write
 5874  * @sleep_ok: if true we may sleep while awaiting command completion
 5875  *
 5876  * Write TP PIO Registers
 5877  **/
 5878 void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
 5879                      u32 start_index, bool sleep_ok)
 5880 {
 5881         t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
 5882             __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
 5883 }
 5884 
 5885 /**
 5886  * t4_tp_tm_pio_read - Read TP TM PIO registers
 5887  * @adap: the adapter
 5888  * @buff: where the indirect register values are written
 5889  * @nregs: how many indirect registers to read
 5890  * @start_index: index of first indirect register to read
 5891  * @sleep_ok: if true we may sleep while awaiting command completion
 5892  *
 5893  * Read TP TM PIO Registers
 5894  **/
 5895 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
 5896                        u32 start_index, bool sleep_ok)
 5897 {
 5898         t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
 5899                           nregs, start_index, 1, sleep_ok);
 5900 }
 5901 
 5902 /**
 5903  * t4_tp_mib_read - Read TP MIB registers
 5904  * @adap: the adapter
 5905  * @buff: where the indirect register values are written
 5906  * @nregs: how many indirect registers to read
 5907  * @start_index: index of first indirect register to read
 5908  * @sleep_ok: if true we may sleep while awaiting command completion
 5909  *
 5910  * Read TP MIB Registers
 5911  **/
 5912 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
 5913                     bool sleep_ok)
 5914 {
 5915         t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
 5916                           start_index, 1, sleep_ok);
 5917 }
 5918 
 5919 /**
 5920  *      t4_read_rss_key - read the global RSS key
 5921  *      @adap: the adapter
 5922  *      @key: 10-entry array holding the 320-bit RSS key
 5923  *      @sleep_ok: if true we may sleep while awaiting command completion
 5924  *
 5925  *      Reads the global 320-bit RSS key.
 5926  */
 5927 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
 5928 {
 5929         t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
 5930 }
 5931 
 5932 /**
 5933  *      t4_write_rss_key - program one of the RSS keys
 5934  *      @adap: the adapter
 5935  *      @key: 10-entry array holding the 320-bit RSS key
 5936  *      @idx: which RSS key to write
 5937  *      @sleep_ok: if true we may sleep while awaiting command completion
 5938  *
 5939  *      Writes one of the RSS keys with the given 320-bit value.  If @idx is
 5940  *      0..15 the corresponding entry in the RSS key table is written,
 5941  *      otherwise the global RSS key is written.
 5942  */
 5943 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
 5944                       bool sleep_ok)
 5945 {
 5946         u8 rss_key_addr_cnt = 16;
 5947         u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
 5948 
 5949         /*
 5950          * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
 5951          * key addresses 16-63 are accessible by using KeyWrAddrX
 5952          * as the upper two bits (index[5:4]) of the key table index.
 5953          */
 5954         if ((chip_id(adap) > CHELSIO_T5) &&
 5955             (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
 5956                 rss_key_addr_cnt = 32;
 5957 
 5958         t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
 5959 
 5960         if (idx >= 0 && idx < rss_key_addr_cnt) {
 5961                 if (rss_key_addr_cnt > 16)
 5962                         t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
 5963                                      vrt | V_KEYWRADDRX(idx >> 4) |
 5964                                      V_T6_VFWRADDR(idx) | F_KEYWREN);
 5965                 else
 5966                         t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
 5967                              vrt | V_KEYWRADDR(idx) | F_KEYWREN);
 5968         }
 5969 }
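
/*
 * Usage sketch (hypothetical key material): programming the global 320-bit
 * key is a matter of handing over ten 32-bit words; a negative @idx skips the
 * per-entry latch at the end of the function.
 *
 *      u32 key[10];
 *
 *      arc4rand(key, sizeof(key), 0);
 *      t4_write_rss_key(sc, key, -1, true);
 */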
 5970 
 5971 /**
 5972  *      t4_read_rss_pf_config - read PF RSS Configuration Table
 5973  *      @adapter: the adapter
 5974  *      @index: the entry in the PF RSS table to read
 5975  *      @valp: where to store the returned value
 5976  *      @sleep_ok: if true we may sleep while awaiting command completion
 5977  *
 5978  *      Reads the PF RSS Configuration Table at the specified index and returns
 5979  *      the value found there.
 5980  */
 5981 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
 5982                            u32 *valp, bool sleep_ok)
 5983 {
 5984         t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
 5985 }
 5986 
 5987 /**
 5988  *      t4_write_rss_pf_config - write PF RSS Configuration Table
 5989  *      @adapter: the adapter
 5990  *      @index: the entry in the PF RSS table to write
 5991  *      @val: the value to store
 5992  *      @sleep_ok: if true we may sleep while awaiting command completion
 5993  *
 5994  *      Writes the PF RSS Configuration Table at the specified index with the
 5995  *      specified value.
 5996  */
 5997 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
 5998                             u32 val, bool sleep_ok)
 5999 {
 6000         t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
 6001                         sleep_ok);
 6002 }
 6003 
 6004 /**
 6005  *      t4_read_rss_vf_config - read VF RSS Configuration Table
 6006  *      @adapter: the adapter
 6007  *      @index: the entry in the VF RSS table to read
 6008  *      @vfl: where to store the returned VFL
 6009  *      @vfh: where to store the returned VFH
 6010  *      @sleep_ok: if true we may sleep while awaiting command completion
 6011  *
 6012  *      Reads the VF RSS Configuration Table at the specified index and returns
 6013  *      the (VFL, VFH) values found there.
 6014  */
 6015 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 6016                            u32 *vfl, u32 *vfh, bool sleep_ok)
 6017 {
 6018         u32 vrt, mask, data;
 6019 
 6020         if (chip_id(adapter) <= CHELSIO_T5) {
 6021                 mask = V_VFWRADDR(M_VFWRADDR);
 6022                 data = V_VFWRADDR(index);
 6023         } else {
 6024                 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
 6025                 data = V_T6_VFWRADDR(index);
 6026         }
 6027         /*
 6028          * Request that the index'th VF Table values be read into VFL/VFH.
 6029          */
 6030         vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
 6031         vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
 6032         vrt |= data | F_VFRDEN;
 6033         t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
 6034 
 6035         /*
 6036          * Grab the VFL/VFH values ...
 6037          */
 6038         t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
 6039         t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
 6040 }
 6041 
 6042 /**
 6043  *      t4_write_rss_vf_config - write VF RSS Configuration Table
 6044  *
 6045  *      @adapter: the adapter
 6046  *      @index: the entry in the VF RSS table to write
 6047  *      @vfl: the VFL to store
 6048  *      @vfh: the VFH to store
 6049  *
 6050  *      Writes the VF RSS Configuration Table at the specified index with the
 6051  *      specified (VFL, VFH) values.
 6052  */
 6053 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
 6054                             u32 vfl, u32 vfh, bool sleep_ok)
 6055 {
 6056         u32 vrt, mask, data;
 6057 
 6058         if (chip_id(adapter) <= CHELSIO_T5) {
 6059                 mask = V_VFWRADDR(M_VFWRADDR);
 6060                 data = V_VFWRADDR(index);
 6061         } else {
 6062                 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
 6063                 data = V_T6_VFWRADDR(index);
 6064         }
 6065 
 6066         /*
 6067          * Load up VFL/VFH with the values to be written ...
 6068          */
 6069         t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
 6070         t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
 6071 
 6072         /*
 6073          * Write the VFL/VFH into the VF Table at index'th location.
 6074          */
 6075         vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
 6076         vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
 6077         vrt |= data | F_VFRDEN;
 6078         t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
 6079 }
 6080 
 6081 /**
 6082  *      t4_read_rss_pf_map - read PF RSS Map
 6083  *      @adapter: the adapter
 6084  *      @sleep_ok: if true we may sleep while awaiting command completion
 6085  *
 6086  *      Reads the PF RSS Map register and returns its value.
 6087  */
 6088 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
 6089 {
 6090         u32 pfmap;
 6091 
 6092         t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
 6093 
 6094         return pfmap;
 6095 }
 6096 
 6097 /**
 6098  *      t4_write_rss_pf_map - write PF RSS Map
 6099  *      @adapter: the adapter
 6100  *      @pfmap: PF RSS Map value
 6101  *
 6102  *      Writes the specified value to the PF RSS Map register.
 6103  */
 6104 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
 6105 {
 6106         t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
 6107 }
 6108 
 6109 /**
 6110  *      t4_read_rss_pf_mask - read PF RSS Mask
 6111  *      @adapter: the adapter
 6112  *      @sleep_ok: if true we may sleep while awaiting command completion
 6113  *
 6114  *      Reads the PF RSS Mask register and returns its value.
 6115  */
 6116 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
 6117 {
 6118         u32 pfmask;
 6119 
 6120         t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
 6121 
 6122         return pfmask;
 6123 }
 6124 
 6125 /**
 6126  *      t4_write_rss_pf_mask - write PF RSS Mask
 6127  *      @adapter: the adapter
 6128  *      @pfmask: PF RSS Mask value
 6129  *
 6130  *      Writes the specified value to the PF RSS Mask register.
 6131  */
 6132 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
 6133 {
 6134         t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
 6135 }
 6136 
 6137 /**
 6138  *      t4_tp_get_tcp_stats - read TP's TCP MIB counters
 6139  *      @adap: the adapter
 6140  *      @v4: holds the TCP/IP counter values
 6141  *      @v6: holds the TCP/IPv6 counter values
 6142  *      @sleep_ok: if true we may sleep while awaiting command completion
 6143  *
 6144  *      Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 6145  *      Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 6146  */
 6147 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 6148                          struct tp_tcp_stats *v6, bool sleep_ok)
 6149 {
 6150         u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
 6151 
 6152 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
 6153 #define STAT(x)     val[STAT_IDX(x)]
 6154 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
 6155 
 6156         if (v4) {
 6157                 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
 6158                                A_TP_MIB_TCP_OUT_RST, sleep_ok);
 6159                 v4->tcp_out_rsts = STAT(OUT_RST);
 6160                 v4->tcp_in_segs  = STAT64(IN_SEG);
 6161                 v4->tcp_out_segs = STAT64(OUT_SEG);
 6162                 v4->tcp_retrans_segs = STAT64(RXT_SEG);
 6163         }
 6164         if (v6) {
 6165                 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
 6166                                A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
 6167                 v6->tcp_out_rsts = STAT(OUT_RST);
 6168                 v6->tcp_in_segs  = STAT64(IN_SEG);
 6169                 v6->tcp_out_segs = STAT64(OUT_SEG);
 6170                 v6->tcp_retrans_segs = STAT64(RXT_SEG);
 6171         }
 6172 #undef STAT64
 6173 #undef STAT
 6174 #undef STAT_IDX
 6175 }
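
/*
 * The MIB exports 64-bit counters as HI/LO register pairs and STAT64() above
 * simply reassembles them.  For example, if IN_SEG_HI reads 0x1 and IN_SEG_LO
 * reads 0x2, tcp_in_segs becomes ((u64)0x1 << 32) | 0x2 == 0x100000002
 * (4294967298 segments).
 */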
 6176 
 6177 /**
 6178  *      t4_tp_get_err_stats - read TP's error MIB counters
 6179  *      @adap: the adapter
 6180  *      @st: holds the counter values
 6181  *      @sleep_ok: if true we may sleep while awaiting command completion
 6182  *
 6183  *      Returns the values of TP's error counters.
 6184  */
 6185 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
 6186                          bool sleep_ok)
 6187 {
 6188         int nchan = adap->chip_params->nchan;
 6189 
 6190         t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
 6191                        sleep_ok);
 6192 
 6193         t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
 6194                        sleep_ok);
 6195 
 6196         t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
 6197                        sleep_ok);
 6198 
 6199         t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
 6200                        A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
 6201 
 6202         t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
 6203                        A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
 6204 
 6205         t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
 6206                        sleep_ok);
 6207 
 6208         t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
 6209                        A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
 6210 
 6211         t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
 6212                        A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
 6213 
 6214         t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
 6215                        sleep_ok);
 6216 }
 6217 
 6218 /**
 6219  *      t4_tp_get_tnl_stats - read TP's tunnel MIB counters
 6220  *      @adap: the adapter
 6221  *      @st: holds the counter values
 6222  *      @sleep_ok: if true we may sleep while awaiting command completion
 6223  *
 6224  *      Returns the values of TP's tunnel counters.
 6225  */
 6226 void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
 6227                          bool sleep_ok)
 6228 {
 6229         int nchan = adap->chip_params->nchan;
 6230 
 6231         t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0,
 6232                        sleep_ok);
 6233         t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0,
 6234                        sleep_ok);
 6235 }
 6236 
 6237 /**
 6238  *      t4_tp_get_proxy_stats - read TP's proxy MIB counters
 6239  *      @adap: the adapter
 6240  *      @st: holds the counter values
 6241  *
 6242  *      Returns the values of TP's proxy counters.
 6243  */
 6244 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
 6245     bool sleep_ok)
 6246 {
 6247         int nchan = adap->chip_params->nchan;
 6248 
 6249         t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
 6250 }
 6251 
 6252 /**
 6253  *      t4_tp_get_cpl_stats - read TP's CPL MIB counters
 6254  *      @adap: the adapter
 6255  *      @st: holds the counter values
 6256  *      @sleep_ok: if true we may sleep while awaiting command completion
 6257  *
 6258  *      Returns the values of TP's CPL counters.
 6259  */
 6260 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
 6261                          bool sleep_ok)
 6262 {
 6263         int nchan = adap->chip_params->nchan;
 6264 
 6265         t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
 6266 
 6267         t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
 6268 }
 6269 
 6270 /**
 6271  *      t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 6272  *      @adap: the adapter
 6273  *      @st: holds the counter values
 6274  *
 6275  *      Returns the values of TP's RDMA counters.
 6276  */
 6277 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
 6278                           bool sleep_ok)
 6279 {
 6280         t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
 6281                        sleep_ok);
 6282 }
 6283 
 6284 /**
 6285  *      t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 6286  *      @adap: the adapter
 6287  *      @idx: the port index
 6288  *      @st: holds the counter values
 6289  *      @sleep_ok: if true we may sleep while awaiting command completion
 6290  *
 6291  *      Returns the values of TP's FCoE counters for the selected port.
 6292  */
 6293 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
 6294                        struct tp_fcoe_stats *st, bool sleep_ok)
 6295 {
 6296         u32 val[2];
 6297 
 6298         t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
 6299                        sleep_ok);
 6300 
 6301         t4_tp_mib_read(adap, &st->frames_drop, 1,
 6302                        A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
 6303 
 6304         t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
 6305                        sleep_ok);
 6306 
 6307         st->octets_ddp = ((u64)val[0] << 32) | val[1];
 6308 }
 6309 
 6310 /**
 6311  *      t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 6312  *      @adap: the adapter
 6313  *      @st: holds the counter values
 6314  *      @sleep_ok: if true we may sleep while awaiting command completion
 6315  *
 6316  *      Returns the values of TP's counters for non-TCP directly-placed packets.
 6317  */
 6318 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
 6319                       bool sleep_ok)
 6320 {
 6321         u32 val[4];
 6322 
 6323         t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
 6324 
 6325         st->frames = val[0];
 6326         st->drops = val[1];
 6327         st->octets = ((u64)val[2] << 32) | val[3];
 6328 }
 6329 
 6330 /**
 6331  *      t4_tp_get_tid_stats - read TP's tid MIB counters.
 6332  *      @adap: the adapter
 6333  *      @st: holds the counter values
 6334  *      @sleep_ok: if true we may sleep while awaiting command completion
 6335  *
 6336  *      Returns the values of TP's counters for tids.
 6337  */
 6338 void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
 6339                       bool sleep_ok)
 6340 {
 6341 
 6342         t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok);
 6343 }
 6344 
 6345 /**
 6346  *      t4_read_mtu_tbl - returns the values in the HW path MTU table
 6347  *      @adap: the adapter
 6348  *      @mtus: where to store the MTU values
 6349  *      @mtu_log: where to store the MTU base-2 log (may be %NULL)
 6350  *
 6351  *      Reads the HW path MTU table.
 6352  */
 6353 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
 6354 {
 6355         u32 v;
 6356         int i;
 6357 
 6358         for (i = 0; i < NMTUS; ++i) {
 6359                 t4_write_reg(adap, A_TP_MTU_TABLE,
 6360                              V_MTUINDEX(0xff) | V_MTUVALUE(i));
 6361                 v = t4_read_reg(adap, A_TP_MTU_TABLE);
 6362                 mtus[i] = G_MTUVALUE(v);
 6363                 if (mtu_log)
 6364                         mtu_log[i] = G_MTUWIDTH(v);
 6365         }
 6366 }
 6367 
 6368 /**
 6369  *      t4_read_cong_tbl - reads the congestion control table
 6370  *      @adap: the adapter
 6371  *      @incr: where to store the alpha values
 6372  *
 6373  *      Reads the additive increments programmed into the HW congestion
 6374  *      control table.
 6375  */
 6376 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
 6377 {
 6378         unsigned int mtu, w;
 6379 
 6380         for (mtu = 0; mtu < NMTUS; ++mtu)
 6381                 for (w = 0; w < NCCTRL_WIN; ++w) {
 6382                         t4_write_reg(adap, A_TP_CCTRL_TABLE,
 6383                                      V_ROWINDEX(0xffff) | (mtu << 5) | w);
 6384                         incr[mtu][w] = (u16)t4_read_reg(adap,
 6385                                                 A_TP_CCTRL_TABLE) & 0x1fff;
 6386                 }
 6387 }
 6388 
 6389 /**
 6390  *      t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 6391  *      @adap: the adapter
 6392  *      @addr: the indirect TP register address
 6393  *      @mask: specifies the field within the register to modify
 6394  *      @val: new value for the field
 6395  *
 6396  *      Sets a field of an indirect TP register to the given value.
 6397  */
 6398 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
 6399                             unsigned int mask, unsigned int val)
 6400 {
 6401         t4_write_reg(adap, A_TP_PIO_ADDR, addr);
 6402         val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
 6403         t4_write_reg(adap, A_TP_PIO_DATA, val);
 6404 }
 6405 
 6406 /**
 6407  *      init_cong_ctrl - initialize congestion control parameters
 6408  *      @a: the alpha values for congestion control
 6409  *      @b: the beta values for congestion control
 6410  *
 6411  *      Initialize the congestion control parameters.
 6412  */
 6413 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
 6414 {
 6415         a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
 6416         a[9] = 2;
 6417         a[10] = 3;
 6418         a[11] = 4;
 6419         a[12] = 5;
 6420         a[13] = 6;
 6421         a[14] = 7;
 6422         a[15] = 8;
 6423         a[16] = 9;
 6424         a[17] = 10;
 6425         a[18] = 14;
 6426         a[19] = 17;
 6427         a[20] = 21;
 6428         a[21] = 25;
 6429         a[22] = 30;
 6430         a[23] = 35;
 6431         a[24] = 45;
 6432         a[25] = 60;
 6433         a[26] = 80;
 6434         a[27] = 100;
 6435         a[28] = 200;
 6436         a[29] = 300;
 6437         a[30] = 400;
 6438         a[31] = 500;
 6439 
 6440         b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
 6441         b[9] = b[10] = 1;
 6442         b[11] = b[12] = 2;
 6443         b[13] = b[14] = b[15] = b[16] = 3;
 6444         b[17] = b[18] = b[19] = b[20] = b[21] = 4;
 6445         b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
 6446         b[28] = b[29] = 6;
 6447         b[30] = b[31] = 7;
 6448 }
 6449 
 6450 /* The minimum additive increment value for the congestion control table */
 6451 #define CC_MIN_INCR 2U
 6452 
 6453 /**
 6454  *      t4_load_mtus - write the MTU and congestion control HW tables
 6455  *      @adap: the adapter
 6456  *      @mtus: the values for the MTU table
 6457  *      @alpha: the values for the congestion control alpha parameter
 6458  *      @beta: the values for the congestion control beta parameter
 6459  *
 6460  *      Write the HW MTU table with the supplied MTUs and the high-speed
 6461  *      congestion control table with the supplied alpha, beta, and MTUs.
 6462  *      We write the two tables together because the additive increments
 6463  *      depend on the MTUs.
 6464  */
 6465 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 6466                   const unsigned short *alpha, const unsigned short *beta)
 6467 {
 6468         static const unsigned int avg_pkts[NCCTRL_WIN] = {
 6469                 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
 6470                 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
 6471                 28672, 40960, 57344, 81920, 114688, 163840, 229376
 6472         };
 6473 
 6474         unsigned int i, w;
 6475 
 6476         for (i = 0; i < NMTUS; ++i) {
 6477                 unsigned int mtu = mtus[i];
 6478                 unsigned int log2 = fls(mtu);
 6479 
 6480                 if (!(mtu & ((1 << log2) >> 2)))     /* round */
 6481                         log2--;
 6482                 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
 6483                              V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
 6484 
 6485                 for (w = 0; w < NCCTRL_WIN; ++w) {
 6486                         unsigned int inc;
 6487 
 6488                         inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
 6489                                   CC_MIN_INCR);
 6490 
 6491                         t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
 6492                                      (w << 16) | (beta[w] << 13) | inc);
 6493                 }
 6494         }
 6495 }
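
/*
 * Worked example for the additive-increment formula above, using hypothetical
 * table values: with mtu = 1500, alpha[w] = 2, and avg_pkts[w] = 448,
 * inc = ((1500 - 40) * 2) / 448 = 2920 / 448 = 6 (integer division).  For the
 * widest windows the quotient can reach 0, which is why the result is clamped
 * to CC_MIN_INCR (2).
 */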
 6496 
 6497 /**
 6498  *      t4_set_pace_tbl - set the pace table
 6499  *      @adap: the adapter
 6500  *      @pace_vals: the pace values in microseconds
 6501  *      @start: index of the first entry in the HW pace table to set
 6502  *      @n: how many entries to set
 6503  *
 6504  *      Sets (a subset of the) HW pace table.
 6505  */
 6506 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
 6507                      unsigned int start, unsigned int n)
 6508 {
 6509         unsigned int vals[NTX_SCHED], i;
 6510         unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
 6511 
 6512         if (n > NTX_SCHED)
 6513                 return -ERANGE;
 6514 
 6515         /* convert values from us to dack ticks, rounding to closest value */
 6516         for (i = 0; i < n; i++, pace_vals++) {
 6517                 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
 6518                 if (vals[i] > 0x7ff)
 6519                         return -ERANGE;
 6520                 if (*pace_vals && vals[i] == 0)
 6521                         return -ERANGE;
 6522         }
 6523         for (i = 0; i < n; i++, start++)
 6524                 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
 6525         return 0;
 6526 }
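
/*
 * Worked example of the conversion above, assuming a hypothetical DACK tick
 * of 5 us: dack_ticks_to_usec(adap, 1000) returns 5000, which doubles as the
 * tick length in nanoseconds, so a requested pace of 100 us becomes
 * (1000 * 100 + 2500) / 5000 = 20 ticks.
 */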
 6527 
 6528 /**
 6529  *      t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 6530  *      @adap: the adapter
 6531  *      @kbps: target rate in Kbps
 6532  *      @sched: the scheduler index
 6533  *
 6534  *      Configure a Tx HW scheduler for the target rate.
 6535  */
 6536 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
 6537 {
 6538         unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
 6539         unsigned int clk = adap->params.vpd.cclk * 1000;
 6540         unsigned int selected_cpt = 0, selected_bpt = 0;
 6541 
 6542         if (kbps > 0) {
 6543                 kbps *= 125;     /* -> bytes */
 6544                 for (cpt = 1; cpt <= 255; cpt++) {
 6545                         tps = clk / cpt;
 6546                         bpt = (kbps + tps / 2) / tps;
 6547                         if (bpt > 0 && bpt <= 255) {
 6548                                 v = bpt * tps;
 6549                                 delta = v >= kbps ? v - kbps : kbps - v;
 6550                                 if (delta < mindelta) {
 6551                                         mindelta = delta;
 6552                                         selected_cpt = cpt;
 6553                                         selected_bpt = bpt;
 6554                                 }
 6555                         } else if (selected_cpt)
 6556                                 break;
 6557                 }
 6558                 if (!selected_cpt)
 6559                         return -EINVAL;
 6560         }
 6561         t4_write_reg(adap, A_TP_TM_PIO_ADDR,
 6562                      A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
 6563         v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
 6564         if (sched & 1)
 6565                 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
 6566         else
 6567                 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
 6568         t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
 6569         return 0;
 6570 }
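
/*
 * Worked example of the search above (illustrative numbers only): with a
 * 250 MHz core clock (clk = 250,000,000) and kbps = 100,000, the target is
 * 100,000 * 125 = 12,500,000 bytes/s.  The loop settles on cpt = 20 and
 * bpt = 1, i.e. one byte quantum every 20 core clocks, which is exactly
 * 250,000,000 / 20 = 12,500,000 bytes/s (delta = 0).
 */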
 6571 
 6572 /**
 6573  *      t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 6574  *      @adap: the adapter
 6575  *      @sched: the scheduler index
 6576  *      @ipg: the interpacket delay in tenths of nanoseconds
 6577  *
 6578  *      Set the interpacket delay for a HW packet rate scheduler.
 6579  */
 6580 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
 6581 {
 6582         unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
 6583 
 6584         /* convert ipg to nearest number of core clocks */
 6585         ipg *= core_ticks_per_usec(adap);
 6586         ipg = (ipg + 5000) / 10000;
 6587         if (ipg > M_TXTIMERSEPQ0)
 6588                 return -EINVAL;
 6589 
 6590         t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
 6591         v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
 6592         if (sched & 1)
 6593                 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
 6594         else
 6595                 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
 6596         t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
 6597         t4_read_reg(adap, A_TP_TM_PIO_DATA);
 6598         return 0;
 6599 }
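
/*
 * Worked example of the conversion above (illustrative numbers only): with a
 * 250 MHz core clock, core_ticks_per_usec() is 250, so a request of
 * ipg = 1000 (100 ns, since the unit is tenths of a nanosecond) becomes
 * (1000 * 250 + 5000) / 10000 = 25 core clocks, i.e. 25 * 4 ns = 100 ns.
 */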
 6600 
 6601 /*
 6602  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 6603  * clocks.  The formula is
 6604  *
 6605  * bytes/s = bytes256 * 256 * ClkFreq / 4096
 6606  *
 6607  * which is equivalent to
 6608  *
 6609  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
 6610  */
 6611 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
 6612 {
 6613         u64 v = (u64)bytes256 * adap->params.vpd.cclk;
 6614 
 6615         return v * 62 + v / 2;
 6616 }
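
/*
 * Worked example (illustrative numbers only): the VPD core clock is kept in
 * kHz, i.e. cycles per millisecond, which is where the 62.5 factor comes
 * from.  With cclk = 250,000 (250 MHz) and bytes256 = 16, v = 4,000,000 and
 * the result is 62 * v + v / 2 = 250,000,000 bytes/s -- the same as
 * 16 * 256 bytes every 4096 clocks at 250 MHz.
 */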
 6617 
 6618 /**
 6619  *      t4_get_chan_txrate - get the current per channel Tx rates
 6620  *      @adap: the adapter
 6621  *      @nic_rate: rates for NIC traffic
 6622  *      @ofld_rate: rates for offloaded traffic
 6623  *
 6624  *      Return the current Tx rates in bytes/s for NIC and offloaded traffic
 6625  *      for each channel.
 6626  */
 6627 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
 6628 {
 6629         u32 v;
 6630 
 6631         v = t4_read_reg(adap, A_TP_TX_TRATE);
 6632         nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
 6633         nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
 6634         if (adap->chip_params->nchan > 2) {
 6635                 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
 6636                 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
 6637         }
 6638 
 6639         v = t4_read_reg(adap, A_TP_TX_ORATE);
 6640         ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
 6641         ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
 6642         if (adap->chip_params->nchan > 2) {
 6643                 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
 6644                 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
 6645         }
 6646 }
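
/*
 * Illustrative usage sketch (not part of the driver): sample the current
 * per-channel Tx rates, assuming adapter softc "sc".  Size the arrays for
 * four channels; only the first nchan entries are written.
 *
 *      u64 nic[4] = {0}, ofld[4] = {0};
 *
 *      t4_get_chan_txrate(sc, nic, ofld);
 */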
 6647 
 6648 /**
 6649  *      t4_set_trace_filter - configure one of the tracing filters
 6650  *      @adap: the adapter
 6651  *      @tp: the desired trace filter parameters
 6652  *      @idx: which filter to configure
 6653  *      @enable: whether to enable or disable the filter
 6654  *
 6655  *      Configures one of the tracing filters available in HW.  If @tp is %NULL
 6656  *      it indicates that the filter is already written in the register and it
 6657  *      just needs to be enabled or disabled.
 6658  */
 6659 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
 6660     int idx, int enable)
 6661 {
 6662         int i, ofst = idx * 4;
 6663         u32 data_reg, mask_reg, cfg;
 6664         u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
 6665 
 6666         if (idx < 0 || idx >= NTRACE)
 6667                 return -EINVAL;
 6668 
 6669         if (tp == NULL || !enable) {
 6670                 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
 6671                     enable ? en : 0);
 6672                 return 0;
 6673         }
 6674 
 6675         /*
 6676          * TODO - After T4 data book is updated, specify the exact
 6677          * section below.
 6678          *
 6679          * See T4 data book - MPS section for a complete description
 6680          * of the below if..else handling of A_MPS_TRC_CFG register
 6681          * value.
 6682          */
 6683         cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
 6684         if (cfg & F_TRCMULTIFILTER) {
 6685                 /*
 6686                  * If multiple tracers are enabled, the maximum
 6687                  * capture size is 2.5KB (the FIFO size of a single
 6688                  * channel) minus 2 flits for the CPL_TRACE_PKT header.
 6689                  */
 6690                 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
 6691                         return -EINVAL;
 6692         } else {
 6693                 /*
 6694                  * If multiple tracers are disabled then, to avoid deadlocks,
 6695                  * a maximum packet capture size of 9600 bytes is recommended.
 6696                  * Also, in this mode only trace0 can be enabled and running.
 6697                  */
 6698                 if (tp->snap_len > 9600 || idx)
 6699                         return -EINVAL;
 6700         }
 6701 
 6702         if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
 6703             tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
 6704             tp->min_len > M_TFMINPKTSIZE)
 6705                 return -EINVAL;
 6706 
 6707         /* stop the tracer we'll be changing */
 6708         t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
 6709 
 6710         idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
 6711         data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
 6712         mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
 6713 
 6714         for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
 6715                 t4_write_reg(adap, data_reg, tp->data[i]);
 6716                 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
 6717         }
 6718         t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
 6719                      V_TFCAPTUREMAX(tp->snap_len) |
 6720                      V_TFMINPKTSIZE(tp->min_len));
 6721         t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
 6722                      V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
 6723                      (is_t4(adap) ?
 6724                      V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
 6725                      V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
 6726 
 6727         return 0;
 6728 }
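
/*
 * Illustrative usage sketch (not part of the driver): arm trace filter 0 to
 * capture the first 128 bytes of every frame seen on port 0, assuming
 * adapter softc "sc".  A zeroed mask[] marks every match byte as don't-care.
 *
 *      struct trace_params tp;
 *      int rc;
 *
 *      memset(&tp, 0, sizeof(tp));
 *      tp.snap_len = 128;
 *      tp.min_len = 0;
 *      tp.port = 0;
 *      rc = t4_set_trace_filter(sc, &tp, 0, 1);
 *
 * Passing tp == NULL (or enable == 0) only toggles the enable bit and leaves
 * the programmed filter untouched.
 */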
 6729 
 6730 /**
 6731  *      t4_get_trace_filter - query one of the tracing filters
 6732  *      @adap: the adapter
 6733  *      @tp: the current trace filter parameters
 6734  *      @idx: which trace filter to query
 6735  *      @enabled: where to store whether the filter is enabled
 6736  *
 6737  *      Returns the current settings of one of the HW tracing filters.
 6738  */
 6739 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
 6740                          int *enabled)
 6741 {
 6742         u32 ctla, ctlb;
 6743         int i, ofst = idx * 4;
 6744         u32 data_reg, mask_reg;
 6745 
 6746         ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
 6747         ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
 6748 
 6749         if (is_t4(adap)) {
 6750                 *enabled = !!(ctla & F_TFEN);
 6751                 tp->port = G_TFPORT(ctla);
 6752                 tp->invert = !!(ctla & F_TFINVERTMATCH);
 6753         } else {
 6754                 *enabled = !!(ctla & F_T5_TFEN);
 6755                 tp->port = G_T5_TFPORT(ctla);
 6756                 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
 6757         }
 6758         tp->snap_len = G_TFCAPTUREMAX(ctlb);
 6759         tp->min_len = G_TFMINPKTSIZE(ctlb);
 6760         tp->skip_ofst = G_TFOFFSET(ctla);
 6761         tp->skip_len = G_TFLENGTH(ctla);
 6762 
 6763         ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
 6764         data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
 6765         mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
 6766 
 6767         for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
 6768                 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
 6769                 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
 6770         }
 6771 }
 6772 
 6773 /**
 6774  *      t4_pmtx_get_stats - returns the HW stats from PMTX
 6775  *      @adap: the adapter
 6776  *      @cnt: where to store the count statistics
 6777  *      @cycles: where to store the cycle statistics
 6778  *
 6779  *      Returns performance statistics from PMTX.
 6780  */
 6781 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 6782 {
 6783         int i;
 6784         u32 data[2];
 6785 
 6786         for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
 6787                 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
 6788                 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
 6789                 if (is_t4(adap))
 6790                         cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
 6791                 else {
 6792                         t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
 6793                                          A_PM_TX_DBG_DATA, data, 2,
 6794                                          A_PM_TX_DBG_STAT_MSB);
 6795                         cycles[i] = (((u64)data[0] << 32) | data[1]);
 6796                 }
 6797         }
 6798 }
 6799 
 6800 /**
 6801  *      t4_pmrx_get_stats - returns the HW stats from PMRX
 6802  *      @adap: the adapter
 6803  *      @cnt: where to store the count statistics
 6804  *      @cycles: where to store the cycle statistics
 6805  *
 6806  *      Returns performance statistics from PMRX.
 6807  */
 6808 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 6809 {
 6810         int i;
 6811         u32 data[2];
 6812 
 6813         for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
 6814                 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
 6815                 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
 6816                 if (is_t4(adap)) {
 6817                         cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
 6818                 } else {
 6819                         t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
 6820                                          A_PM_RX_DBG_DATA, data, 2,
 6821                                          A_PM_RX_DBG_STAT_MSB);
 6822                         cycles[i] = (((u64)data[0] << 32) | data[1]);
 6823                 }
 6824         }
 6825 }
 6826 
 6827 /**
 6828  *      t4_get_mps_bg_map - return the buffer groups associated with a port
 6829  *      @adap: the adapter
 6830  *      @idx: the port index
 6831  *
 6832  *      Returns a bitmap indicating which MPS buffer groups are associated
 6833  *      with the given port.  Bit i is set if buffer group i is used by the
 6834  *      port.
 6835  */
 6836 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 6837 {
 6838         u32 n;
 6839 
 6840         if (adap->params.mps_bg_map)
 6841                 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
 6842 
 6843         n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
 6844         if (n == 0)
 6845                 return idx == 0 ? 0xf : 0;
 6846         if (n == 1 && chip_id(adap) <= CHELSIO_T5)
 6847                 return idx < 2 ? (3 << (2 * idx)) : 0;
 6848         return 1 << idx;
 6849 }
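
/*
 * Worked example of the mapping above: in the n == 1 case on a T4/T5,
 * port 0 maps to buffer groups 0-1 (0x3) and port 1 to groups 2-3 (0xc);
 * with n == 0, port 0 owns all four groups (0xf); otherwise each port gets
 * only its own buffer group (1 << idx).
 */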
 6850 
 6851 /*
 6852  * TP RX e-channels associated with the port.
 6853  */
 6854 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
 6855 {
 6856         u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
 6857         const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
 6858 
 6859         if (n == 0)
 6860                 return idx == 0 ? all_chan : 0;
 6861         if (n == 1 && chip_id(adap) <= CHELSIO_T5)
 6862                 return idx < 2 ? (3 << (2 * idx)) : 0;
 6863         return 1 << idx;
 6864 }
 6865 
 6866 /*
 6867  * TP RX c-channel associated with the port.
 6868  */
 6869 static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
 6870 {
 6871         u32 param, val;
 6872         int ret;
 6873 
 6874         param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 6875             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
 6876         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 6877         if (!ret)
 6878                 return (val >> (8 * idx)) & 0xff;
 6879 
 6880         return 0;
 6881 }
 6882 
 6883 /**
 6884  *      t4_get_port_type_description - return Port Type string description
 6885  *      @port_type: firmware Port Type enumeration
 6886  */
 6887 const char *t4_get_port_type_description(enum fw_port_type port_type)
 6888 {
 6889         static const char *const port_type_description[] = {
 6890                 "Fiber_XFI",
 6891                 "Fiber_XAUI",
 6892                 "BT_SGMII",
 6893                 "BT_XFI",
 6894                 "BT_XAUI",
 6895                 "KX4",
 6896                 "CX4",
 6897                 "KX",
 6898                 "KR",
 6899                 "SFP",
 6900                 "BP_AP",
 6901                 "BP4_AP",
 6902                 "QSFP_10G",
 6903                 "QSA",
 6904                 "QSFP",
 6905                 "BP40_BA",
 6906                 "KR4_100G",
 6907                 "CR4_QSFP",
 6908                 "CR_QSFP",
 6909                 "CR2_QSFP",
 6910                 "SFP28",
 6911                 "KR_SFP28",
 6912         };
 6913 
 6914         if (port_type < ARRAY_SIZE(port_type_description))
 6915                 return port_type_description[port_type];
 6916         return "UNKNOWN";
 6917 }
 6918 
 6919 /**
 6920  *      t4_get_port_stats_offset - collect port stats relative to a previous
 6921  *                                 snapshot
 6922  *      @adap: The adapter
 6923  *      @idx: The port
 6924  *      @stats: Current stats to fill
 6925  *      @offset: Previous stats snapshot
 6926  */
 6927 void t4_get_port_stats_offset(struct adapter *adap, int idx,
 6928                 struct port_stats *stats,
 6929                 struct port_stats *offset)
 6930 {
 6931         u64 *s, *o;
 6932         int i;
 6933 
 6934         t4_get_port_stats(adap, idx, stats);
 6935         for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
 6936             i < (sizeof(struct port_stats) / sizeof(u64));
 6937             i++, s++, o++)
 6938                 *s -= *o;
 6939 }
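
/*
 * Illustrative usage sketch (not part of the driver): take a baseline
 * snapshot once and later report only the traffic seen since then, assuming
 * adapter softc "sc" and port index "port".
 *
 *      struct port_stats base, delta;
 *
 *      t4_get_port_stats(sc, port, &base);             -- baseline
 *      ...
 *      t4_get_port_stats_offset(sc, port, &delta, &base);
 *
 * The subtraction walks the structure as an array of u64 counters, so every
 * field of "delta" ends up relative to the snapshot.
 */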
 6940 
 6941 /**
 6942  *      t4_get_port_stats - collect port statistics
 6943  *      @adap: the adapter
 6944  *      @idx: the port index
 6945  *      @p: the stats structure to fill
 6946  *
 6947  *      Collect statistics related to the given port from HW.
 6948  */
 6949 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 6950 {
 6951         struct port_info *pi = adap->port[idx];
 6952         u32 bgmap = pi->mps_bg_map;
 6953         u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
 6954 
 6955 #define GET_STAT(name) \
 6956         t4_read_reg64(adap, \
 6957         (is_t4(adap) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##name##_L) : \
 6958         T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##name##_L)))
 6959 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
 6960 
 6961         p->tx_pause             = GET_STAT(TX_PORT_PAUSE);
 6962         p->tx_octets            = GET_STAT(TX_PORT_BYTES);
 6963         p->tx_frames            = GET_STAT(TX_PORT_FRAMES);
 6964         p->tx_bcast_frames      = GET_STAT(TX_PORT_BCAST);
 6965         p->tx_mcast_frames      = GET_STAT(TX_PORT_MCAST);
 6966         p->tx_ucast_frames      = GET_STAT(TX_PORT_UCAST);
 6967         p->tx_error_frames      = GET_STAT(TX_PORT_ERROR);
 6968         p->tx_frames_64         = GET_STAT(TX_PORT_64B);
 6969         p->tx_frames_65_127     = GET_STAT(TX_PORT_65B_127B);
 6970         p->tx_frames_128_255    = GET_STAT(TX_PORT_128B_255B);
 6971         p->tx_frames_256_511    = GET_STAT(TX_PORT_256B_511B);
 6972         p->tx_frames_512_1023   = GET_STAT(TX_PORT_512B_1023B);
 6973         p->tx_frames_1024_1518  = GET_STAT(TX_PORT_1024B_1518B);
 6974         p->tx_frames_1519_max   = GET_STAT(TX_PORT_1519B_MAX);
 6975         p->tx_drop              = GET_STAT(TX_PORT_DROP);
 6976         p->tx_ppp0              = GET_STAT(TX_PORT_PPP0);
 6977         p->tx_ppp1              = GET_STAT(TX_PORT_PPP1);
 6978         p->tx_ppp2              = GET_STAT(TX_PORT_PPP2);
 6979         p->tx_ppp3              = GET_STAT(TX_PORT_PPP3);
 6980         p->tx_ppp4              = GET_STAT(TX_PORT_PPP4);
 6981         p->tx_ppp5              = GET_STAT(TX_PORT_PPP5);
 6982         p->tx_ppp6              = GET_STAT(TX_PORT_PPP6);
 6983         p->tx_ppp7              = GET_STAT(TX_PORT_PPP7);
 6984 
 6985         if (chip_id(adap) >= CHELSIO_T5) {
 6986                 if (stat_ctl & F_COUNTPAUSESTATTX) {
 6987                         p->tx_frames -= p->tx_pause;
 6988                         p->tx_octets -= p->tx_pause * 64;
 6989                 }
 6990                 if (stat_ctl & F_COUNTPAUSEMCTX)
 6991                         p->tx_mcast_frames -= p->tx_pause;
 6992         }
 6993 
 6994         p->rx_pause             = GET_STAT(RX_PORT_PAUSE);
 6995         p->rx_octets            = GET_STAT(RX_PORT_BYTES);
 6996         p->rx_frames            = GET_STAT(RX_PORT_FRAMES);
 6997         p->rx_bcast_frames      = GET_STAT(RX_PORT_BCAST);
 6998         p->rx_mcast_frames      = GET_STAT(RX_PORT_MCAST);
 6999         p->rx_ucast_frames      = GET_STAT(RX_PORT_UCAST);
 7000         p->rx_too_long          = GET_STAT(RX_PORT_MTU_ERROR);
 7001         p->rx_jabber            = GET_STAT(RX_PORT_MTU_CRC_ERROR);
 7002         p->rx_len_err           = GET_STAT(RX_PORT_LEN_ERROR);
 7003         p->rx_symbol_err        = GET_STAT(RX_PORT_SYM_ERROR);
 7004         p->rx_runt              = GET_STAT(RX_PORT_LESS_64B);
 7005         p->rx_frames_64         = GET_STAT(RX_PORT_64B);
 7006         p->rx_frames_65_127     = GET_STAT(RX_PORT_65B_127B);
 7007         p->rx_frames_128_255    = GET_STAT(RX_PORT_128B_255B);
 7008         p->rx_frames_256_511    = GET_STAT(RX_PORT_256B_511B);
 7009         p->rx_frames_512_1023   = GET_STAT(RX_PORT_512B_1023B);
 7010         p->rx_frames_1024_1518  = GET_STAT(RX_PORT_1024B_1518B);
 7011         p->rx_frames_1519_max   = GET_STAT(RX_PORT_1519B_MAX);
 7012         p->rx_ppp0              = GET_STAT(RX_PORT_PPP0);
 7013         p->rx_ppp1              = GET_STAT(RX_PORT_PPP1);
 7014         p->rx_ppp2              = GET_STAT(RX_PORT_PPP2);
 7015         p->rx_ppp3              = GET_STAT(RX_PORT_PPP3);
 7016         p->rx_ppp4              = GET_STAT(RX_PORT_PPP4);
 7017         p->rx_ppp5              = GET_STAT(RX_PORT_PPP5);
 7018         p->rx_ppp6              = GET_STAT(RX_PORT_PPP6);
 7019         p->rx_ppp7              = GET_STAT(RX_PORT_PPP7);
 7020 
 7021         if (pi->fcs_reg != -1)
 7022                 p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
 7023 
 7024         if (chip_id(adap) >= CHELSIO_T5) {
 7025                 if (stat_ctl & F_COUNTPAUSESTATRX) {
 7026                         p->rx_frames -= p->rx_pause;
 7027                         p->rx_octets -= p->rx_pause * 64;
 7028                 }
 7029                 if (stat_ctl & F_COUNTPAUSEMCRX)
 7030                         p->rx_mcast_frames -= p->rx_pause;
 7031         }
 7032 
 7033         p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
 7034         p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
 7035         p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
 7036         p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
 7037         p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
 7038         p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
 7039         p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
 7040         p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
 7041 
 7042 #undef GET_STAT
 7043 #undef GET_STAT_COM
 7044 }
 7045 
 7046 /**
 7047  *      t4_get_lb_stats - collect loopback port statistics
 7048  *      @adap: the adapter
 7049  *      @idx: the loopback port index
 7050  *      @p: the stats structure to fill
 7051  *
 7052  *      Return HW statistics for the given loopback port.
 7053  */
 7054 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
 7055 {
 7056 
 7057 #define GET_STAT(name) \
 7058         t4_read_reg64(adap, \
 7059         (is_t4(adap) ? \
 7060         PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
 7061         T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
 7062 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
 7063 
 7064         p->octets       = GET_STAT(BYTES);
 7065         p->frames       = GET_STAT(FRAMES);
 7066         p->bcast_frames = GET_STAT(BCAST);
 7067         p->mcast_frames = GET_STAT(MCAST);
 7068         p->ucast_frames = GET_STAT(UCAST);
 7069         p->error_frames = GET_STAT(ERROR);
 7070 
 7071         p->frames_64            = GET_STAT(64B);
 7072         p->frames_65_127        = GET_STAT(65B_127B);
 7073         p->frames_128_255       = GET_STAT(128B_255B);
 7074         p->frames_256_511       = GET_STAT(256B_511B);
 7075         p->frames_512_1023      = GET_STAT(512B_1023B);
 7076         p->frames_1024_1518     = GET_STAT(1024B_1518B);
 7077         p->frames_1519_max      = GET_STAT(1519B_MAX);
 7078         p->drop                 = GET_STAT(DROP_FRAMES);
 7079 
 7080         if (idx < adap->params.nports) {
 7081                 u32 bg = adap2pinfo(adap, idx)->mps_bg_map;
 7082 
 7083                 p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
 7084                 p->ovflow1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
 7085                 p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
 7086                 p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
 7087                 p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
 7088                 p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
 7089                 p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
 7090                 p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
 7091         }
 7092 
 7093 #undef GET_STAT
 7094 #undef GET_STAT_COM
 7095 }
 7096 
 7097 /**
 7098  *      t4_wol_magic_enable - enable/disable magic packet WoL
 7099  *      @adap: the adapter
 7100  *      @port: the physical port index
 7101  *      @addr: MAC address expected in magic packets, %NULL to disable
 7102  *
 7103  *      Enables/disables magic packet wake-on-LAN for the selected port.
 7104  */
 7105 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
 7106                          const u8 *addr)
 7107 {
 7108         u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
 7109 
 7110         if (is_t4(adap)) {
 7111                 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
 7112                 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
 7113                 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
 7114         } else {
 7115                 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
 7116                 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
 7117                 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
 7118         }
 7119 
 7120         if (addr) {
 7121                 t4_write_reg(adap, mag_id_reg_l,
 7122                              (addr[2] << 24) | (addr[3] << 16) |
 7123                              (addr[4] << 8) | addr[5]);
 7124                 t4_write_reg(adap, mag_id_reg_h,
 7125                              (addr[0] << 8) | addr[1]);
 7126         }
 7127         t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
 7128                          V_MAGICEN(addr != NULL));
 7129 }
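
/*
 * Illustrative usage sketch (not part of the driver): arm and later disarm
 * magic-packet wake-on-LAN for one port, assuming adapter softc "sc" and a
 * made-up MAC address.
 *
 *      const u8 mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
 *
 *      t4_wol_magic_enable(sc, port, mac);     -- enable, match this MAC
 *      t4_wol_magic_enable(sc, port, NULL);    -- disable
 */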
 7130 
 7131 /**
 7132  *      t4_wol_pat_enable - enable/disable pattern-based WoL
 7133  *      @adap: the adapter
 7134  *      @port: the physical port index
 7135  *      @map: bitmap of which HW pattern filters to set
 7136  *      @mask0: byte mask for bytes 0-63 of a packet
 7137  *      @mask1: byte mask for bytes 64-127 of a packet
 7138  *      @crc: Ethernet CRC for selected bytes
 7139  *      @enable: enable/disable switch
 7140  *
 7141  *      Sets the pattern filters indicated in @map to mask out the bytes
 7142  *      specified in @mask0/@mask1 in received packets and compare the CRC of
 7143  *      the resulting packet against @crc.  If @enable is %true pattern-based
 7144  *      WoL is enabled, otherwise disabled.
 7145  */
 7146 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
 7147                       u64 mask0, u64 mask1, unsigned int crc, bool enable)
 7148 {
 7149         int i;
 7150         u32 port_cfg_reg;
 7151 
 7152         if (is_t4(adap))
 7153                 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
 7154         else
 7155                 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
 7156 
 7157         if (!enable) {
 7158                 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
 7159                 return 0;
 7160         }
 7161         if (map > 0xff)
 7162                 return -EINVAL;
 7163 
 7164 #define EPIO_REG(name) \
 7165         (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
 7166         T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
 7167 
 7168         t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
 7169         t4_write_reg(adap, EPIO_REG(DATA2), mask1);
 7170         t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
 7171 
 7172         for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
 7173                 if (!(map & 1))
 7174                         continue;
 7175 
 7176                 /* write byte masks */
 7177                 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
 7178                 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
 7179                 t4_read_reg(adap, EPIO_REG(OP));                /* flush */
 7180                 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
 7181                         return -ETIMEDOUT;
 7182 
 7183                 /* write CRC */
 7184                 t4_write_reg(adap, EPIO_REG(DATA0), crc);
 7185                 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
 7186                 t4_read_reg(adap, EPIO_REG(OP));                /* flush */
 7187                 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
 7188                         return -ETIMEDOUT;
 7189         }
 7190 #undef EPIO_REG
 7191 
 7192         t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
 7193         return 0;
 7194 }
 7195 
 7196 /**
       *      t4_mk_filtdelwr - create a delete filter WR
 7197  *      @ftid: the filter ID
 7198  *      @wr: the filter work request to populate
 7199  *      @qid: ingress queue to receive the delete notification
 7200  *
 7201  *      Creates a filter work request to delete the supplied filter.  If @qid is
 7202  *      negative the delete notification is suppressed.
 7203  */
 7204 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
 7205 {
 7206         memset(wr, 0, sizeof(*wr));
 7207         wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
 7208         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
 7209         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
 7210                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
 7211         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
 7212         if (qid >= 0)
 7213                 wr->rx_chan_rx_rpl_iq =
 7214                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
 7215 }
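
/*
 * Illustrative usage sketch (not part of the driver): build a delete request
 * for filter "ftid" with the completion steered to ingress queue "qid"
 * (pass a negative qid to suppress the notification).
 *
 *      struct fw_filter_wr wr;
 *
 *      t4_mk_filtdelwr(ftid, &wr, qid);
 *
 * The caller still has to hand the work request to the hardware through its
 * usual transmit path.
 */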
 7216 
 7217 #define INIT_CMD(var, cmd, rd_wr) do { \
 7218         (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
 7219                                         F_FW_CMD_REQUEST | \
 7220                                         F_FW_CMD_##rd_wr); \
 7221         (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
 7222 } while (0)
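
/*
 * For example, INIT_CMD(c, RESET, WRITE) fills c.op_to_write with the
 * FW_RESET_CMD opcode plus F_FW_CMD_REQUEST and F_FW_CMD_WRITE, and
 * c.retval_len16 with the command length in 16-byte units, both byte-swapped
 * to big-endian as the firmware expects.
 */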
 7223 
 7224 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 7225                           u32 addr, u32 val)
 7226 {
 7227         u32 ldst_addrspace;
 7228         struct fw_ldst_cmd c;
 7229 
 7230         memset(&c, 0, sizeof(c));
 7231         ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
 7232         c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 7233                                         F_FW_CMD_REQUEST |
 7234                                         F_FW_CMD_WRITE |
 7235                                         ldst_addrspace);
 7236         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 7237         c.u.addrval.addr = cpu_to_be32(addr);
 7238         c.u.addrval.val = cpu_to_be32(val);
 7239 
 7240         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7241 }
 7242 
 7243 /**
 7244  *      t4_mdio_rd - read a PHY register through MDIO
 7245  *      @adap: the adapter
 7246  *      @mbox: mailbox to use for the FW command
 7247  *      @phy_addr: the PHY address
 7248  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 7249  *      @reg: the register to read
 7250  *      @valp: where to store the value
 7251  *
 7252  *      Issues a FW command through the given mailbox to read a PHY register.
 7253  */
 7254 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 7255                unsigned int mmd, unsigned int reg, unsigned int *valp)
 7256 {
 7257         int ret;
 7258         u32 ldst_addrspace;
 7259         struct fw_ldst_cmd c;
 7260 
 7261         memset(&c, 0, sizeof(c));
 7262         ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
 7263         c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 7264                                         F_FW_CMD_REQUEST | F_FW_CMD_READ |
 7265                                         ldst_addrspace);
 7266         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 7267         c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
 7268                                          V_FW_LDST_CMD_MMD(mmd));
 7269         c.u.mdio.raddr = cpu_to_be16(reg);
 7270 
 7271         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 7272         if (ret == 0)
 7273                 *valp = be16_to_cpu(c.u.mdio.rval);
 7274         return ret;
 7275 }
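
/*
 * Illustrative usage sketch (not part of the driver): read a clause-45
 * register from MMD 1 (PMA/PMD) of the PHY at address "phy", assuming
 * adapter softc "sc".
 *
 *      unsigned int val;
 *      int rc;
 *
 *      rc = t4_mdio_rd(sc, sc->mbox, phy, 1, 0x0000, &val);
 *      if (rc == 0)
 *              ...                     -- 16-bit register value is in val
 */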
 7276 
 7277 /**
 7278  *      t4_mdio_wr - write a PHY register through MDIO
 7279  *      @adap: the adapter
 7280  *      @mbox: mailbox to use for the FW command
 7281  *      @phy_addr: the PHY address
 7282  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 7283  *      @reg: the register to write
 7284  *      @valp: value to write
 7285  *
 7286  *      Issues a FW command through the given mailbox to write a PHY register.
 7287  */
 7288 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 7289                unsigned int mmd, unsigned int reg, unsigned int val)
 7290 {
 7291         u32 ldst_addrspace;
 7292         struct fw_ldst_cmd c;
 7293 
 7294         memset(&c, 0, sizeof(c));
 7295         ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
 7296         c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 7297                                         F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 7298                                         ldst_addrspace);
 7299         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 7300         c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
 7301                                          V_FW_LDST_CMD_MMD(mmd));
 7302         c.u.mdio.raddr = cpu_to_be16(reg);
 7303         c.u.mdio.rval = cpu_to_be16(val);
 7304 
 7305         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7306 }
 7307 
 7308 /**
 7310  *      t4_sge_decode_idma_state - decode the idma state
 7311  *      @adapter: the adapter
 7312  *      @state: the state idma is stuck in
 7313  */
 7314 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
 7315 {
 7316         static const char * const t4_decode[] = {
 7317                 "IDMA_IDLE",
 7318                 "IDMA_PUSH_MORE_CPL_FIFO",
 7319                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
 7320                 "Not used",
 7321                 "IDMA_PHYSADDR_SEND_PCIEHDR",
 7322                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
 7323                 "IDMA_PHYSADDR_SEND_PAYLOAD",
 7324                 "IDMA_SEND_FIFO_TO_IMSG",
 7325                 "IDMA_FL_REQ_DATA_FL_PREP",
 7326                 "IDMA_FL_REQ_DATA_FL",
 7327                 "IDMA_FL_DROP",
 7328                 "IDMA_FL_H_REQ_HEADER_FL",
 7329                 "IDMA_FL_H_SEND_PCIEHDR",
 7330                 "IDMA_FL_H_PUSH_CPL_FIFO",
 7331                 "IDMA_FL_H_SEND_CPL",
 7332                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
 7333                 "IDMA_FL_H_SEND_IP_HDR",
 7334                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
 7335                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
 7336                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
 7337                 "IDMA_FL_D_SEND_PCIEHDR",
 7338                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
 7339                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
 7340                 "IDMA_FL_SEND_PCIEHDR",
 7341                 "IDMA_FL_PUSH_CPL_FIFO",
 7342                 "IDMA_FL_SEND_CPL",
 7343                 "IDMA_FL_SEND_PAYLOAD_FIRST",
 7344                 "IDMA_FL_SEND_PAYLOAD",
 7345                 "IDMA_FL_REQ_NEXT_DATA_FL",
 7346                 "IDMA_FL_SEND_NEXT_PCIEHDR",
 7347                 "IDMA_FL_SEND_PADDING",
 7348                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
 7349                 "IDMA_FL_SEND_FIFO_TO_IMSG",
 7350                 "IDMA_FL_REQ_DATAFL_DONE",
 7351                 "IDMA_FL_REQ_HEADERFL_DONE",
 7352         };
 7353         static const char * const t5_decode[] = {
 7354                 "IDMA_IDLE",
 7355                 "IDMA_ALMOST_IDLE",
 7356                 "IDMA_PUSH_MORE_CPL_FIFO",
 7357                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
 7358                 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
 7359                 "IDMA_PHYSADDR_SEND_PCIEHDR",
 7360                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
 7361                 "IDMA_PHYSADDR_SEND_PAYLOAD",
 7362                 "IDMA_SEND_FIFO_TO_IMSG",
 7363                 "IDMA_FL_REQ_DATA_FL",
 7364                 "IDMA_FL_DROP",
 7365                 "IDMA_FL_DROP_SEND_INC",
 7366                 "IDMA_FL_H_REQ_HEADER_FL",
 7367                 "IDMA_FL_H_SEND_PCIEHDR",
 7368                 "IDMA_FL_H_PUSH_CPL_FIFO",
 7369                 "IDMA_FL_H_SEND_CPL",
 7370                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
 7371                 "IDMA_FL_H_SEND_IP_HDR",
 7372                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
 7373                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
 7374                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
 7375                 "IDMA_FL_D_SEND_PCIEHDR",
 7376                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
 7377                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
 7378                 "IDMA_FL_SEND_PCIEHDR",
 7379                 "IDMA_FL_PUSH_CPL_FIFO",
 7380                 "IDMA_FL_SEND_CPL",
 7381                 "IDMA_FL_SEND_PAYLOAD_FIRST",
 7382                 "IDMA_FL_SEND_PAYLOAD",
 7383                 "IDMA_FL_REQ_NEXT_DATA_FL",
 7384                 "IDMA_FL_SEND_NEXT_PCIEHDR",
 7385                 "IDMA_FL_SEND_PADDING",
 7386                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
 7387         };
 7388         static const char * const t6_decode[] = {
 7389                 "IDMA_IDLE",
 7390                 "IDMA_PUSH_MORE_CPL_FIFO",
 7391                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
 7392                 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
 7393                 "IDMA_PHYSADDR_SEND_PCIEHDR",
 7394                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
 7395                 "IDMA_PHYSADDR_SEND_PAYLOAD",
 7396                 "IDMA_FL_REQ_DATA_FL",
 7397                 "IDMA_FL_DROP",
 7398                 "IDMA_FL_DROP_SEND_INC",
 7399                 "IDMA_FL_H_REQ_HEADER_FL",
 7400                 "IDMA_FL_H_SEND_PCIEHDR",
 7401                 "IDMA_FL_H_PUSH_CPL_FIFO",
 7402                 "IDMA_FL_H_SEND_CPL",
 7403                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
 7404                 "IDMA_FL_H_SEND_IP_HDR",
 7405                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
 7406                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
 7407                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
 7408                 "IDMA_FL_D_SEND_PCIEHDR",
 7409                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
 7410                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
 7411                 "IDMA_FL_SEND_PCIEHDR",
 7412                 "IDMA_FL_PUSH_CPL_FIFO",
 7413                 "IDMA_FL_SEND_CPL",
 7414                 "IDMA_FL_SEND_PAYLOAD_FIRST",
 7415                 "IDMA_FL_SEND_PAYLOAD",
 7416                 "IDMA_FL_REQ_NEXT_DATA_FL",
 7417                 "IDMA_FL_SEND_NEXT_PCIEHDR",
 7418                 "IDMA_FL_SEND_PADDING",
 7419                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
 7420         };
 7421         static const u32 sge_regs[] = {
 7422                 A_SGE_DEBUG_DATA_LOW_INDEX_2,
 7423                 A_SGE_DEBUG_DATA_LOW_INDEX_3,
 7424                 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
 7425         };
 7426         const char * const *sge_idma_decode;
 7427         int sge_idma_decode_nstates;
 7428         int i;
 7429         unsigned int chip_version = chip_id(adapter);
 7430 
 7431         /* Select the right set of decode strings to dump depending on the
 7432          * adapter chip type.
 7433          */
 7434         switch (chip_version) {
 7435         case CHELSIO_T4:
 7436                 sge_idma_decode = (const char * const *)t4_decode;
 7437                 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
 7438                 break;
 7439 
 7440         case CHELSIO_T5:
 7441                 sge_idma_decode = (const char * const *)t5_decode;
 7442                 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
 7443                 break;
 7444 
 7445         case CHELSIO_T6:
 7446                 sge_idma_decode = (const char * const *)t6_decode;
 7447                 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
 7448                 break;
 7449 
 7450         default:
 7451                 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
 7452                 return;
 7453         }
 7454 
 7455         if (state < sge_idma_decode_nstates)
 7456                 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
 7457         else
 7458                 CH_WARN(adapter, "idma state %d unknown\n", state);
 7459 
 7460         for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
 7461                 CH_WARN(adapter, "SGE register %#x value %#x\n",
 7462                         sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
 7463 }
 7464 
 7465 /**
 7466  *      t4_sge_ctxt_flush - flush the SGE context cache
 7467  *      @adap: the adapter
 7468  *      @mbox: mailbox to use for the FW command
       *      @ctxt_type: Egress or Ingress
 7469  *
 7470  *      Issues a FW command through the given mailbox to flush the
 7471  *      SGE context cache.
 7472  */
 7473 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 7474 {
 7475         int ret;
 7476         u32 ldst_addrspace;
 7477         struct fw_ldst_cmd c;
 7478 
 7479         memset(&c, 0, sizeof(c));
 7480         ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
 7481                                                  FW_LDST_ADDRSPC_SGE_EGRC :
 7482                                                  FW_LDST_ADDRSPC_SGE_INGC);
 7483         c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 7484                                         F_FW_CMD_REQUEST | F_FW_CMD_READ |
 7485                                         ldst_addrspace);
 7486         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 7487         c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
 7488 
 7489         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 7490         return ret;
 7491 }
 7492 
 7493 /**
 7494  *      t4_fw_hello - establish communication with FW
 7495  *      @adap: the adapter
 7496  *      @mbox: mailbox to use for the FW command
 7497  *      @evt_mbox: mailbox to receive async FW events
 7498  *      @master: specifies the caller's willingness to be the device master
 7499  *      @state: returns the current device state (if non-NULL)
 7500  *
 7501  *      Issues a command to establish communication with FW.  Returns either
 7502  *      an error (negative integer) or the mailbox of the Master PF.
 7503  */
 7504 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
 7505                 enum dev_master master, enum dev_state *state)
 7506 {
 7507         int ret;
 7508         struct fw_hello_cmd c;
 7509         u32 v;
 7510         unsigned int master_mbox;
 7511         int retries = FW_CMD_HELLO_RETRIES;
 7512 
 7513 retry:
 7514         memset(&c, 0, sizeof(c));
 7515         INIT_CMD(c, HELLO, WRITE);
 7516         c.err_to_clearinit = cpu_to_be32(
 7517                 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
 7518                 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
 7519                 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
 7520                                         mbox : M_FW_HELLO_CMD_MBMASTER) |
 7521                 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
 7522                 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
 7523                 F_FW_HELLO_CMD_CLEARINIT);
 7524 
 7525         /*
 7526          * Issue the HELLO command to the firmware.  If it's not successful
 7527          * but indicates that we got a "busy" or "timeout" condition, retry
 7528          * the HELLO until we exhaust our retry limit.  If we do exceed our
 7529          * retry limit, check to see if the firmware left us any error
 7530          * information and report that if so ...
 7531          */
 7532         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 7533         if (ret != FW_SUCCESS) {
 7534                 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
 7535                         goto retry;
 7536                 return ret;
 7537         }
 7538 
 7539         v = be32_to_cpu(c.err_to_clearinit);
 7540         master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
 7541         if (state) {
 7542                 if (v & F_FW_HELLO_CMD_ERR)
 7543                         *state = DEV_STATE_ERR;
 7544                 else if (v & F_FW_HELLO_CMD_INIT)
 7545                         *state = DEV_STATE_INIT;
 7546                 else
 7547                         *state = DEV_STATE_UNINIT;
 7548         }
 7549 
 7550         /*
 7551          * If we're not the Master PF then we need to wait around for the
 7552          * Master PF Driver to finish setting up the adapter.
 7553          *
 7554          * Note that we also do this wait if we're a non-Master-capable PF and
 7555          * there is no current Master PF; a Master PF may show up momentarily
 7556          * and we wouldn't want to fail pointlessly.  (This can happen when an
 7557          * OS loads lots of different drivers rapidly at the same time).  In
 7558          * this case, the Master PF returned by the firmware will be
 7559          * M_PCIE_FW_MASTER so the test below will work ...
 7560          */
 7561         if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
 7562             master_mbox != mbox) {
 7563                 int waiting = FW_CMD_HELLO_TIMEOUT;
 7564 
 7565                 /*
 7566                  * Wait for the firmware to either indicate an error or
 7567                  * initialized state.  If we see either of these we bail out
 7568                  * and report the issue to the caller.  If we exhaust the
 7569                  * "hello timeout" and we haven't exhausted our retries, try
 7570                  * again.  Otherwise bail with a timeout error.
 7571                  */
 7572                 for (;;) {
 7573                         u32 pcie_fw;
 7574 
 7575                         msleep(50);
 7576                         waiting -= 50;
 7577 
 7578                         /*
 7579                          * If neither Error nor Initialized is indicated
 7580                          * by the firmware, keep waiting till we exhaust our
 7581                          * timeout ... and then retry if we haven't exhausted
 7582                          * our retries ...
 7583                          */
 7584                         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
 7585                         if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
 7586                                 if (waiting <= 0) {
 7587                                         if (retries-- > 0)
 7588                                                 goto retry;
 7589 
 7590                                         return -ETIMEDOUT;
 7591                                 }
 7592                                 continue;
 7593                         }
 7594 
 7595                         /*
 7596                          * We either have an Error or an Initialized
 7597                          * condition; report errors preferentially.
 7598                          */
 7599                         if (state) {
 7600                                 if (pcie_fw & F_PCIE_FW_ERR)
 7601                                         *state = DEV_STATE_ERR;
 7602                                 else if (pcie_fw & F_PCIE_FW_INIT)
 7603                                         *state = DEV_STATE_INIT;
 7604                         }
 7605 
 7606                         /*
 7607                          * If we arrived before a Master PF was selected and
 7608                          * the firmware now reports a valid Master PF, grab
 7609                          * its identity for our caller.
 7610                          */
 7611                         if (master_mbox == M_PCIE_FW_MASTER &&
 7612                             (pcie_fw & F_PCIE_FW_MASTER_VLD))
 7613                                 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
 7614                         break;
 7615                 }
 7616         }
 7617 
 7618         return master_mbox;
 7619 }
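
/*
 * Illustrative usage sketch (not part of the driver): a PF driver typically
 * issues HELLO early in attach, offering (but not insisting) to be the
 * master, and then branches on the device state, assuming adapter softc
 * "sc".  MASTER_MAY is assumed to be the "willing but not forcing" value of
 * enum dev_master.
 *
 *      enum dev_state state;
 *      int rc, master_mbox;
 *
 *      rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
 *      if (rc < 0)
 *              return (-rc);           -- mailbox or timeout error
 *      master_mbox = rc;               -- mailbox of the Master PF
 *      if (state == DEV_STATE_ERR)
 *              ...                     -- firmware reported an error
 */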
 7620 
 7621 /**
 7622  *      t4_fw_bye - end communication with FW
 7623  *      @adap: the adapter
 7624  *      @mbox: mailbox to use for the FW command
 7625  *
 7626  *      Issues a command to terminate communication with FW.
 7627  */
 7628 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 7629 {
 7630         struct fw_bye_cmd c;
 7631 
 7632         memset(&c, 0, sizeof(c));
 7633         INIT_CMD(c, BYE, WRITE);
 7634         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7635 }
 7636 
 7637 /**
 7638  *      t4_fw_reset - issue a reset to FW
 7639  *      @adap: the adapter
 7640  *      @mbox: mailbox to use for the FW command
 7641  *      @reset: specifies the type of reset to perform
 7642  *
 7643  *      Issues a reset command of the specified type to FW.
 7644  */
 7645 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 7646 {
 7647         struct fw_reset_cmd c;
 7648 
 7649         memset(&c, 0, sizeof(c));
 7650         INIT_CMD(c, RESET, WRITE);
 7651         c.val = cpu_to_be32(reset);
 7652         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7653 }
 7654 
 7655 /**
 7656  *      t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 7657  *      @adap: the adapter
 7658  *      @mbox: mailbox to use for the FW RESET command (if desired)
 7659  *      @force: force uP into RESET even if FW RESET command fails
 7660  *
 7661  *      Issues a RESET command to firmware (if desired) with a HALT indication
 7662  *      and then puts the microprocessor into RESET state.  The RESET command
 7663  *      will only be issued if a legitimate mailbox is provided (mbox <=
 7664  *      M_PCIE_FW_MASTER).
 7665  *
 7666  *      This is generally used in order for the host to safely manipulate the
 7667  *      adapter without fear of conflicting with whatever the firmware might
 7668  *      be doing.  The only way out of this state is to RESTART the firmware
 7669  *      ...
 7670  */
 7671 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 7672 {
 7673         int ret = 0;
 7674 
 7675         /*
 7676          * If a legitimate mailbox is provided, issue a RESET command
 7677          * with a HALT indication.
 7678          */
 7679         if ((adap->flags & FW_OK) && mbox <= M_PCIE_FW_MASTER) {
 7680                 struct fw_reset_cmd c;
 7681 
 7682                 memset(&c, 0, sizeof(c));
 7683                 INIT_CMD(c, RESET, WRITE);
 7684                 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
 7685                 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
 7686                 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7687         }
 7688 
 7689         /*
 7690          * Normally we won't complete the operation if the firmware RESET
 7691          * command fails but if our caller insists we'll go ahead and put the
 7692          * uP into RESET.  This can be useful if the firmware is hung or even
 7693          * missing ...  We'll have to take the risk of putting the uP into
 7694          * RESET without the cooperation of firmware in that case.
 7695          *
 7696          * We also force the firmware's HALT flag to be on in case we bypassed
 7697          * the firmware RESET command above or we're dealing with old firmware
 7698          * which doesn't have the HALT capability.  This will serve as a flag
 7699          * for the incoming firmware to know that it's coming out of a HALT
 7700          * rather than a RESET ... if it's new enough to understand that ...
 7701          */
 7702         if (ret == 0 || force) {
 7703                 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
 7704                 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
 7705                                  F_PCIE_FW_HALT);
 7706         }
 7707 
 7708         /*
 7709          * And we always return the result of the firmware RESET command
 7710          * even when we force the uP into RESET ...
 7711          */
 7712         return ret;
 7713 }
 7714 
 7715 /**
 7716  *      t4_fw_restart - restart the firmware by taking the uP out of RESET
 7717  *      @adap: the adapter
       *      @mbox: mailbox token (not used by this routine)
 7718  *
 7719  *      Restart firmware previously halted by t4_fw_halt().  On successful
 7720  *      return the previous PF Master remains as the new PF Master and there
 7721  *      is no need to issue a new HELLO command, etc.
 7722  */
 7723 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
 7724 {
 7725         int ms;
 7726 
 7727         t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
 7728         for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
 7729                 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
 7730                         return FW_SUCCESS;
 7731                 msleep(100);
 7732                 ms += 100;
 7733         }
 7734 
 7735         return -ETIMEDOUT;
 7736 }
 7737 
 7738 /**
 7739  *      t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 7740  *      @adap: the adapter
 7741  *      @mbox: mailbox to use for the FW RESET command (if desired)
 7742  *      @fw_data: the firmware image to write
 7743  *      @size: image size
 7744  *      @force: force upgrade even if firmware doesn't cooperate
 7745  *
 7746  *      Perform all of the steps necessary for upgrading an adapter's
 7747  *      firmware image.  Normally this requires the cooperation of the
 7748  *      existing firmware in order to halt all existing activities
 7749  *      but if an invalid mailbox token is passed in we skip that step
 7750  *      (though we'll still put the adapter microprocessor into RESET in
 7751  *      that case).
 7752  *
 7753  *      On successful return the new firmware will have been loaded and
 7754  *      the adapter will have been fully RESET losing all previous setup
 7755  *      state.  On unsuccessful return the adapter may be completely hosed ...
 7756  *      positive errno indicates that the adapter is ~probably~ intact, a
 7757  *      negative errno indicates that things are looking bad ...
 7758  */
 7759 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 7760                   const u8 *fw_data, unsigned int size, int force)
 7761 {
 7762         const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
 7763         unsigned int bootstrap =
 7764             be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
 7765         int ret;
 7766 
 7767         if (!t4_fw_matches_chip(adap, fw_hdr))
 7768                 return -EINVAL;
 7769 
 7770         if (!bootstrap) {
 7771                 ret = t4_fw_halt(adap, mbox, force);
 7772                 if (ret < 0 && !force)
 7773                         return ret;
 7774         }
 7775 
 7776         ret = t4_load_fw(adap, fw_data, size);
 7777         if (ret < 0 || bootstrap)
 7778                 return ret;
 7779 
 7780         return t4_fw_restart(adap, mbox);
 7781 }
 7782 
 7783 /**
 7784  *      t4_fw_initialize - ask FW to initialize the device
 7785  *      @adap: the adapter
 7786  *      @mbox: mailbox to use for the FW command
 7787  *
 7788  *      Issues a command to FW to partially initialize the device.  This
 7789  *      performs initialization that generally doesn't depend on user input.
 7790  */
 7791 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
 7792 {
 7793         struct fw_initialize_cmd c;
 7794 
 7795         memset(&c, 0, sizeof(c));
 7796         INIT_CMD(c, INITIALIZE, WRITE);
 7797         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7798 }
 7799 
 7800 /**
 7801  *      t4_query_params_rw - query FW or device parameters
 7802  *      @adap: the adapter
 7803  *      @mbox: mailbox to use for the FW command
 7804  *      @pf: the PF
 7805  *      @vf: the VF
 7806  *      @nparams: the number of parameters
 7807  *      @params: the parameter names
 7808  *      @val: the parameter values
 7809  *      @rw: Write and read flag
 7810  *
 7811  *      Reads the value of FW or device parameters.  Up to 7 parameters can be
 7812  *      queried at once.
 7813  */
 7814 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
 7815                        unsigned int vf, unsigned int nparams, const u32 *params,
 7816                        u32 *val, int rw)
 7817 {
 7818         int i, ret;
 7819         struct fw_params_cmd c;
 7820         __be32 *p = &c.param[0].mnem;
 7821 
 7822         if (nparams > 7)
 7823                 return -EINVAL;
 7824 
 7825         memset(&c, 0, sizeof(c));
 7826         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
 7827                                   F_FW_CMD_REQUEST | F_FW_CMD_READ |
 7828                                   V_FW_PARAMS_CMD_PFN(pf) |
 7829                                   V_FW_PARAMS_CMD_VFN(vf));
 7830         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 7831 
 7832         for (i = 0; i < nparams; i++) {
 7833                 *p++ = cpu_to_be32(*params++);
 7834                 if (rw)
 7835                         *p = cpu_to_be32(*(val + i));
 7836                 p++;
 7837         }
 7838 
 7839         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 7840         if (ret == 0)
 7841                 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
 7842                         *val++ = be32_to_cpu(*p);
 7843         return ret;
 7844 }
 7845 
 7846 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 7847                     unsigned int vf, unsigned int nparams, const u32 *params,
 7848                     u32 *val)
 7849 {
 7850         return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
 7851 }
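
/*
 * Usage sketch (not part of the upstream source): querying one device
 * parameter.  'sc' is a hypothetical attached adapter; the mnemonic macros
 * are assumed to come from the firmware interface header, as used elsewhere
 * in the driver.
 *
 *      u32 param, portvec;
 *      int rc;
 *
 *      param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *      rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &portvec);
 *
 * On success, 'portvec' holds the bitmap of physical ports behind this PF.
 */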
 7852 
 7853 /**
 7854  *      t4_set_params_timeout - sets FW or device parameters
 7855  *      @adap: the adapter
 7856  *      @mbox: mailbox to use for the FW command
 7857  *      @pf: the PF
 7858  *      @vf: the VF
 7859  *      @nparams: the number of parameters
 7860  *      @params: the parameter names
 7861  *      @val: the parameter values
 7862  *      @timeout: how long to wait for the mailbox command to complete
 7863  *
 7864  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
 7865  *      specified at once.
 7866  */
 7867 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
 7868                           unsigned int pf, unsigned int vf,
 7869                           unsigned int nparams, const u32 *params,
 7870                           const u32 *val, int timeout)
 7871 {
 7872         struct fw_params_cmd c;
 7873         __be32 *p = &c.param[0].mnem;
 7874 
 7875         if (nparams > 7)
 7876                 return -EINVAL;
 7877 
 7878         memset(&c, 0, sizeof(c));
 7879         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
 7880                                   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 7881                                   V_FW_PARAMS_CMD_PFN(pf) |
 7882                                   V_FW_PARAMS_CMD_VFN(vf));
 7883         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 7884 
 7885         while (nparams--) {
 7886                 *p++ = cpu_to_be32(*params++);
 7887                 *p++ = cpu_to_be32(*val++);
 7888         }
 7889 
 7890         return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
 7891 }
 7892 
 7893 /**
 7894  *      t4_set_params - sets FW or device parameters
 7895  *      @adap: the adapter
 7896  *      @mbox: mailbox to use for the FW command
 7897  *      @pf: the PF
 7898  *      @vf: the VF
 7899  *      @nparams: the number of parameters
 7900  *      @params: the parameter names
 7901  *      @val: the parameter values
 7902  *
 7903  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
 7904  *      specified at once.
 7905  */
 7906 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 7907                   unsigned int vf, unsigned int nparams, const u32 *params,
 7908                   const u32 *val)
 7909 {
 7910         return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
 7911                                      FW_CMD_MAX_TIMEOUT);
 7912 }
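
/*
 * Usage sketch (not part of the upstream source): writing a parameter uses
 * the same mnemonic encoding as the query path, with @val as an input.
 * 'sc' and the already-built 'param'/'val' pair are hypothetical.
 *
 *      int rc;
 *
 *      rc = t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *      if (rc < 0)
 *              device_printf(sc->dev, "failed to set FW parameter: %d\n", -rc);
 */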
 7913 
 7914 /**
 7915  *      t4_cfg_pfvf - configure PF/VF resource limits
 7916  *      @adap: the adapter
 7917  *      @mbox: mailbox to use for the FW command
 7918  *      @pf: the PF being configured
 7919  *      @vf: the VF being configured
 7920  *      @txq: the max number of egress queues
 7921  *      @txq_eth_ctrl: the max number of egress Ethernet or control queues
 7922  *      @rxqi: the max number of interrupt-capable ingress queues
 7923  *      @rxq: the max number of interruptless ingress queues
 7924  *      @tc: the PCI traffic class
 7925  *      @vi: the max number of virtual interfaces
 7926  *      @cmask: the channel access rights mask for the PF/VF
 7927  *      @pmask: the port access rights mask for the PF/VF
 7928  *      @nexact: the maximum number of exact MPS filters
 7929  *      @rcaps: read capabilities
 7930  *      @wxcaps: write/execute capabilities
 7931  *
 7932  *      Configures resource limits and capabilities for a physical or virtual
 7933  *      function.
 7934  */
 7935 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 7936                 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
 7937                 unsigned int rxqi, unsigned int rxq, unsigned int tc,
 7938                 unsigned int vi, unsigned int cmask, unsigned int pmask,
 7939                 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
 7940 {
 7941         struct fw_pfvf_cmd c;
 7942 
 7943         memset(&c, 0, sizeof(c));
 7944         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
 7945                                   F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
 7946                                   V_FW_PFVF_CMD_VFN(vf));
 7947         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 7948         c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
 7949                                      V_FW_PFVF_CMD_NIQ(rxq));
 7950         c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
 7951                                     V_FW_PFVF_CMD_PMASK(pmask) |
 7952                                     V_FW_PFVF_CMD_NEQ(txq));
 7953         c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
 7954                                       V_FW_PFVF_CMD_NVI(vi) |
 7955                                       V_FW_PFVF_CMD_NEXACTF(nexact));
 7956         c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
 7957                                      V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
 7958                                      V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
 7959         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 7960 }
 7961 
 7962 /**
 7963  *      t4_alloc_vi_func - allocate a virtual interface
 7964  *      @adap: the adapter
 7965  *      @mbox: mailbox to use for the FW command
 7966  *      @port: physical port associated with the VI
 7967  *      @pf: the PF owning the VI
 7968  *      @vf: the VF owning the VI
 7969  *      @nmac: number of MAC addresses needed (1 to 5)
 7970  *      @mac: the MAC addresses of the VI
 7971  *      @rss_size: size of RSS table slice associated with this VI
 7972  *      @portfunc: which Port Application Function MAC Address is desired
 7973  *      @idstype: Intrusion Detection Type
 7974  *
 7975  *      Allocates a virtual interface for the given physical port.  If @mac is
 7976  *      not %NULL it contains the MAC addresses of the VI as assigned by FW.
 7977  *      If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 7978  *      @mac should be large enough to hold @nmac Ethernet addresses; they are
 7979  *      stored consecutively, so the space needed is @nmac * 6 bytes.
 7980  *      Returns a negative error number or the non-negative VI id.
 7981  */
 7982 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
 7983                      unsigned int port, unsigned int pf, unsigned int vf,
 7984                      unsigned int nmac, u8 *mac, u16 *rss_size,
 7985                      uint8_t *vfvld, uint16_t *vin,
 7986                      unsigned int portfunc, unsigned int idstype)
 7987 {
 7988         int ret;
 7989         struct fw_vi_cmd c;
 7990 
 7991         memset(&c, 0, sizeof(c));
 7992         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
 7993                                   F_FW_CMD_WRITE | F_FW_CMD_EXEC |
 7994                                   V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
 7995         c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
 7996         c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
 7997                                      V_FW_VI_CMD_FUNC(portfunc));
 7998         c.portid_pkd = V_FW_VI_CMD_PORTID(port);
 7999         c.nmac = nmac - 1;
 8000         if (!rss_size)
 8001                 c.norss_rsssize = F_FW_VI_CMD_NORSS;
 8002 
 8003         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 8004         if (ret)
 8005                 return ret;
 8006         ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
 8007 
 8008         if (mac) {
 8009                 memcpy(mac, c.mac, sizeof(c.mac));
 8010                 switch (nmac) {
 8011                 case 5:
 8012                         memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));     /* FALLTHROUGH */
 8013                 case 4:
 8014                         memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));     /* FALLTHROUGH */
 8015                 case 3:
 8016                         memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));     /* FALLTHROUGH */
 8017                 case 2:
 8018                         memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
 8019                 }
 8020         }
 8021         if (rss_size)
 8022                 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
 8023         if (vfvld) {
 8024                 *vfvld = adap->params.viid_smt_extn_support ?
 8025                     G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
 8026                     G_FW_VIID_VIVLD(ret);
 8027         }
 8028         if (vin) {
 8029                 *vin = adap->params.viid_smt_extn_support ?
 8030                     G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
 8031                     G_FW_VIID_VIN(ret);
 8032         }
 8033 
 8034         return ret;
 8035 }
 8036 
 8037 /**
 8038  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 8039  *      @adap: the adapter
 8040  *      @mbox: mailbox to use for the FW command
 8041  *      @port: physical port associated with the VI
 8042  *      @pf: the PF owning the VI
 8043  *      @vf: the VF owning the VI
 8044  *      @nmac: number of MAC addresses needed (1 to 5)
 8045  *      @mac: the MAC addresses of the VI
 8046  *      @rss_size: size of RSS table slice associated with this VI
 8047  *
 8048  *      Backwards-compatible convenience routine to allocate a Virtual
 8049  *      Interface with an Ethernet Port Application Function and Intrusion
 8050  *      Detection System disabled.
 8051  */
 8052 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 8053                 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
 8054                 u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
 8055 {
 8056         return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
 8057                                 vfvld, vin, FW_VI_FUNC_ETH, 0);
 8058 }
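
/*
 * Usage sketch (not part of the upstream source): allocating an Ethernet VI
 * on physical port 0 with a single MAC address.  'sc' is hypothetical; the
 * return value is the VI id on success or a negative error number.
 *
 *      u8 mac[ETHER_ADDR_LEN], vfvld;
 *      u16 rss_size, vin;
 *      int viid;
 *
 *      viid = t4_alloc_vi(sc, sc->mbox, 0, sc->pf, 0, 1, mac, &rss_size,
 *          &vfvld, &vin);
 *      if (viid < 0)
 *              return (-viid);
 */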
 8059 
 8060 /**
 8061  *      t4_free_vi - free a virtual interface
 8062  *      @adap: the adapter
 8063  *      @mbox: mailbox to use for the FW command
 8064  *      @pf: the PF owning the VI
 8065  *      @vf: the VF owning the VI
 8066  *      @viid: virtual interface identifier
 8067  *
 8068  *      Free a previously allocated virtual interface.
 8069  */
 8070 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8071                unsigned int vf, unsigned int viid)
 8072 {
 8073         struct fw_vi_cmd c;
 8074 
 8075         memset(&c, 0, sizeof(c));
 8076         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
 8077                                   F_FW_CMD_REQUEST |
 8078                                   F_FW_CMD_EXEC |
 8079                                   V_FW_VI_CMD_PFN(pf) |
 8080                                   V_FW_VI_CMD_VFN(vf));
 8081         c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
 8082         c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
 8083 
 8084         return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 8085 }
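
/*
 * Usage sketch (not part of the upstream source): releasing a VI obtained
 * from t4_alloc_vi(), typically during detach.  'sc' and 'viid' are
 * hypothetical.
 *
 *      rc = t4_free_vi(sc, sc->mbox, sc->pf, 0, viid);
 */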
 8086 
 8087 /**
 8088  *      t4_set_rxmode - set Rx properties of a virtual interface
 8089  *      @adap: the adapter
 8090  *      @mbox: mailbox to use for the FW command
 8091  *      @viid: the VI id
 8092  *      @mtu: the new MTU or -1
 8093  *      @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 8094  *      @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 8095  *      @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 8096  *      @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 8097  *      @sleep_ok: if true we may sleep while awaiting command completion
 8098  *
 8099  *      Sets Rx properties of a virtual interface.
 8100  */
 8101 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 8102                   int mtu, int promisc, int all_multi, int bcast, int vlanex,
 8103                   bool sleep_ok)
 8104 {
 8105         struct fw_vi_rxmode_cmd c;
 8106 
 8107         /* convert to FW values */
 8108         if (mtu < 0)
 8109                 mtu = M_FW_VI_RXMODE_CMD_MTU;
 8110         if (promisc < 0)
 8111                 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
 8112         if (all_multi < 0)
 8113                 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
 8114         if (bcast < 0)
 8115                 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
 8116         if (vlanex < 0)
 8117                 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
 8118 
 8119         memset(&c, 0, sizeof(c));
 8120         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
 8121                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8122                                    V_FW_VI_RXMODE_CMD_VIID(viid));
 8123         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 8124         c.mtu_to_vlanexen =
 8125                 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
 8126                             V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
 8127                             V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
 8128                             V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
 8129                             V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
 8130         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 8131 }
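
/*
 * Usage sketch (not part of the upstream source): enabling promiscuous and
 * all-multicast reception on a VI while leaving the MTU, broadcast and VLAN
 * extraction settings unchanged (-1 means "no change").  'sc' and 'viid' are
 * hypothetical.
 *
 *      int rc;
 *
 *      rc = t4_set_rxmode(sc, sc->mbox, viid, -1, 1, 1, -1, -1, true);
 */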
 8132 
 8133 /**
 8134  *      t4_alloc_encap_mac_filt - Adds a MAC entry in the MPS TCAM with VNI support
 8135  *      @adap: the adapter
 8136  *      @viid: the VI id
 8137  *      @addr: the MAC address
 8138  *      @mask: the mask
 8139  *      @vni: the VNI id for the tunnel protocol
 8140  *      @vni_mask: mask for the VNI id
 8141  *      @dip_hit: to enable DIP match for the MPS entry
 8142  *      @lookup_type: MAC address for inner (1) or outer (0) header
 8143  *      @sleep_ok: call is allowed to sleep
 8144  *
 8145  *      Allocates an MPS entry with specified MAC address and VNI value.
 8146  *
 8147  *      Returns a negative error number or the allocated index for this MAC.
 8148  */
 8149 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
 8150                             const u8 *addr, const u8 *mask, unsigned int vni,
 8151                             unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
 8152                             bool sleep_ok)
 8153 {
 8154         struct fw_vi_mac_cmd c;
 8155         struct fw_vi_mac_vni *p = c.u.exact_vni;
 8156         int ret = 0;
 8157         u32 val;
 8158 
 8159         memset(&c, 0, sizeof(c));
 8160         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8161                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8162                                    V_FW_VI_MAC_CMD_VIID(viid));
 8163         val = V_FW_CMD_LEN16(1) |
 8164               V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
 8165         c.freemacs_to_len16 = cpu_to_be32(val);
 8166         p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
 8167                                       V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
 8168         memcpy(p->macaddr, addr, sizeof(p->macaddr));
 8169         memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
 8170 
 8171         p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
 8172                                             V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
 8173                                             V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
 8174         p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));
 8175 
 8176         ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
 8177         if (ret == 0)
 8178                 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
 8179         return ret;
 8180 }
 8181 
 8182 /**
 8183  *      t4_alloc_raw_mac_filt - Adds a MAC entry in the MPS TCAM
 8184  *      @adap: the adapter
 8185  *      @viid: the VI id
 8186  *      @addr: the MAC address
 8187  *      @mask: the mask
 8188  *      @idx: index at which to add this entry
 8189  *      @port_id: the port index
 8190  *      @lookup_type: MAC address for inner (1) or outer (0) header
 8191  *      @sleep_ok: call is allowed to sleep
 8192  *
 8193  *      Adds the MAC entry at the specified index using the raw MAC interface.
 8194  *
 8195  *      Returns a negative error number or the allocated index for this MAC.
 8196  */
 8197 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
 8198                           const u8 *addr, const u8 *mask, unsigned int idx,
 8199                           u8 lookup_type, u8 port_id, bool sleep_ok)
 8200 {
 8201         int ret = 0;
 8202         struct fw_vi_mac_cmd c;
 8203         struct fw_vi_mac_raw *p = &c.u.raw;
 8204         u32 val;
 8205 
 8206         memset(&c, 0, sizeof(c));
 8207         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8208                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8209                                    V_FW_VI_MAC_CMD_VIID(viid));
 8210         val = V_FW_CMD_LEN16(1) |
 8211               V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
 8212         c.freemacs_to_len16 = cpu_to_be32(val);
 8213 
 8214         /* Index in the MPS TCAM at which to install this raw entry */
 8215         p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
 8216 
 8217         /* Lookup Type. Outer header: 0, Inner header: 1 */
 8218         p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
 8219                                    V_DATAPORTNUM(port_id));
 8220         /* Lookup mask and port mask */
 8221         p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
 8222                                     V_DATAPORTNUM(M_DATAPORTNUM));
 8223 
 8224         /* Copy the address and the mask */
 8225         memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
 8226         memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
 8227 
 8228         ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
 8229         if (ret == 0) {
 8230                 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
 8231                 if (ret != idx)
 8232                         ret = -ENOMEM;
 8233         }
 8234 
 8235         return ret;
 8236 }
 8237 
 8238 /**
 8239  *      t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 8240  *      @adap: the adapter
 8241  *      @mbox: mailbox to use for the FW command
 8242  *      @viid: the VI id
 8243  *      @free: if true any existing filters for this VI id are first removed
 8244  *      @naddr: the number of MAC addresses to allocate filters for (up to 7)
 8245  *      @addr: the MAC address(es)
 8246  *      @idx: where to store the index of each allocated filter
 8247  *      @hash: pointer to hash address filter bitmap
 8248  *      @sleep_ok: call is allowed to sleep
 8249  *
 8250  *      Allocates an exact-match filter for each of the supplied addresses and
 8251  *      sets it to the corresponding address.  If @idx is not %NULL it should
 8252  *      have at least @naddr entries, each of which will be set to the index of
 8253  *      the filter allocated for the corresponding MAC address.  If a filter
 8254  *      could not be allocated for an address its index is set to 0xffff.
 8255  *      could not be allocated for an address, its index is set to 0xffff.
 8256  *      If @hash is not %NULL, addresses that fail to allocate an exact filter
 8257  *      are hashed and used to update the hash filter bitmap pointed at by @hash.
 8258  *      Returns a negative error number or the number of filters allocated.
 8259  */
 8260 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 8261                       unsigned int viid, bool free, unsigned int naddr,
 8262                       const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
 8263 {
 8264         int offset, ret = 0;
 8265         struct fw_vi_mac_cmd c;
 8266         unsigned int nfilters = 0;
 8267         unsigned int max_naddr = adap->chip_params->mps_tcam_size;
 8268         unsigned int rem = naddr;
 8269 
 8270         if (naddr > max_naddr)
 8271                 return -EINVAL;
 8272 
 8273         for (offset = 0; offset < naddr ; /**/) {
 8274                 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
 8275                                          ? rem
 8276                                          : ARRAY_SIZE(c.u.exact));
 8277                 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
 8278                                                      u.exact[fw_naddr]), 16);
 8279                 struct fw_vi_mac_exact *p;
 8280                 int i;
 8281 
 8282                 memset(&c, 0, sizeof(c));
 8283                 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8284                                            F_FW_CMD_REQUEST |
 8285                                            F_FW_CMD_WRITE |
 8286                                            V_FW_CMD_EXEC(free) |
 8287                                            V_FW_VI_MAC_CMD_VIID(viid));
 8288                 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
 8289                                                   V_FW_CMD_LEN16(len16));
 8290 
 8291                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
 8292                         p->valid_to_idx =
 8293                                 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
 8294                                             V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
 8295                         memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
 8296                 }
 8297 
 8298                 /*
 8299                  * It's okay if we run out of space in our MAC address arena.
 8300                  * Some of the addresses we submit may get stored so we need
 8301                  * to run through the reply to see what the results were ...
 8302                  */
 8303                 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
 8304                 if (ret && ret != -FW_ENOMEM)
 8305                         break;
 8306 
 8307                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
 8308                         u16 index = G_FW_VI_MAC_CMD_IDX(
 8309                                                 be16_to_cpu(p->valid_to_idx));
 8310 
 8311                         if (idx)
 8312                                 idx[offset+i] = (index >=  max_naddr
 8313                                                  ? 0xffff
 8314                                                  : index);
 8315                         if (index < max_naddr)
 8316                                 nfilters++;
 8317                         else if (hash)
 8318                                 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
 8319                 }
 8320 
 8321                 free = false;
 8322                 offset += fw_naddr;
 8323                 rem -= fw_naddr;
 8324         }
 8325 
 8326         if (ret == 0 || ret == -FW_ENOMEM)
 8327                 ret = nfilters;
 8328         return ret;
 8329 }
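
/*
 * Usage sketch (not part of the upstream source): installing exact-match
 * filters for a small list of multicast addresses and catching any overflow
 * in the inexact hash filter.  'sc', 'viid', 'mcaddr' and 'naddr' are
 * hypothetical; 'mcaddr' is an array of pointers to 6-byte addresses.
 *
 *      u64 hash = 0;
 *      int rc;
 *
 *      rc = t4_alloc_mac_filt(sc, sc->mbox, viid, false, naddr, mcaddr,
 *          NULL, &hash, true);
 *      if (rc < 0)
 *              return (-rc);
 *      if (hash != 0)
 *              rc = t4_set_addr_hash(sc, sc->mbox, viid, false, hash, true);
 */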
 8330 
 8331 /**
 8332  *      t4_free_encap_mac_filt - frees MPS entry at given index
 8333  *      @adap: the adapter
 8334  *      @viid: the VI id
 8335  *      @idx: index of MPS entry to be freed
 8336  *      @sleep_ok: call is allowed to sleep
 8337  *
 8338  *      Frees the MPS entry at the supplied index.
 8339  *
 8340  *      Returns a negative error number or zero on success.
 8341  */
 8342 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
 8343                            int idx, bool sleep_ok)
 8344 {
 8345         struct fw_vi_mac_exact *p;
 8346         struct fw_vi_mac_cmd c;
 8347         u8 addr[] = {0,0,0,0,0,0};
 8348         int ret = 0;
 8349         u32 exact;
 8350 
 8351         memset(&c, 0, sizeof(c));
 8352         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8353                                    F_FW_CMD_REQUEST |
 8354                                    F_FW_CMD_WRITE |
 8355                                    V_FW_CMD_EXEC(0) |
 8356                                    V_FW_VI_MAC_CMD_VIID(viid));
 8357         exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
 8358         c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
 8359                                           exact |
 8360                                           V_FW_CMD_LEN16(1));
 8361         p = c.u.exact;
 8362         p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
 8363                                       V_FW_VI_MAC_CMD_IDX(idx));
 8364         memcpy(p->macaddr, addr, sizeof(p->macaddr));
 8365 
 8366         ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
 8367         return ret;
 8368 }
 8369 
 8370 /**
 8371  *      t4_free_raw_mac_filt - Frees a raw MAC entry in the MPS TCAM
 8372  *      @adap: the adapter
 8373  *      @viid: the VI id
 8374  *      @addr: the MAC address
 8375  *      @mask: the mask
 8376  *      @idx: index of the entry in mps tcam
 8377  *      @lookup_type: MAC address for inner (1) or outer (0) header
 8378  *      @port_id: the port index
 8379  *      @sleep_ok: call is allowed to sleep
 8380  *
 8381  *      Removes the MAC entry at the specified index using the raw MAC interface.
 8382  *
 8383  *      Returns a negative error number on failure.
 8384  */
 8385 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
 8386                          const u8 *addr, const u8 *mask, unsigned int idx,
 8387                          u8 lookup_type, u8 port_id, bool sleep_ok)
 8388 {
 8389         struct fw_vi_mac_cmd c;
 8390         struct fw_vi_mac_raw *p = &c.u.raw;
 8391         u32 raw;
 8392 
 8393         memset(&c, 0, sizeof(c));
 8394         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8395                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8396                                    V_FW_CMD_EXEC(0) |
 8397                                    V_FW_VI_MAC_CMD_VIID(viid));
 8398         raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
 8399         c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
 8400                                           raw |
 8401                                           V_FW_CMD_LEN16(1));
 8402 
 8403         p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
 8404                                      FW_VI_MAC_ID_BASED_FREE);
 8405 
 8406         /* Lookup Type. Outer header: 0, Inner header: 1 */
 8407         p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
 8408                                    V_DATAPORTNUM(port_id));
 8409         /* Lookup mask and port mask */
 8410         p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
 8411                                     V_DATAPORTNUM(M_DATAPORTNUM));
 8412 
 8413         /* Copy the address and the mask */
 8414         memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
 8415         memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
 8416 
 8417         return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
 8418 }
 8419 
 8420 /**
 8421  *      t4_free_mac_filt - frees exact-match filters of given MAC addresses
 8422  *      @adap: the adapter
 8423  *      @mbox: mailbox to use for the FW command
 8424  *      @viid: the VI id
 8425  *      @naddr: the number of MAC addresses to free filters for (up to 7)
 8426  *      @addr: the MAC address(es)
 8427  *      @sleep_ok: call is allowed to sleep
 8428  *
 8429  *      Frees the exact-match filter for each of the supplied addresses
 8430  *
 8431  *      Returns a negative error number or the number of filters freed.
 8432  */
 8433 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
 8434                       unsigned int viid, unsigned int naddr,
 8435                       const u8 **addr, bool sleep_ok)
 8436 {
 8437         int offset, ret = 0;
 8438         struct fw_vi_mac_cmd c;
 8439         unsigned int nfilters = 0;
 8440         unsigned int max_naddr = adap->chip_params->mps_tcam_size;
 8441         unsigned int rem = naddr;
 8442 
 8443         if (naddr > max_naddr)
 8444                 return -EINVAL;
 8445 
 8446         for (offset = 0; offset < (int)naddr ; /**/) {
 8447                 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
 8448                                          ? rem
 8449                                          : ARRAY_SIZE(c.u.exact));
 8450                 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
 8451                                                      u.exact[fw_naddr]), 16);
 8452                 struct fw_vi_mac_exact *p;
 8453                 int i;
 8454 
 8455                 memset(&c, 0, sizeof(c));
 8456                 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8457                                      F_FW_CMD_REQUEST |
 8458                                      F_FW_CMD_WRITE |
 8459                                      V_FW_CMD_EXEC(0) |
 8460                                      V_FW_VI_MAC_CMD_VIID(viid));
 8461                 c.freemacs_to_len16 =
 8462                                 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
 8463                                             V_FW_CMD_LEN16(len16));
 8464 
 8465                 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
 8466                         p->valid_to_idx = cpu_to_be16(
 8467                                 F_FW_VI_MAC_CMD_VALID |
 8468                                 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
 8469                         memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
 8470                 }
 8471 
 8472                 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
 8473                 if (ret)
 8474                         break;
 8475 
 8476                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
 8477                         u16 index = G_FW_VI_MAC_CMD_IDX(
 8478                                                 be16_to_cpu(p->valid_to_idx));
 8479 
 8480                         if (index < max_naddr)
 8481                                 nfilters++;
 8482                 }
 8483 
 8484                 offset += fw_naddr;
 8485                 rem -= fw_naddr;
 8486         }
 8487 
 8488         if (ret == 0)
 8489                 ret = nfilters;
 8490         return ret;
 8491 }
 8492 
 8493 /**
 8494  *      t4_change_mac - modifies the exact-match filter for a MAC address
 8495  *      @adap: the adapter
 8496  *      @mbox: mailbox to use for the FW command
 8497  *      @viid: the VI id
 8498  *      @idx: index of existing filter for old value of MAC address, or -1
 8499  *      @addr: the new MAC address value
 8500  *      @persist: whether a new MAC allocation should be persistent
 8501  *      @smt_idx: add MAC to SMT and return its index, or NULL
 8502  *
 8503  *      Modifies an exact-match filter and sets it to the new MAC address if
 8504  *      @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 8505  *      latter case the address is added persistently if @persist is %true.
 8506  *
 8507  *      Note that in general it is not possible to modify the value of a given
 8508  *      filter, so the generic way to modify an address filter is to free the one
 8509  *      being used by the old address value and allocate a new filter for the
 8510  *      new address value.
 8511  *
 8512  *      Returns a negative error number or the index of the filter with the new
 8513  *      MAC value.  Note that this index may differ from @idx.
 8514  */
 8515 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
 8516                   int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
 8517 {
 8518         int ret, mode;
 8519         struct fw_vi_mac_cmd c;
 8520         struct fw_vi_mac_exact *p = c.u.exact;
 8521         unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
 8522 
 8523         if (idx < 0)            /* new allocation */
 8524                 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
 8525         mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
 8526 
 8527         memset(&c, 0, sizeof(c));
 8528         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8529                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8530                                    V_FW_VI_MAC_CMD_VIID(viid));
 8531         c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
 8532         p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
 8533                                       V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
 8534                                       V_FW_VI_MAC_CMD_IDX(idx));
 8535         memcpy(p->macaddr, addr, sizeof(p->macaddr));
 8536 
 8537         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 8538         if (ret == 0) {
 8539                 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
 8540                 if (ret >= max_mac_addr)
 8541                         ret = -ENOMEM;
 8542                 if (smt_idx) {
 8543                         if (adap->params.viid_smt_extn_support)
 8544                                 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
 8545                         else {
 8546                                 if (chip_id(adap) <= CHELSIO_T5)
 8547                                         *smt_idx = (viid & M_FW_VIID_VIN) << 1;
 8548                                 else
 8549                                         *smt_idx = viid & M_FW_VIID_VIN;
 8550                         }
 8551                 }
 8552         }
 8553         return ret;
 8554 }
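
/*
 * Usage sketch (not part of the upstream source): replacing a VI's primary
 * unicast address.  'old_idx' is the index returned by a previous call (or
 * -1 for a first-time allocation); the new index must be remembered because
 * it may differ.  All names are hypothetical.
 *
 *      int idx;
 *
 *      idx = t4_change_mac(sc, sc->mbox, viid, old_idx, new_mac, true, NULL);
 *      if (idx < 0)
 *              return (-idx);
 *      old_idx = idx;
 */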
 8555 
 8556 /**
 8557  *      t4_set_addr_hash - program the MAC inexact-match hash filter
 8558  *      @adap: the adapter
 8559  *      @mbox: mailbox to use for the FW command
 8560  *      @viid: the VI id
 8561  *      @ucast: whether the hash filter should also match unicast addresses
 8562  *      @vec: the value to be written to the hash filter
 8563  *      @sleep_ok: call is allowed to sleep
 8564  *
 8565  *      Sets the 64-bit inexact-match hash filter for a virtual interface.
 8566  */
 8567 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 8568                      bool ucast, u64 vec, bool sleep_ok)
 8569 {
 8570         struct fw_vi_mac_cmd c;
 8571         u32 val;
 8572 
 8573         memset(&c, 0, sizeof(c));
 8574         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
 8575                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
 8576                                    V_FW_VI_ENABLE_CMD_VIID(viid));
 8577         val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
 8578               V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
 8579         c.freemacs_to_len16 = cpu_to_be32(val);
 8580         c.u.hash.hashvec = cpu_to_be64(vec);
 8581         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 8582 }
 8583 
 8584 /**
 8585  *      t4_enable_vi_params - enable/disable a virtual interface
 8586  *      @adap: the adapter
 8587  *      @mbox: mailbox to use for the FW command
 8588  *      @viid: the VI id
 8589  *      @rx_en: 1=enable Rx, 0=disable Rx
 8590  *      @tx_en: 1=enable Tx, 0=disable Tx
 8591  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
 8592  *
 8593  *      Enables/disables a virtual interface.  Note that setting DCB Enable
 8594  *      only makes sense when enabling a Virtual Interface ...
 8595  */
 8596 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
 8597                         unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
 8598 {
 8599         struct fw_vi_enable_cmd c;
 8600 
 8601         memset(&c, 0, sizeof(c));
 8602         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
 8603                                    F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8604                                    V_FW_VI_ENABLE_CMD_VIID(viid));
 8605         c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
 8606                                      V_FW_VI_ENABLE_CMD_EEN(tx_en) |
 8607                                      V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
 8608                                      FW_LEN16(c));
 8609         return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
 8610 }
 8611 
 8612 /**
 8613  *      t4_enable_vi - enable/disable a virtual interface
 8614  *      @adap: the adapter
 8615  *      @mbox: mailbox to use for the FW command
 8616  *      @viid: the VI id
 8617  *      @rx_en: 1=enable Rx, 0=disable Rx
 8618  *      @tx_en: 1=enable Tx, 0=disable Tx
 8619  *
 8620  *      Enables/disables a virtual interface.  Convenience wrapper around
 8621  *      t4_enable_vi_params() with delivery of DCB messages left disabled.
 8622  */
 8623 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
 8624                  bool rx_en, bool tx_en)
 8625 {
 8626         return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
 8627 }
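
/*
 * Usage sketch (not part of the upstream source): bringing a VI up for both
 * Rx and Tx, and quiescing it again later.  'sc' and 'viid' are hypothetical.
 *
 *      rc = t4_enable_vi(sc, sc->mbox, viid, true, true);
 *      ...
 *      rc = t4_enable_vi(sc, sc->mbox, viid, false, false);
 */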
 8628 
 8629 /**
 8630  *      t4_identify_port - identify a VI's port by blinking its LED
 8631  *      @adap: the adapter
 8632  *      @mbox: mailbox to use for the FW command
 8633  *      @viid: the VI id
 8634  *      @nblinks: how many times to blink LED at 2.5 Hz
 8635  *
 8636  *      Identifies a VI's port by blinking its LED.
 8637  */
 8638 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 8639                      unsigned int nblinks)
 8640 {
 8641         struct fw_vi_enable_cmd c;
 8642 
 8643         memset(&c, 0, sizeof(c));
 8644         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
 8645                                    F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8646                                    V_FW_VI_ENABLE_CMD_VIID(viid));
 8647         c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
 8648         c.blinkdur = cpu_to_be16(nblinks);
 8649         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8650 }
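
/*
 * Usage sketch (not part of the upstream source): blinking the port LED ten
 * times (at 2.5 Hz) to locate the physical connector behind a VI.  'sc' and
 * 'viid' are hypothetical.
 *
 *      rc = t4_identify_port(sc, sc->mbox, viid, 10);
 */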
 8651 
 8652 /**
 8653  *      t4_iq_stop - stop an ingress queue and its FLs
 8654  *      @adap: the adapter
 8655  *      @mbox: mailbox to use for the FW command
 8656  *      @pf: the PF owning the queues
 8657  *      @vf: the VF owning the queues
 8658  *      @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 8659  *      @iqid: ingress queue id
 8660  *      @fl0id: FL0 queue id or 0xffff if no attached FL0
 8661  *      @fl1id: FL1 queue id or 0xffff if no attached FL1
 8662  *
 8663  *      Stops an ingress queue and its associated FLs, if any.  This causes
 8664  *      any current or future data/messages destined for these queues to be
 8665  *      tossed.
 8666  */
 8667 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8668                unsigned int vf, unsigned int iqtype, unsigned int iqid,
 8669                unsigned int fl0id, unsigned int fl1id)
 8670 {
 8671         struct fw_iq_cmd c;
 8672 
 8673         memset(&c, 0, sizeof(c));
 8674         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
 8675                                   F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
 8676                                   V_FW_IQ_CMD_VFN(vf));
 8677         c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
 8678         c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
 8679         c.iqid = cpu_to_be16(iqid);
 8680         c.fl0id = cpu_to_be16(fl0id);
 8681         c.fl1id = cpu_to_be16(fl1id);
 8682         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8683 }
 8684 
 8685 /**
 8686  *      t4_iq_free - free an ingress queue and its FLs
 8687  *      @adap: the adapter
 8688  *      @mbox: mailbox to use for the FW command
 8689  *      @pf: the PF owning the queues
 8690  *      @vf: the VF owning the queues
 8691  *      @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 8692  *      @iqid: ingress queue id
 8693  *      @fl0id: FL0 queue id or 0xffff if no attached FL0
 8694  *      @fl1id: FL1 queue id or 0xffff if no attached FL1
 8695  *
 8696  *      Frees an ingress queue and its associated FLs, if any.
 8697  */
 8698 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8699                unsigned int vf, unsigned int iqtype, unsigned int iqid,
 8700                unsigned int fl0id, unsigned int fl1id)
 8701 {
 8702         struct fw_iq_cmd c;
 8703 
 8704         memset(&c, 0, sizeof(c));
 8705         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
 8706                                   F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
 8707                                   V_FW_IQ_CMD_VFN(vf));
 8708         c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
 8709         c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
 8710         c.iqid = cpu_to_be16(iqid);
 8711         c.fl0id = cpu_to_be16(fl0id);
 8712         c.fl1id = cpu_to_be16(fl1id);
 8713         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8714 }
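
/*
 * Usage sketch (not part of the upstream source): freeing an ingress queue
 * that has one free list attached and no second FL (0xffff means "none").
 * 'sc', 'iq_id' and 'fl0_id' are hypothetical context ids.
 *
 *      rc = t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *          iq_id, fl0_id, 0xffff);
 */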
 8715 
 8716 /**
 8717  *      t4_eth_eq_stop - stop an Ethernet egress queue
 8718  *      @adap: the adapter
 8719  *      @mbox: mailbox to use for the FW command
 8720  *      @pf: the PF owning the queues
 8721  *      @vf: the VF owning the queues
 8722  *      @eqid: egress queue id
 8723  *
 8724  *      Stops an Ethernet egress queue.  The queue can be reinitialized or
 8725  *      freed but is not otherwise functional after this call.
 8726  */
 8727 int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8728                    unsigned int vf, unsigned int eqid)
 8729 {
 8730         struct fw_eq_eth_cmd c;
 8731 
 8732         memset(&c, 0, sizeof(c));
 8733         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
 8734                                   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8735                                   V_FW_EQ_ETH_CMD_PFN(pf) |
 8736                                   V_FW_EQ_ETH_CMD_VFN(vf));
 8737         c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c));
 8738         c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
 8739         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8740 }
 8741 
 8742 /**
 8743  *      t4_eth_eq_free - free an Ethernet egress queue
 8744  *      @adap: the adapter
 8745  *      @mbox: mailbox to use for the FW command
 8746  *      @pf: the PF owning the queue
 8747  *      @vf: the VF owning the queue
 8748  *      @eqid: egress queue id
 8749  *
 8750  *      Frees an Ethernet egress queue.
 8751  */
 8752 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8753                    unsigned int vf, unsigned int eqid)
 8754 {
 8755         struct fw_eq_eth_cmd c;
 8756 
 8757         memset(&c, 0, sizeof(c));
 8758         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
 8759                                   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8760                                   V_FW_EQ_ETH_CMD_PFN(pf) |
 8761                                   V_FW_EQ_ETH_CMD_VFN(vf));
 8762         c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
 8763         c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
 8764         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8765 }
 8766 
 8767 /**
 8768  *      t4_ctrl_eq_free - free a control egress queue
 8769  *      @adap: the adapter
 8770  *      @mbox: mailbox to use for the FW command
 8771  *      @pf: the PF owning the queue
 8772  *      @vf: the VF owning the queue
 8773  *      @eqid: egress queue id
 8774  *
 8775  *      Frees a control egress queue.
 8776  */
 8777 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8778                     unsigned int vf, unsigned int eqid)
 8779 {
 8780         struct fw_eq_ctrl_cmd c;
 8781 
 8782         memset(&c, 0, sizeof(c));
 8783         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
 8784                                   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8785                                   V_FW_EQ_CTRL_CMD_PFN(pf) |
 8786                                   V_FW_EQ_CTRL_CMD_VFN(vf));
 8787         c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
 8788         c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
 8789         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8790 }
 8791 
 8792 /**
 8793  *      t4_ofld_eq_free - free an offload egress queue
 8794  *      @adap: the adapter
 8795  *      @mbox: mailbox to use for the FW command
 8796  *      @pf: the PF owning the queue
 8797  *      @vf: the VF owning the queue
 8798  *      @eqid: egress queue id
 8799  *
 8800  *      Frees an offload egress queue.
 8801  */
 8802 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 8803                     unsigned int vf, unsigned int eqid)
 8804 {
 8805         struct fw_eq_ofld_cmd c;
 8806 
 8807         memset(&c, 0, sizeof(c));
 8808         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
 8809                                   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
 8810                                   V_FW_EQ_OFLD_CMD_PFN(pf) |
 8811                                   V_FW_EQ_OFLD_CMD_VFN(vf));
 8812         c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
 8813         c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
 8814         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 8815 }
 8816 
 8817 /**
 8818  *      t4_link_down_rc_str - return a string for a Link Down Reason Code
 8819  *      @link_down_rc: Link Down Reason Code
 8820  *
 8821  *      Returns a string representation of the Link Down Reason Code.
 8822  */
 8823 const char *t4_link_down_rc_str(unsigned char link_down_rc)
 8824 {
 8825         static const char *reason[] = {
 8826                 "Link Down",
 8827                 "Remote Fault",
 8828                 "Auto-negotiation Failure",
 8829                 "Reserved3",
 8830                 "Insufficient Airflow",
 8831                 "Unable To Determine Reason",
 8832                 "No RX Signal Detected",
 8833                 "Reserved7",
 8834         };
 8835 
 8836         if (link_down_rc >= ARRAY_SIZE(reason))
 8837                 return "Bad Reason Code";
 8838 
 8839         return reason[link_down_rc];
 8840 }
 8841 
 8842 /*
 8843  * Return the highest speed set in the port capabilities, in Mb/s.
 8844  */
 8845 unsigned int fwcap_to_speed(uint32_t caps)
 8846 {
 8847         #define TEST_SPEED_RETURN(__caps_speed, __speed) \
 8848                 do { \
 8849                         if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
 8850                                 return __speed; \
 8851                 } while (0)
 8852 
 8853         TEST_SPEED_RETURN(400G, 400000);
 8854         TEST_SPEED_RETURN(200G, 200000);
 8855         TEST_SPEED_RETURN(100G, 100000);
 8856         TEST_SPEED_RETURN(50G,   50000);
 8857         TEST_SPEED_RETURN(40G,   40000);
 8858         TEST_SPEED_RETURN(25G,   25000);
 8859         TEST_SPEED_RETURN(10G,   10000);
 8860         TEST_SPEED_RETURN(1G,     1000);
 8861         TEST_SPEED_RETURN(100M,    100);
 8862 
 8863         #undef TEST_SPEED_RETURN
 8864 
 8865         return 0;
 8866 }
 8867 
 8868 /*
 8869  * Return the port capabilities bit for the given speed, which is in Mb/s.
 8870  */
 8871 uint32_t speed_to_fwcap(unsigned int speed)
 8872 {
 8873         #define TEST_SPEED_RETURN(__caps_speed, __speed) \
 8874                 do { \
 8875                         if (speed == __speed) \
 8876                                 return FW_PORT_CAP32_SPEED_##__caps_speed; \
 8877                 } while (0)
 8878 
 8879         TEST_SPEED_RETURN(400G, 400000);
 8880         TEST_SPEED_RETURN(200G, 200000);
 8881         TEST_SPEED_RETURN(100G, 100000);
 8882         TEST_SPEED_RETURN(50G,   50000);
 8883         TEST_SPEED_RETURN(40G,   40000);
 8884         TEST_SPEED_RETURN(25G,   25000);
 8885         TEST_SPEED_RETURN(10G,   10000);
 8886         TEST_SPEED_RETURN(1G,     1000);
 8887         TEST_SPEED_RETURN(100M,    100);
 8888 
 8889         #undef TEST_SPEED_RETURN
 8890 
 8891         return 0;
 8892 }
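
/*
 * Worked example (not part of the upstream source): fwcap_to_speed() and
 * speed_to_fwcap() are inverses for exact port speeds, and unknown speeds
 * map to no capability bit at all:
 *
 *      fwcap_to_speed(FW_PORT_CAP32_SPEED_25G) == 25000
 *      speed_to_fwcap(25000) == FW_PORT_CAP32_SPEED_25G
 *      speed_to_fwcap(2500) == 0
 */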
 8893 
 8894 /*
 8895  * Return the port capabilities bit for the highest speed in the capabilities.
 8896  */
 8897 uint32_t fwcap_top_speed(uint32_t caps)
 8898 {
 8899         #define TEST_SPEED_RETURN(__caps_speed) \
 8900                 do { \
 8901                         if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
 8902                                 return FW_PORT_CAP32_SPEED_##__caps_speed; \
 8903                 } while (0)
 8904 
 8905         TEST_SPEED_RETURN(400G);
 8906         TEST_SPEED_RETURN(200G);
 8907         TEST_SPEED_RETURN(100G);
 8908         TEST_SPEED_RETURN(50G);
 8909         TEST_SPEED_RETURN(40G);
 8910         TEST_SPEED_RETURN(25G);
 8911         TEST_SPEED_RETURN(10G);
 8912         TEST_SPEED_RETURN(1G);
 8913         TEST_SPEED_RETURN(100M);
 8914 
 8915         #undef TEST_SPEED_RETURN
 8916 
 8917         return 0;
 8918 }
 8919 
 8920 /**
 8921  *      lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 8922  *      @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 8923  *
 8924  *      Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 8925  *      32-bit Port Capabilities value.
 8926  */
 8927 static uint32_t lstatus_to_fwcap(u32 lstatus)
 8928 {
 8929         uint32_t linkattr = 0;
 8930 
 8931         /*
 8932          * Unfortunately the format of the Link Status in the old
 8933          * 16-bit Port Information message isn't the same as the
 8934          * 16-bit Port Capabilities bitfield used everywhere else ...
 8935          */
 8936         if (lstatus & F_FW_PORT_CMD_RXPAUSE)
 8937                 linkattr |= FW_PORT_CAP32_FC_RX;
 8938         if (lstatus & F_FW_PORT_CMD_TXPAUSE)
 8939                 linkattr |= FW_PORT_CAP32_FC_TX;
 8940         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
 8941                 linkattr |= FW_PORT_CAP32_SPEED_100M;
 8942         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
 8943                 linkattr |= FW_PORT_CAP32_SPEED_1G;
 8944         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
 8945                 linkattr |= FW_PORT_CAP32_SPEED_10G;
 8946         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
 8947                 linkattr |= FW_PORT_CAP32_SPEED_25G;
 8948         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
 8949                 linkattr |= FW_PORT_CAP32_SPEED_40G;
 8950         if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
 8951                 linkattr |= FW_PORT_CAP32_SPEED_100G;
 8952 
 8953         return linkattr;
 8954 }
 8955 
 8956 /*
 8957  * Updates all fields owned by the common code in port_info and link_config
 8958  * based on information provided by the firmware.  Does not touch any
 8959  * requested_* field.
 8960  */
 8961 static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
 8962     enum fw_port_action action, bool *mod_changed, bool *link_changed)
 8963 {
 8964         struct link_config old_lc, *lc = &pi->link_cfg;
 8965         unsigned char fc;
 8966         u32 stat, linkattr;
 8967         int old_ptype, old_mtype;
 8968 
 8969         old_ptype = pi->port_type;
 8970         old_mtype = pi->mod_type;
 8971         old_lc = *lc;
 8972         if (action == FW_PORT_ACTION_GET_PORT_INFO) {
 8973                 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
 8974 
 8975                 pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
 8976                 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
 8977                 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
 8978                     G_FW_PORT_CMD_MDIOADDR(stat) : -1;
 8979 
 8980                 lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
 8981                 lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
 8982                 lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
 8983                 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
 8984                 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
 8985 
 8986                 linkattr = lstatus_to_fwcap(stat);
 8987         } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
 8988                 stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
 8989 
 8990                 pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
 8991                 pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
 8992                 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
 8993                     G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
 8994 
 8995                 lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
 8996                 lc->acaps = be32_to_cpu(p->u.info32.acaps32);
 8997                 lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
 8998                 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
 8999                 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
 9000 
 9001                 linkattr = be32_to_cpu(p->u.info32.linkattr32);
 9002         } else {
 9003                 CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
 9004                 return;
 9005         }
 9006 
 9007         lc->speed = fwcap_to_speed(linkattr);
 9008         lc->fec = fwcap_to_fec(linkattr, true);
 9009 
 9010         fc = 0;
 9011         if (linkattr & FW_PORT_CAP32_FC_RX)
 9012                 fc |= PAUSE_RX;
 9013         if (linkattr & FW_PORT_CAP32_FC_TX)
 9014                 fc |= PAUSE_TX;
 9015         lc->fc = fc;
 9016 
 9017         if (mod_changed != NULL)
 9018                 *mod_changed = false;
 9019         if (link_changed != NULL)
 9020                 *link_changed = false;
 9021         if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
 9022             old_lc.pcaps != lc->pcaps) {
 9023                 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
 9024                         lc->fec_hint = fwcap_to_fec(lc->acaps, true);
 9025                 if (mod_changed != NULL)
 9026                         *mod_changed = true;
 9027         }
 9028         if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
 9029             old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
 9030                 if (link_changed != NULL)
 9031                         *link_changed = true;
 9032         }
 9033 }
 9034 
 9035 /**
 9036  *      t4_update_port_info - retrieve and update port information if changed
 9037  *      @pi: the port_info
 9038  *
 9039  *      We issue a Get Port Information Command to the Firmware and, if
 9040  *      successful, we check to see if anything is different from what we
 9041  *      last recorded and update things accordingly.
 9042  */
 9043 int t4_update_port_info(struct port_info *pi)
 9044 {
 9045         struct adapter *sc = pi->adapter;
 9046         struct fw_port_cmd cmd;
 9047         enum fw_port_action action;
 9048         int ret;
 9049 
 9050         memset(&cmd, 0, sizeof(cmd));
 9051         cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
 9052             F_FW_CMD_REQUEST | F_FW_CMD_READ |
 9053             V_FW_PORT_CMD_PORTID(pi->tx_chan));
 9054         action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
 9055             FW_PORT_ACTION_GET_PORT_INFO;
 9056         cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
 9057             FW_LEN16(cmd));
 9058         ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
 9059         if (ret)
 9060                 return ret;
 9061 
 9062         handle_port_info(pi, &cmd, action, NULL, NULL);
 9063         return 0;
 9064 }
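
/*
 * Usage sketch (not part of the upstream source): callers typically refresh
 * the port snapshot under the port lock and then act on the result.  'pi'
 * and 'rc' are hypothetical.
 *
 *      PORT_LOCK(pi);
 *      rc = t4_update_port_info(pi);
 *      if (rc == 0 && pi->link_cfg.link_ok)
 *              t4_os_link_changed(pi);
 *      PORT_UNLOCK(pi);
 */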
 9065 
 9066 /**
 9067  *      t4_handle_fw_rpl - process a FW reply message
 9068  *      @adap: the adapter
 9069  *      @rpl: start of the FW message
 9070  *
 9071  *      Processes a FW message, such as link state change messages.
 9072  */
 9073 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
 9074 {
 9075         u8 opcode = *(const u8 *)rpl;
 9076         const struct fw_port_cmd *p = (const void *)rpl;
 9077         enum fw_port_action action =
 9078             G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
 9079         bool mod_changed, link_changed;
 9080 
 9081         if (opcode == FW_PORT_CMD &&
 9082             (action == FW_PORT_ACTION_GET_PORT_INFO ||
 9083             action == FW_PORT_ACTION_GET_PORT_INFO32)) {
 9084                 /* link/module state change message */
 9085                 int i;
 9086                 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
 9087                 struct port_info *pi = NULL;
 9088 
 9089                 for_each_port(adap, i) {
 9090                         pi = adap2pinfo(adap, i);
 9091                         if (pi->tx_chan == chan)
 9092                                 break;
 9093                 }
 9094 
 9095                 PORT_LOCK(pi);
 9096                 handle_port_info(pi, p, action, &mod_changed, &link_changed);
 9097                 PORT_UNLOCK(pi);
 9098                 if (mod_changed)
 9099                         t4_os_portmod_changed(pi);
 9100                 if (link_changed) {
 9101                         PORT_LOCK(pi);
 9102                         t4_os_link_changed(pi);
 9103                         PORT_UNLOCK(pi);
 9104                 }
 9105         } else {
 9106                 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
 9107                 return -EINVAL;
 9108         }
 9109         return 0;
 9110 }
 9111 
 9112 /**
 9113  *      get_pci_mode - determine a card's PCI mode
 9114  *      @adapter: the adapter
 9115  *      @p: where to store the PCI settings
 9116  *
 9117  *      Determines a card's PCI mode and associated parameters, such as speed
 9118  *      and width.
 9119  */
 9120 static void get_pci_mode(struct adapter *adapter,
 9121                                    struct pci_params *p)
 9122 {
 9123         u16 val;
 9124         u32 pcie_cap;
 9125 
 9126         pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
 9127         if (pcie_cap) {
 9128                 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
 9129                 p->speed = val & PCI_EXP_LNKSTA_CLS;
 9130                 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
 9131         }
 9132 }
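
/*
 * Editorial sketch, not part of the driver: a worked example of the Link
 * Status decode in get_pci_mode() above.  The mask values below are the
 * standard PCIe ones and are assumed to correspond to the
 * PCI_EXP_LNKSTA_CLS/_NLW macros used by the driver.
 */
static inline void
example_decode_lnksta(void)
{
        const unsigned int lnksta = 0x0083;     /* hypothetical Link Status value */
        unsigned int speed = lnksta & 0x000f;           /* 3 -> 8 GT/s */
        unsigned int width = (lnksta & 0x03f0) >> 4;    /* 8 -> x8 link */

        (void)speed;
        (void)width;
}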
 9133 
 9134 struct flash_desc {
 9135         u32 vendor_and_model_id;
 9136         u32 size_mb;
 9137 };
 9138 
 9139 int t4_get_flash_params(struct adapter *adapter)
 9140 {
 9141         /*
 9142          * Table for non-standard supported Flash parts.  Note, all Flash
 9143          * parts must have 64KB sectors.
 9144          */
 9145         static struct flash_desc supported_flash[] = {
 9146                 { 0x00150201, 4 << 20 },        /* Spansion 4MB S25FL032P */
 9147         };
 9148 
 9149         int ret;
 9150         u32 flashid = 0;
 9151         unsigned int part, manufacturer;
 9152         unsigned int density, size = 0;
 9153 
 9154 
 9155         /*
 9156          * Issue a Read ID Command to the Flash part.  We decode supported
 9157          * Flash parts and their sizes from this.  There's a newer Query
 9158          * Command which can retrieve detailed geometry information but many
 9159          * Flash parts don't support it.
 9160          */
 9161         ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 9162         if (!ret)
 9163                 ret = sf1_read(adapter, 3, 0, 1, &flashid);
 9164         t4_write_reg(adapter, A_SF_OP, 0);      /* unlock SF */
 9165         if (ret < 0)
 9166                 return ret;
 9167 
 9168         /*
 9169          * Check to see if it's one of our non-standard supported Flash parts.
 9170          */
 9171         for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
 9172                 if (supported_flash[part].vendor_and_model_id == flashid) {
 9173                         adapter->params.sf_size =
 9174                                 supported_flash[part].size_mb;
 9175                         adapter->params.sf_nsec =
 9176                                 adapter->params.sf_size / SF_SEC_SIZE;
 9177                         goto found;
 9178                 }
 9179 
 9180         /*
 9181          * Decode Flash part size.  The code below looks repetitive with
 9182          * common encodings, but that's not guaranteed in the JEDEC
 9183          * specification for the Read JEDEC ID command.  The only thing that
 9184          * we're guaranteed by the JEDEC specification is where the
 9185          * Manufacturer ID is in the returned result.  After that each
 9186          * Manufacturer ~could~ encode things completely differently.
 9187          * Note, all Flash parts must have 64KB sectors.
 9188          */
 9189         manufacturer = flashid & 0xff;
 9190         switch (manufacturer) {
 9191         case 0x20: /* Micron/Numonix */
 9192                 /*
 9193                  * This Density -> Size decoding table is taken from Micron
 9194                  * Data Sheets.
 9195                  */
 9196                 density = (flashid >> 16) & 0xff;
 9197                 switch (density) {
 9198                 case 0x14: size = 1 << 20; break; /*   1MB */
 9199                 case 0x15: size = 1 << 21; break; /*   2MB */
 9200                 case 0x16: size = 1 << 22; break; /*   4MB */
 9201                 case 0x17: size = 1 << 23; break; /*   8MB */
 9202                 case 0x18: size = 1 << 24; break; /*  16MB */
 9203                 case 0x19: size = 1 << 25; break; /*  32MB */
 9204                 case 0x20: size = 1 << 26; break; /*  64MB */
 9205                 case 0x21: size = 1 << 27; break; /* 128MB */
 9206                 case 0x22: size = 1 << 28; break; /* 256MB */
 9207                 }
 9208                 break;
 9209 
 9210         case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
 9211                 /*
 9212                  * This Density -> Size decoding table is taken from ISSI
 9213                  * Data Sheets.
 9214                  */
 9215                 density = (flashid >> 16) & 0xff;
 9216                 switch (density) {
 9217                 case 0x16: size = 1 << 25; break; /*  32MB */
 9218                 case 0x17: size = 1 << 26; break; /*  64MB */
 9219                 }
 9220                 break;
 9221 
 9222         case 0xc2: /* Macronix */
 9223                 /*
 9224                  * This Density -> Size decoding table is taken from Macronix
 9225                  * Data Sheets.
 9226                  */
 9227                 density = (flashid >> 16) & 0xff;
 9228                 switch (density) {
 9229                 case 0x17: size = 1 << 23; break; /*   8MB */
 9230                 case 0x18: size = 1 << 24; break; /*  16MB */
 9231                 }
 9232                 break;
 9233 
 9234         case 0xef: /* Winbond */
 9235                 /*
 9236                  * This Density -> Size decoding table is taken from Winbond
 9237                  * Data Sheets.
 9238                  */
 9239                 density = (flashid >> 16) & 0xff;
 9240                 switch (density) {
 9241                 case 0x17: size = 1 << 23; break; /*   8MB */
 9242                 case 0x18: size = 1 << 24; break; /*  16MB */
 9243                 }
 9244                 break;
 9245         }
 9246 
 9247         /* If we didn't recognize the FLASH part, that's no real issue: the
 9248          * Hardware/Software contract says that Hardware will _*ALWAYS*_
 9249          * use a FLASH part which is at least 4MB in size and has 64KB
 9250          * sectors.  The unrecognized FLASH part is likely to be much larger
 9251          * than 4MB, but that's all we really need.
 9252          */
 9253         if (size == 0) {
 9254                 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
 9255                 size = 1 << 22;
 9256         }
 9257 
 9258         /*
 9259          * Store decoded Flash size and fall through into vetting code.
 9260          */
 9261         adapter->params.sf_size = size;
 9262         adapter->params.sf_nsec = size / SF_SEC_SIZE;
 9263 
 9264  found:
 9265         /*
 9266          * We should ~probably~ reject adapters with FLASHes which are too
 9267          * small but we have some legacy FPGAs with small FLASHes that we'd
 9268          * still like to use.  So instead we emit a scary message ...
 9269          */
 9270         if (adapter->params.sf_size < FLASH_MIN_SIZE)
 9271                 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
 9272                         flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
 9273 
 9274         return 0;
 9275 }
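
/*
 * Editorial sketch, not part of the driver: a worked example of the Read ID
 * decode in t4_get_flash_params() above, assuming sf1_read() packs the ID
 * bytes with the Manufacturer ID in the low byte and the density code in
 * bits 23:16 (which is what the decode above expects).  A Winbond W25Q128
 * (JEDEC ID bytes EF 40 18) would then show up as flashid == 0x001840ef.
 */
static inline void
example_decode_flashid(void)
{
        const unsigned int flashid = 0x001840ef;        /* hypothetical sample */
        unsigned int manufacturer = flashid & 0xff;     /* 0xef: Winbond */
        unsigned int density = (flashid >> 16) & 0xff;  /* 0x18 */
        unsigned int size = 1 << 24;            /* density 0x18 -> 16MB (table above) */
        unsigned int nsec = size / (64 * 1024); /* 256 x 64KB sectors */

        (void)manufacturer;
        (void)density;
        (void)nsec;
}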
 9276 
 9277 static void set_pcie_completion_timeout(struct adapter *adapter,
 9278                                                   u8 range)
 9279 {
 9280         u16 val;
 9281         u32 pcie_cap;
 9282 
 9283         pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
 9284         if (pcie_cap) {
 9285                 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
 9286                 val &= 0xfff0;
 9287                 val |= range;
 9288                 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
 9289         }
 9290 }
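
/*
 * Editorial note, not part of the driver: set_pcie_completion_timeout()
 * above is a read-modify-write that preserves the upper 12 bits of
 * PCI_EXP_DEVCTL2 and replaces only the 4-bit Completion Timeout Value
 * field.  For a hypothetical starting value:
 *
 *      old DEVCTL2 = 0x0a20, range = 0xd
 *      new DEVCTL2 = (0x0a20 & 0xfff0) | 0xd = 0x0a2d
 */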
 9291 
 9292 const struct chip_params *t4_get_chip_params(int chipid)
 9293 {
 9294         static const struct chip_params chip_params[] = {
 9295                 {
 9296                         /* T4 */
 9297                         .nchan = NCHAN,
 9298                         .pm_stats_cnt = PM_NSTATS,
 9299                         .cng_ch_bits_log = 2,
 9300                         .nsched_cls = 15,
 9301                         .cim_num_obq = CIM_NUM_OBQ,
 9302                         .filter_opt_len = FILTER_OPT_LEN,
 9303                         .mps_rplc_size = 128,
 9304                         .vfcount = 128,
 9305                         .sge_fl_db = F_DBPRIO,
 9306                         .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
 9307                         .rss_nentries = RSS_NENTRIES,
 9308                 },
 9309                 {
 9310                         /* T5 */
 9311                         .nchan = NCHAN,
 9312                         .pm_stats_cnt = PM_NSTATS,
 9313                         .cng_ch_bits_log = 2,
 9314                         .nsched_cls = 16,
 9315                         .cim_num_obq = CIM_NUM_OBQ_T5,
 9316                         .filter_opt_len = T5_FILTER_OPT_LEN,
 9317                         .mps_rplc_size = 128,
 9318                         .vfcount = 128,
 9319                         .sge_fl_db = F_DBPRIO | F_DBTYPE,
 9320                         .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
 9321                         .rss_nentries = RSS_NENTRIES,
 9322                 },
 9323                 {
 9324                         /* T6 */
 9325                         .nchan = T6_NCHAN,
 9326                         .pm_stats_cnt = T6_PM_NSTATS,
 9327                         .cng_ch_bits_log = 3,
 9328                         .nsched_cls = 16,
 9329                         .cim_num_obq = CIM_NUM_OBQ_T5,
 9330                         .filter_opt_len = T5_FILTER_OPT_LEN,
 9331                         .mps_rplc_size = 256,
 9332                         .vfcount = 256,
 9333                         .sge_fl_db = 0,
 9334                         .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
 9335                         .rss_nentries = T6_RSS_NENTRIES,
 9336                 },
 9337         };
 9338 
 9339         chipid -= CHELSIO_T4;
 9340         if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
 9341                 return NULL;
 9342 
 9343         return &chip_params[chipid];
 9344 }
 9345 
 9346 /**
 9347  *      t4_prep_adapter - prepare SW and HW for operation
 9348  *      @adapter: the adapter
 9349  *      @buf: temporary space of at least VPD_LEN size provided by the caller.
 9350  *
 9351  *      Initialize adapter SW state for the various HW modules, set initial
 9352  *      values for some adapter tunables, take PHYs out of reset, and
 9353  *      initialize the MDIO interface.
 9354  */
 9355 int t4_prep_adapter(struct adapter *adapter, u32 *buf)
 9356 {
 9357         int ret;
 9358         uint16_t device_id;
 9359         uint32_t pl_rev;
 9360 
 9361         get_pci_mode(adapter, &adapter->params.pci);
 9362 
 9363         pl_rev = t4_read_reg(adapter, A_PL_REV);
 9364         adapter->params.chipid = G_CHIPID(pl_rev);
 9365         adapter->params.rev = G_REV(pl_rev);
 9366         if (adapter->params.chipid == 0) {
 9367                 /* T4 did not have chipid in PL_REV (T5 onwards do) */
 9368                 adapter->params.chipid = CHELSIO_T4;
 9369 
 9370                 /* T4A1 chip is not supported */
 9371                 if (adapter->params.rev == 1) {
 9372                         CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
 9373                         return -EINVAL;
 9374                 }
 9375         }
 9376 
 9377         adapter->chip_params = t4_get_chip_params(chip_id(adapter));
 9378         if (adapter->chip_params == NULL)
 9379                 return -EINVAL;
 9380 
 9381         adapter->params.pci.vpd_cap_addr =
 9382             t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
 9383 
 9384         ret = t4_get_flash_params(adapter);
 9385         if (ret < 0)
 9386                 return ret;
 9387 
 9388         /* Cards with real ASICs have the chipid in the PCIe device id */
 9389         t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
 9390         if (device_id >> 12 == chip_id(adapter))
 9391                 adapter->params.cim_la_size = CIMLA_SIZE;
 9392         else {
 9393                 /* FPGA */
 9394                 adapter->params.fpga = 1;
 9395                 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
 9396         }
 9397 
 9398         ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
 9399         if (ret < 0)
 9400                 return ret;
 9401 
 9402         init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 9403 
 9404         /*
 9405          * Default port and clock for debugging in case we can't reach FW.
 9406          */
 9407         adapter->params.nports = 1;
 9408         adapter->params.portvec = 1;
 9409         adapter->params.vpd.cclk = 50000;
 9410 
 9411         /* Set PCIe completion timeout value to 4 seconds. */
 9412         set_pcie_completion_timeout(adapter, 0xd);
 9413         return 0;
 9414 }
 9415 
 9416 /**
 9417  *      t4_shutdown_adapter - shut down adapter, host & wire
 9418  *      @adapter: the adapter
 9419  *
 9420  *      Perform an emergency shutdown of the adapter and stop it from
 9421  *      continuing any further communication on the ports or DMA to the
 9422  *      host.  This is typically used when the adapter and/or firmware
 9423  *      have crashed and we want to prevent any further accidental
 9424  *      communication with the rest of the world.  This will also force
 9425  *      the port Link Status to go down -- if register writes work --
 9426  *      which should help our peers figure out that we're down.
 9427  */
 9428 int t4_shutdown_adapter(struct adapter *adapter)
 9429 {
 9430         int port;
 9431         const bool bt = adapter->bt_map != 0;
 9432 
 9433         t4_intr_disable(adapter);
 9434         if (bt)
 9435                 t4_write_reg(adapter, A_DBG_GPIO_EN, 0xffff0000);
 9436         for_each_port(adapter, port) {
 9437                 u32 a_port_cfg = is_t4(adapter) ?
 9438                                  PORT_REG(port, A_XGMAC_PORT_CFG) :
 9439                                  T5_PORT_REG(port, A_MAC_PORT_CFG);
 9440 
 9441                 t4_write_reg(adapter, a_port_cfg,
 9442                              t4_read_reg(adapter, a_port_cfg)
 9443                              & ~V_SIGNAL_DET(1));
 9444                 if (!bt) {
 9445                         u32 hss_cfg0 = is_t4(adapter) ?
 9446                                          PORT_REG(port, A_XGMAC_PORT_HSS_CFG0) :
 9447                                          T5_PORT_REG(port, A_MAC_PORT_HSS_CFG0);
 9448                         t4_set_reg_field(adapter, hss_cfg0, F_HSSPDWNPLLB |
 9449                             F_HSSPDWNPLLA | F_HSSPLLBYPB | F_HSSPLLBYPA,
 9450                             F_HSSPDWNPLLB | F_HSSPDWNPLLA | F_HSSPLLBYPB |
 9451                             F_HSSPLLBYPA);
 9452                 }
 9453         }
 9454         t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
 9455 
 9456         return 0;
 9457 }
 9458 
 9459 /**
 9460  *      t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 9461  *      @adapter: the adapter
 9462  *      @qid: the Queue ID
 9463  *      @qtype: the Ingress or Egress type for @qid
 9464  *      @user: true if this request is for a user mode queue
 9465  *      @pbar2_qoffset: BAR2 Queue Offset
 9466  *      @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 9467  *
 9468  *      Returns the BAR2 SGE Queue Registers information associated with the
 9469  *      indicated Absolute Queue ID.  These are passed back in return value
 9470  *      pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 9471  *      and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 9472  *
 9473  *      This may return an error which indicates that BAR2 SGE Queue
 9474  *      registers aren't available.  If an error is not returned, then the
 9475  *      following values are returned:
 9476  *
 9477  *        *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 9478  *        *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 9479  *
 9480  *      If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 9481  *      require the "Inferred Queue ID" ability may be used.  E.g. the
 9482  *      Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 9483  *      then these "Inferred Queue ID" registers may not be used.
 9484  */
 9485 int t4_bar2_sge_qregs(struct adapter *adapter,
 9486                       unsigned int qid,
 9487                       enum t4_bar2_qtype qtype,
 9488                       int user,
 9489                       u64 *pbar2_qoffset,
 9490                       unsigned int *pbar2_qid)
 9491 {
 9492         unsigned int page_shift, page_size, qpp_shift, qpp_mask;
 9493         u64 bar2_page_offset, bar2_qoffset;
 9494         unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
 9495 
 9496         /* T4 doesn't support BAR2 SGE Queue registers for kernel
 9497          * mode queues.
 9498          */
 9499         if (!user && is_t4(adapter))
 9500                 return -EINVAL;
 9501 
 9502         /* Get our SGE Page Size parameters.
 9503          */
 9504         page_shift = adapter->params.sge.page_shift;
 9505         page_size = 1 << page_shift;
 9506 
 9507         /* Get the right Queues per Page parameters for our Queue.
 9508          */
 9509         qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
 9510                      ? adapter->params.sge.eq_s_qpp
 9511                      : adapter->params.sge.iq_s_qpp);
 9512         qpp_mask = (1 << qpp_shift) - 1;
 9513 
 9514         /* Calculate the basics of the BAR2 SGE Queue register area:
 9515          *  o The BAR2 page the Queue registers will be in.
 9516          *  o The BAR2 Queue ID.
 9517          *  o The BAR2 Queue ID Offset into the BAR2 page.
 9518          */
 9519         bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
 9520         bar2_qid = qid & qpp_mask;
 9521         bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
 9522 
 9523         /* If the BAR2 Queue ID Offset is less than the Page Size, then the
 9524          * hardware will infer the Absolute Queue ID simply from the writes to
 9525          * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
 9526          * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
 9527          * write to the first BAR2 SGE Queue Area within the BAR2 Page with
 9528          * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
 9529          * from the BAR2 Page and BAR2 Queue ID.
 9530          *
 9531          * One important consequence of this is that some BAR2 SGE registers
 9532          * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
 9533          * there.  But other registers synthesize the SGE Queue ID purely
 9534          * from the writes to the registers -- the Write Combined Doorbell
 9535          * Buffer is a good example.  These BAR2 SGE Registers are only
 9536          * available for those BAR2 SGE Register areas where the SGE Absolute
 9537          * Queue ID can be inferred from simple writes.
 9538          */
 9539         bar2_qoffset = bar2_page_offset;
 9540         bar2_qinferred = (bar2_qid_offset < page_size);
 9541         if (bar2_qinferred) {
 9542                 bar2_qoffset += bar2_qid_offset;
 9543                 bar2_qid = 0;
 9544         }
 9545 
 9546         *pbar2_qoffset = bar2_qoffset;
 9547         *pbar2_qid = bar2_qid;
 9548         return 0;
 9549 }
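
/*
 * Editorial sketch, not part of the driver: a worked example of the BAR2
 * offset calculation in t4_bar2_sge_qregs() above.  Assume a 4KB SGE page
 * (page_shift = 12), 8 egress queues per page (qpp_shift = 3), a 128-byte
 * doorbell area per queue (SGE_UDB_SIZE), and qid = 37:
 *
 *      bar2_page_offset = (37 >> 3) << 12 = 0x4000
 *      bar2_qid         = 37 & 7          = 5
 *      bar2_qid_offset  = 5 * 128         = 640  (< 4096, so ID is inferred)
 *      *pbar2_qoffset   = 0x4000 + 640    = 0x4280, *pbar2_qid = 0
 */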
 9550 
 9551 /**
 9552  *      t4_init_devlog_params - initialize adapter->params.devlog
 9553  *      @adap: the adapter
 9554  *      @fw_attach: whether we can talk to the firmware
 9555  *
 9556  *      Initialize various fields of the adapter's Firmware Device Log
 9557  *      Parameters structure.
 9558  */
 9559 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
 9560 {
 9561         struct devlog_params *dparams = &adap->params.devlog;
 9562         u32 pf_dparams;
 9563         unsigned int devlog_meminfo;
 9564         struct fw_devlog_cmd devlog_cmd;
 9565         int ret;
 9566 
 9567         /* If we're dealing with newer firmware, the Device Log Parameters
 9568          * are stored in a designated register which allows us to access the
 9569          * Device Log even if we can't talk to the firmware.
 9570          */
 9571         pf_dparams =
 9572                 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
 9573         if (pf_dparams) {
 9574                 unsigned int nentries, nentries128;
 9575 
 9576                 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
 9577                 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
 9578 
 9579                 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
 9580                 nentries = (nentries128 + 1) * 128;
 9581                 dparams->size = nentries * sizeof(struct fw_devlog_e);
 9582 
 9583                 return 0;
 9584         }
 9585 
 9586         /*
 9587          * For any failing returns ...
 9588          */
 9589         memset(dparams, 0, sizeof *dparams);
 9590 
 9591         /*
 9592          * If we can't talk to the firmware, there's really nothing we can do
 9593          * at this point.
 9594          */
 9595         if (!fw_attach)
 9596                 return -ENXIO;
 9597 
 9598         /* Otherwise, ask the firmware for its Device Log Parameters.
 9599          */
 9600         memset(&devlog_cmd, 0, sizeof devlog_cmd);
 9601         devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
 9602                                              F_FW_CMD_REQUEST | F_FW_CMD_READ);
 9603         devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
 9604         ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
 9605                          &devlog_cmd);
 9606         if (ret)
 9607                 return ret;
 9608 
 9609         devlog_meminfo =
 9610                 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
 9611         dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
 9612         dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
 9613         dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
 9614 
 9615         return 0;
 9616 }
 9617 
 9618 /**
 9619  *      t4_init_sge_params - initialize adap->params.sge
 9620  *      @adapter: the adapter
 9621  *
 9622  *      Initialize various fields of the adapter's SGE Parameters structure.
 9623  */
 9624 int t4_init_sge_params(struct adapter *adapter)
 9625 {
 9626         u32 r;
 9627         struct sge_params *sp = &adapter->params.sge;
 9628         unsigned i, tscale = 1;
 9629 
 9630         r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
 9631         sp->counter_val[0] = G_THRESHOLD_0(r);
 9632         sp->counter_val[1] = G_THRESHOLD_1(r);
 9633         sp->counter_val[2] = G_THRESHOLD_2(r);
 9634         sp->counter_val[3] = G_THRESHOLD_3(r);
 9635 
 9636         if (chip_id(adapter) >= CHELSIO_T6) {
 9637                 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
 9638                 tscale = G_TSCALE(r);
 9639                 if (tscale == 0)
 9640                         tscale = 1;
 9641                 else
 9642                         tscale += 2;
 9643         }
 9644 
 9645         r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
 9646         sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
 9647         sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
 9648         r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
 9649         sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
 9650         sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
 9651         r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
 9652         sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
 9653         sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
 9654 
 9655         r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
 9656         sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
 9657         if (is_t4(adapter))
 9658                 sp->fl_starve_threshold2 = sp->fl_starve_threshold;
 9659         else if (is_t5(adapter))
 9660                 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
 9661         else
 9662                 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
 9663 
 9664         /* egress queues: log2 of # of doorbells per BAR2 page */
 9665         r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
 9666         r >>= S_QUEUESPERPAGEPF0 +
 9667             (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
 9668         sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
 9669 
 9670         /* ingress queues: log2 of # of doorbells per BAR2 page */
 9671         r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
 9672         r >>= S_QUEUESPERPAGEPF0 +
 9673             (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
 9674         sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
 9675 
 9676         r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
 9677         r >>= S_HOSTPAGESIZEPF0 +
 9678             (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
 9679         sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
 9680 
 9681         r = t4_read_reg(adapter, A_SGE_CONTROL);
 9682         sp->sge_control = r;
 9683         sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
 9684         sp->fl_pktshift = G_PKTSHIFT(r);
 9685         if (chip_id(adapter) <= CHELSIO_T5) {
 9686                 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
 9687                     X_INGPADBOUNDARY_SHIFT);
 9688         } else {
 9689                 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
 9690                     X_T6_INGPADBOUNDARY_SHIFT);
 9691         }
 9692         if (is_t4(adapter))
 9693                 sp->pack_boundary = sp->pad_boundary;
 9694         else {
 9695                 r = t4_read_reg(adapter, A_SGE_CONTROL2);
 9696                 if (G_INGPACKBOUNDARY(r) == 0)
 9697                         sp->pack_boundary = 16;
 9698                 else
 9699                         sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
 9700         }
 9701         for (i = 0; i < SGE_FLBUF_SIZES; i++)
 9702                 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
 9703                     A_SGE_FL_BUFFER_SIZE0 + (4 * i));
 9704 
 9705         return 0;
 9706 }
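
/*
 * Editorial sketch, not part of the driver: the per-PF decodes in
 * t4_init_sge_params() above shift each register down to this PF's field
 * before masking.  With hypothetical values pf = 2, a per-PF field stride of
 * 4 bits, and a HOSTPAGESIZEPF field value of 2:
 *
 *      r >>= S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * 2;
 *      page_shift     = (r & M_HOSTPAGESIZEPF0) + 10 = 12
 *      host page size = 1 << 12 = 4096 bytes
 */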
 9707 
 9708 /* Convert the LE's hardware hash mask to a shorter filter mask. */
 9709 static inline uint16_t
 9710 hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode)
 9711 {
 9712         static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
 9713         int i;
 9714         uint16_t filter_mask;
 9715         uint64_t mask;          /* field mask */
 9716 
 9717         filter_mask = 0;
 9718         for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
 9719                 if ((filter_mode & (1 << i)) == 0)
 9720                         continue;
 9721                 mask = (1 << width[i]) - 1;
 9722                 if ((hashmask & mask) == mask)
 9723                         filter_mask |= 1 << i;
 9724                 hashmask >>= width[i];
 9725         }
 9726 
 9727         return (filter_mask);
 9728 }
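
/*
 * Editorial sketch, not part of the driver: a worked example of
 * hashmask_to_filtermask() above.  The hardware hash mask packs only the
 * fields enabled in the filter mode, in TP_VLAN_PRI_MAP bit order, each with
 * the width given in width[].  Suppose the filter mode enables only PORT
 * (3 bits) and PROTOCOL (8 bits) and both fields are fully masked:
 *
 *      hashmask    = 0x7 | (0xff << 3) = 0x7ff
 *      filter_mask = F_PORT | F_PROTOCOL
 *
 * A partially masked field (say PORT bits = 0x3) would not get its bit set
 * in the returned filter mask.
 */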
 9729 
 9730 /*
 9731  * Read and cache the adapter's compressed filter mode and ingress config.
 9732  */
 9733 static void
 9734 read_filter_mode_and_ingress_config(struct adapter *adap)
 9735 {
 9736         int rc;
 9737         uint32_t v, param[2], val[2];
 9738         struct tp_params *tpp = &adap->params.tp;
 9739         uint64_t hash_mask;
 9740 
 9741         param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 9742             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
 9743             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
 9744         param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 9745             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
 9746             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
 9747         rc = -t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val);
 9748         if (rc == 0) {
 9749                 tpp->filter_mode = G_FW_PARAMS_PARAM_FILTER_MODE(val[0]);
 9750                 tpp->filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val[0]);
 9751                 tpp->vnic_mode = val[1];
 9752         } else {
 9753                 /*
 9754                  * Old firmware.  Read filter mode/mask and ingress config
 9755                  * straight from the hardware.
 9756                  */
 9757                 t4_tp_pio_read(adap, &v, 1, A_TP_VLAN_PRI_MAP, true);
 9758                 tpp->filter_mode = v & 0xffff;
 9759 
 9760                 hash_mask = 0;
 9761                 if (chip_id(adap) > CHELSIO_T4) {
 9762                         v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
 9763                         hash_mask = v;
 9764                         v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
 9765                         hash_mask |= (u64)v << 32;
 9766                 }
 9767                 tpp->filter_mask = hashmask_to_filtermask(hash_mask,
 9768                     tpp->filter_mode);
 9769 
 9770                 t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
 9771                 if (v & F_VNIC)
 9772                         tpp->vnic_mode = FW_VNIC_MODE_PF_VF;
 9773                 else
 9774                         tpp->vnic_mode = FW_VNIC_MODE_OUTER_VLAN;
 9775         }
 9776 
 9777         /*
 9778          * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
 9779          * shift positions of several elements of the Compressed Filter Tuple
 9780          * for this adapter which we need frequently ...
 9781          */
 9782         tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
 9783         tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
 9784         tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
 9785         tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
 9786         tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
 9787         tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
 9788         tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
 9789         tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
 9790         tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
 9791         tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
 9792 }
 9793 
 9794 /**
 9795  *      t4_init_tp_params - initialize adap->params.tp
 9796  *      @adap: the adapter
 9797  *
 9798  *      Initialize various fields of the adapter's TP Parameters structure.
 9799  */
 9800 int t4_init_tp_params(struct adapter *adap)
 9801 {
 9802         int chan;
 9803         u32 tx_len, rx_len, r, v;
 9804         struct tp_params *tpp = &adap->params.tp;
 9805 
 9806         v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
 9807         tpp->tre = G_TIMERRESOLUTION(v);
 9808         tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
 9809 
 9810         /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
 9811         for (chan = 0; chan < MAX_NCHAN; chan++)
 9812                 tpp->tx_modq[chan] = chan;
 9813 
 9814         read_filter_mode_and_ingress_config(adap);
 9815 
 9816         if (chip_id(adap) > CHELSIO_T5) {
 9817                 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
 9818                 tpp->rx_pkt_encap = v & F_CRXPKTENC;
 9819         } else
 9820                 tpp->rx_pkt_encap = false;
 9821 
 9822         rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
 9823         tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
 9824 
 9825         r = t4_read_reg(adap, A_TP_PARA_REG2);
 9826         rx_len = min(rx_len, G_MAXRXDATA(r));
 9827         tx_len = min(tx_len, G_MAXRXDATA(r));
 9828 
 9829         r = t4_read_reg(adap, A_TP_PARA_REG7);
 9830         v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
 9831         rx_len = min(rx_len, v);
 9832         tx_len = min(tx_len, v);
 9833 
 9834         tpp->max_tx_pdu = tx_len;
 9835         tpp->max_rx_pdu = rx_len;
 9836 
 9837         return 0;
 9838 }
 9839 
 9840 /**
 9841  *      t4_filter_field_shift - calculate filter field shift
 9842  *      @adap: the adapter
 9843  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 9844  *
 9845  *      Return the shift position of a filter field within the Compressed
 9846  *      Filter Tuple.  The filter field is specified via its selection bit
 9847  *      within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 9848  */
 9849 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
 9850 {
 9851         const unsigned int filter_mode = adap->params.tp.filter_mode;
 9852         unsigned int sel;
 9853         int field_shift;
 9854 
 9855         if ((filter_mode & filter_sel) == 0)
 9856                 return -1;
 9857 
 9858         for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
 9859                 switch (filter_mode & sel) {
 9860                 case F_FCOE:
 9861                         field_shift += W_FT_FCOE;
 9862                         break;
 9863                 case F_PORT:
 9864                         field_shift += W_FT_PORT;
 9865                         break;
 9866                 case F_VNIC_ID:
 9867                         field_shift += W_FT_VNIC_ID;
 9868                         break;
 9869                 case F_VLAN:
 9870                         field_shift += W_FT_VLAN;
 9871                         break;
 9872                 case F_TOS:
 9873                         field_shift += W_FT_TOS;
 9874                         break;
 9875                 case F_PROTOCOL:
 9876                         field_shift += W_FT_PROTOCOL;
 9877                         break;
 9878                 case F_ETHERTYPE:
 9879                         field_shift += W_FT_ETHERTYPE;
 9880                         break;
 9881                 case F_MACMATCH:
 9882                         field_shift += W_FT_MACMATCH;
 9883                         break;
 9884                 case F_MPSHITTYPE:
 9885                         field_shift += W_FT_MPSHITTYPE;
 9886                         break;
 9887                 case F_FRAGMENTATION:
 9888                         field_shift += W_FT_FRAGMENTATION;
 9889                         break;
 9890                 }
 9891         }
 9892         return field_shift;
 9893 }
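
/*
 * Editorial sketch, not part of the driver: a worked example of
 * t4_filter_field_shift() above.  Suppose the filter mode is
 * F_PORT | F_VLAN | F_PROTOCOL.  A field's shift is the sum of the widths of
 * the enabled fields below it in the compressed tuple (widths as in
 * hashmask_to_filtermask() above):
 *
 *      shift(F_PORT)     = 0
 *      shift(F_VLAN)     = W_FT_PORT             = 3
 *      shift(F_PROTOCOL) = W_FT_PORT + W_FT_VLAN = 3 + 17 = 20
 *
 * A field not present in the filter mode (say F_TOS here) returns -1.
 */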
 9894 
 9895 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
 9896 {
 9897         u8 addr[6];
 9898         int ret, i, j;
 9899         struct port_info *p = adap2pinfo(adap, port_id);
 9900         u32 param, val;
 9901         struct vi_info *vi = &p->vi[0];
 9902 
 9903         for (i = 0, j = -1; i <= p->port_id; i++) {
 9904                 do {
 9905                         j++;
 9906                 } while ((adap->params.portvec & (1 << j)) == 0);
 9907         }
 9908 
 9909         p->tx_chan = j;
 9910         p->mps_bg_map = t4_get_mps_bg_map(adap, j);
 9911         p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
 9912         p->rx_c_chan = t4_get_rx_c_chan(adap, j);
 9913         p->lport = j;
 9914 
 9915         if (!(adap->flags & IS_VF) ||
 9916             adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
 9917                 t4_update_port_info(p);
 9918         }
 9919 
 9920         ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
 9921             &vi->vfvld, &vi->vin);
 9922         if (ret < 0)
 9923                 return ret;
 9924 
 9925         vi->viid = ret;
 9926         t4_os_set_hw_addr(p, addr);
 9927 
 9928         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 9929             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
 9930             V_FW_PARAMS_PARAM_YZ(vi->viid);
 9931         ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
 9932         if (ret)
 9933                 vi->rss_base = 0xffff;
 9934         else {
 9935                 /* MPASS((val >> 16) == rss_size); */
 9936                 vi->rss_base = val & 0xffff;
 9937         }
 9938 
 9939         return 0;
 9940 }
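
/*
 * Editorial note, not part of the driver: the portvec walk at the top of
 * t4_port_init() maps the driver's port_id to the port_id'th set bit of
 * params.portvec, which becomes that port's tx channel.  For example, with
 * portvec = 0x5 (channels 0 and 2 populated):
 *
 *      port_id 0 -> j = 0 (first set bit)
 *      port_id 1 -> j = 2 (second set bit)
 */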
 9941 
 9942 /**
 9943  *      t4_read_cimq_cfg - read CIM queue configuration
 9944  *      @adap: the adapter
 9945  *      @base: holds the queue base addresses in bytes
 9946  *      @size: holds the queue sizes in bytes
 9947  *      @thres: holds the queue full thresholds in bytes
 9948  *
 9949  *      Returns the current configuration of the CIM queues, starting with
 9950  *      the IBQs, then the OBQs.
 9951  */
 9952 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
 9953 {
 9954         unsigned int i, v;
 9955         int cim_num_obq = adap->chip_params->cim_num_obq;
 9956 
 9957         for (i = 0; i < CIM_NUM_IBQ; i++) {
 9958                 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
 9959                              V_QUENUMSELECT(i));
 9960                 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
 9961                 /* value is in 256-byte units */
 9962                 *base++ = G_CIMQBASE(v) * 256;
 9963                 *size++ = G_CIMQSIZE(v) * 256;
 9964                 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
 9965         }
 9966         for (i = 0; i < cim_num_obq; i++) {
 9967                 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
 9968                              V_QUENUMSELECT(i));
 9969                 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
 9970                 /* value is in 256-byte units */
 9971                 *base++ = G_CIMQBASE(v) * 256;
 9972                 *size++ = G_CIMQSIZE(v) * 256;
 9973         }
 9974 }
 9975 
 9976 /**
 9977  *      t4_read_cim_ibq - read the contents of a CIM inbound queue
 9978  *      @adap: the adapter
 9979  *      @qid: the queue index
 9980  *      @data: where to store the queue contents
 9981  *      @n: capacity of @data in 32-bit words
 9982  *
 9983  *      Reads the contents of the selected CIM queue starting at address 0 up
 9984  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 9985  *      error and the number of 32-bit words actually read on success.
 9986  */
 9987 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
 9988 {
 9989         int i, err, attempts;
 9990         unsigned int addr;
 9991         const unsigned int nwords = CIM_IBQ_SIZE * 4;
 9992 
 9993         if (qid > 5 || (n & 3))
 9994                 return -EINVAL;
 9995 
 9996         addr = qid * nwords;
 9997         if (n > nwords)
 9998                 n = nwords;
 9999 
10000         /* It might take 3-10ms before the IBQ debug read access is allowed.
10001          * Wait for up to 1 second, polling every 1 usec.
10002          */
10003         attempts = 1000000;
10004 
10005         for (i = 0; i < n; i++, addr++) {
10006                 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
10007                              F_IBQDBGEN);
10008                 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
10009                                       attempts, 1);
10010                 if (err)
10011                         return err;
10012                 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
10013         }
10014         t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
10015         return i;
10016 }
10017 
10018 /**
10019  *      t4_read_cim_obq - read the contents of a CIM outbound queue
10020  *      @adap: the adapter
10021  *      @qid: the queue index
10022  *      @data: where to store the queue contents
10023  *      @n: capacity of @data in 32-bit words
10024  *
10025  *      Reads the contents of the selected CIM queue starting at address 0 up
10026  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
10027  *      error and the number of 32-bit words actually read on success.
10028  */
10029 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10030 {
10031         int i, err;
10032         unsigned int addr, v, nwords;
10033         int cim_num_obq = adap->chip_params->cim_num_obq;
10034 
10035         if ((qid > (cim_num_obq - 1)) || (n & 3))
10036                 return -EINVAL;
10037 
10038         t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10039                      V_QUENUMSELECT(qid));
10040         v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10041 
10042         addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
10043         nwords = G_CIMQSIZE(v) * 64;  /* same */
10044         if (n > nwords)
10045                 n = nwords;
10046 
10047         for (i = 0; i < n; i++, addr++) {
10048                 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
10049                              F_OBQDBGEN);
10050                 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
10051                                       2, 1);
10052                 if (err)
10053                         return err;
10054                 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
10055         }
10056         t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
10057         return i;
10058 }
10059 
10060 enum {
10061         CIM_QCTL_BASE     = 0,
10062         CIM_CTL_BASE      = 0x2000,
10063         CIM_PBT_ADDR_BASE = 0x2800,
10064         CIM_PBT_LRF_BASE  = 0x3000,
10065         CIM_PBT_DATA_BASE = 0x3800
10066 };
10067 
10068 /**
10069  *      t4_cim_read - read a block from CIM internal address space
10070  *      @adap: the adapter
10071  *      @addr: the start address within the CIM address space
10072  *      @n: number of words to read
10073  *      @valp: where to store the result
10074  *
10075  *      Reads a block of 4-byte words from the CIM internal address space.
10076  */
10077 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
10078                 unsigned int *valp)
10079 {
10080         int ret = 0;
10081 
10082         if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10083                 return -EBUSY;
10084 
10085         for ( ; !ret && n--; addr += 4) {
10086                 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
10087                 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10088                                       0, 5, 2);
10089                 if (!ret)
10090                         *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
10091         }
10092         return ret;
10093 }
10094 
10095 /**
10096  *      t4_cim_write - write a block into CIM internal address space
10097  *      @adap: the adapter
10098  *      @addr: the start address within the CIM address space
10099  *      @n: number of words to write
10100  *      @valp: set of values to write
10101  *
10102  *      Writes a block of 4-byte words into the CIM internal address space.
10103  */
10104 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
10105                  const unsigned int *valp)
10106 {
10107         int ret = 0;
10108 
10109         if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10110                 return -EBUSY;
10111 
10112         for ( ; !ret && n--; addr += 4) {
10113                 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
10114                 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
10115                 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10116                                       0, 5, 2);
10117         }
10118         return ret;
10119 }
10120 
10121 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
10122                          unsigned int val)
10123 {
10124         return t4_cim_write(adap, addr, 1, &val);
10125 }
10126 
10127 /**
10128  *      t4_cim_ctl_read - read a block from CIM control region
10129  *      @adap: the adapter
10130  *      @addr: the start address within the CIM control region
10131  *      @n: number of words to read
10132  *      @valp: where to store the result
10133  *
10134  *      Reads a block of 4-byte words from the CIM control region.
10135  */
10136 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
10137                     unsigned int *valp)
10138 {
10139         return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
10140 }
10141 
10142 /**
10143  *      t4_cim_read_la - read CIM LA capture buffer
10144  *      @adap: the adapter
10145  *      @la_buf: where to store the LA data
10146  *      @wrptr: the HW write pointer within the capture buffer
10147  *
10148  *      Reads the contents of the CIM LA buffer with the most recent entry at
10149  *      the end of the returned data and with the entry at @wrptr first.
10150  *      We try to leave the LA in the running state we find it in.
10151  */
10152 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
10153 {
10154         int i, ret;
10155         unsigned int cfg, val, idx;
10156 
10157         ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
10158         if (ret)
10159                 return ret;
10160 
10161         if (cfg & F_UPDBGLAEN) {        /* LA is running, freeze it */
10162                 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
10163                 if (ret)
10164                         return ret;
10165         }
10166 
10167         ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10168         if (ret)
10169                 goto restart;
10170 
10171         idx = G_UPDBGLAWRPTR(val);
10172         if (wrptr)
10173                 *wrptr = idx;
10174 
10175         for (i = 0; i < adap->params.cim_la_size; i++) {
10176                 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
10177                                     V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
10178                 if (ret)
10179                         break;
10180                 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10181                 if (ret)
10182                         break;
10183                 if (val & F_UPDBGLARDEN) {
10184                         ret = -ETIMEDOUT;
10185                         break;
10186                 }
10187                 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
10188                 if (ret)
10189                         break;
10190 
10191                 /* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 to
10192                  * identify the 32-bit portion of the full 312-bit data.
10193                  */
10194                 if (is_t6(adap) && (idx & 0xf) >= 9)
10195                         idx = (idx & 0xff0) + 0x10;
10196                 else
10197                         idx++;
10198                 /* address can't exceed 0xfff */
10199                 idx &= M_UPDBGLARDPTR;
10200         }
10201 restart:
10202         if (cfg & F_UPDBGLAEN) {
10203                 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
10204                                       cfg & ~F_UPDBGLARDEN);
10205                 if (!ret)
10206                         ret = r;
10207         }
10208         return ret;
10209 }
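
/*
 * Editorial note, not part of the driver: on T6 each 312-bit LA entry is read
 * out as ten 32-bit words, so the read pointer's low nibble runs 0x0..0x9 and
 * then jumps to the next 0x10 boundary, e.g.:
 *
 *      idx: 0x120, 0x121, ..., 0x129, 0x130, 0x131, ...
 *
 * On T4/T5 the read pointer simply increments by one for every word read.
 */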
10210 
10211 /**
10212  *      t4_tp_read_la - read TP LA capture buffer
10213  *      @adap: the adapter
10214  *      @la_buf: where to store the LA data
10215  *      @wrptr: the HW write pointer within the capture buffer
10216  *
10217  *      Reads the contents of the TP LA buffer with the most recent entry at
10218  *      the end of the returned data and with the entry at @wrptr first.
10219  *      We leave the LA in the running state we find it in.
10220  */
10221 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10222 {
10223         bool last_incomplete;
10224         unsigned int i, cfg, val, idx;
10225 
10226         cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
10227         if (cfg & F_DBGLAENABLE)                        /* freeze LA */
10228                 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10229                              adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
10230 
10231         val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
10232         idx = G_DBGLAWPTR(val);
10233         last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
10234         if (last_incomplete)
10235                 idx = (idx + 1) & M_DBGLARPTR;
10236         if (wrptr)
10237                 *wrptr = idx;
10238 
10239         val &= 0xffff;
10240         val &= ~V_DBGLARPTR(M_DBGLARPTR);
10241         val |= adap->params.tp.la_mask;
10242 
10243         for (i = 0; i < TPLA_SIZE; i++) {
10244                 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
10245                 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
10246                 idx = (idx + 1) & M_DBGLARPTR;
10247         }
10248 
10249         /* Wipe out last entry if it isn't valid */
10250         if (last_incomplete)
10251                 la_buf[TPLA_SIZE - 1] = ~0ULL;
10252 
10253         if (cfg & F_DBGLAENABLE)                /* restore running state */
10254                 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10255                              cfg | adap->params.tp.la_mask);
10256 }
10257 
10258 /*
10259  * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10260  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
10261  * state for more than the Warning Threshold then we'll issue a warning about
10262  * a potential hang.  We'll repeat the warning every Warning Repeat seconds
10263  * for as long as the SGE Ingress DMA Channel appears to be hung.  When the
10264  * situation clears, we'll note that as well.
10265  */
10266 #define SGE_IDMA_WARN_THRESH 1
10267 #define SGE_IDMA_WARN_REPEAT 300
10268 
10269 /**
10270  *      t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10271  *      @adapter: the adapter
10272  *      @idma: the adapter IDMA Monitor state
10273  *
10274  *      Initialize the state of an SGE Ingress DMA Monitor.
10275  */
10276 void t4_idma_monitor_init(struct adapter *adapter,
10277                           struct sge_idma_monitor_state *idma)
10278 {
10279         /* Initialize the state variables for detecting an SGE Ingress DMA
10280          * hang.  The SGE has internal counters which count up on each clock
10281          * tick whenever the SGE finds its Ingress DMA State Engines in the
10282          * same state they were on the previous clock tick.  The clock used is
10283          * the Core Clock so we have a limit on the maximum "time" they can
10284          * record; typically a very small number of seconds.  For instance,
10285          * with a 600MHz Core Clock, we can only count up to a bit more than
10286          * 7s.  So we'll synthesize a larger counter in order to not run the
10287          * risk of having the "timers" overflow and give us the flexibility to
10288          * maintain a Hung SGE State Machine of our own which operates across
10289          * a longer time frame.
10290          */
10291         idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10292         idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
10293 }
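
/*
 * Editorial note, not part of the driver: for a hypothetical 600MHz Core
 * Clock, core_ticks_per_usec() is 600, so
 *
 *      idma_1s_thresh = 600 * 1000000 = 600,000,000 ticks,
 *
 * i.e. the same-state count that corresponds to one second, comfortably
 * below the 0xffffffff value at which the hardware counters saturate.
 */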
10294 
10295 /**
10296  *      t4_idma_monitor - monitor SGE Ingress DMA state
10297  *      @adapter: the adapter
10298  *      @idma: the adapter IDMA Monitor state
10299  *      @hz: number of ticks/second
10300  *      @ticks: number of ticks since the last IDMA Monitor call
10301  */
10302 void t4_idma_monitor(struct adapter *adapter,
10303                      struct sge_idma_monitor_state *idma,
10304                      int hz, int ticks)
10305 {
10306         int i, idma_same_state_cnt[2];
10307 
10308          /* Read the SGE Debug Ingress DMA Same State Count registers.  These
10309           * are counters inside the SGE which count up on each clock when the
10310           * SGE finds its Ingress DMA State Engines in the same states they
10311           * were in the previous clock.  The counters will peg out at
10312           * 0xffffffff without wrapping around so once they pass the 1s
10313           * threshold they'll stay above that till the IDMA state changes.
10314           */
10315         t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
10316         idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
10317         idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10318 
10319         for (i = 0; i < 2; i++) {
10320                 u32 debug0, debug11;
10321 
10322                 /* If the Ingress DMA Same State Counter ("timer") is less
10323                  * than 1s, then we can reset our synthesized Stall Timer and
10324                  * continue.  If we have previously emitted warnings about a
10325                  * potential stalled Ingress Queue, issue a note indicating
10326                  * that the Ingress Queue has resumed forward progress.
10327                  */
10328                 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10329                         if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
10330                                 CH_WARN(adapter, "SGE idma%d, queue %u, "
10331                                         "resumed after %d seconds\n",
10332                                         i, idma->idma_qid[i],
10333                                         idma->idma_stalled[i]/hz);
10334                         idma->idma_stalled[i] = 0;
10335                         continue;
10336                 }
10337 
10338                 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10339                  * domain.  The first time we get here it'll be because we
10340                  * passed the 1s Threshold; each additional time it'll be
10341                  * because the RX Timer Callback is being fired on its regular
10342                  * schedule.
10343                  *
10344                  * If the stall is below our Potential Hung Ingress Queue
10345                  * Warning Threshold, continue.
10346                  */
10347                 if (idma->idma_stalled[i] == 0) {
10348                         idma->idma_stalled[i] = hz;
10349                         idma->idma_warn[i] = 0;
10350                 } else {
10351                         idma->idma_stalled[i] += ticks;
10352                         idma->idma_warn[i] -= ticks;
10353                 }
10354 
10355                 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
10356                         continue;
10357 
10358                 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10359                  */
10360                 if (idma->idma_warn[i] > 0)
10361                         continue;
10362                 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
10363 
10364                 /* Read and save the SGE IDMA State and Queue ID information.
10365                  * We do this every time in case it changes across time ...
10366                  * can't be too careful ...
10367                  */
10368                 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
10369                 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10370                 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10371 
10372                 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
10373                 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10374                 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10375 
10376                 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
10377                         "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10378                         i, idma->idma_qid[i], idma->idma_state[i],
10379                         idma->idma_stalled[i]/hz,
10380                         debug0, debug11);
10381                 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10382         }
10383 }
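
/*
 * Illustrative usage sketch (not compiled, not part of the driver): one
 * plausible way a periodic callback running in the hz domain could feed
 * t4_idma_monitor() above.  The callback name, the 200ms interval and the
 * caller-owned monitor state are assumptions; only the t4_idma_monitor()
 * signature comes from this file.
 */
#if 0
static void
example_idma_tick(struct adapter *sc, struct sge_idma_monitor_state *idma)
{
        /* Assume this callback fires every hz/5 ticks (about 200ms). */
        t4_idma_monitor(sc, idma, hz, hz / 5);
}
#endif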
10384 
10385 /**
10386  *     t4_set_vf_mac - Set MAC address for the specified VF
10387  *     @adapter: The adapter
10388  *     @pf: the PF used to instantiate the VFs
10389  *     @vf: one of the VFs instantiated by the specified PF
10390  *     @naddr: the number of MAC addresses
10391  *     @addr: the MAC address(es) to be set to the specified VF
10392  */
10393 int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
10394                   unsigned int naddr, u8 *addr)
10395 {
10396         struct fw_acl_mac_cmd cmd;
10397 
10398         memset(&cmd, 0, sizeof(cmd));
10399         cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
10400                                     F_FW_CMD_REQUEST |
10401                                     F_FW_CMD_WRITE |
10402                                     V_FW_ACL_MAC_CMD_PFN(pf) |
10403                                     V_FW_ACL_MAC_CMD_VFN(vf));
10404 
10405         /* Note: Do not enable the ACL */
10406         cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10407         cmd.nmac = naddr;
10408 
10409         switch (pf) {
10410         case 3:
10411                 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10412                 break;
10413         case 2:
10414                 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10415                 break;
10416         case 1:
10417                 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10418                 break;
10419         case 0:
10420                 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10421                 break;
10422         }
10423 
10424         return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10425 }
10426 
10427 /**
10428  *      t4_read_pace_tbl - read the pace table
10429  *      @adap: the adapter
10430  *      @pace_vals: holds the returned values
10431  *
10432  *      Returns the values of TP's pace table in microseconds.
10433  */
10434 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10435 {
10436         unsigned int i, v;
10437 
10438         for (i = 0; i < NTX_SCHED; i++) {
10439                 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
10440                 v = t4_read_reg(adap, A_TP_PACE_TABLE);
10441                 pace_vals[i] = dack_ticks_to_usec(adap, v);
10442         }
10443 }
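
/*
 * Illustrative usage sketch (not compiled, not part of the driver): dump the
 * TP pace table returned by t4_read_pace_tbl() above.  The helper name is
 * hypothetical; CH_WARN is used only because it is the logging macro already
 * used elsewhere in this file.
 */
#if 0
static void
example_dump_pace_tbl(struct adapter *sc)
{
        unsigned int pace_us[NTX_SCHED];
        unsigned int i;

        t4_read_pace_tbl(sc, pace_us);
        for (i = 0; i < NTX_SCHED; i++)
                CH_WARN(sc, "pace[%u] = %u us\n", i, pace_us[i]);
}
#endif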
10444 
10445 /**
10446  *      t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10447  *      @adap: the adapter
10448  *      @sched: the scheduler index
10449  *      @kbps: the transmit rate in Kbps
10450  *      @ipg: the interpacket delay in tenths of nanoseconds
10451  *
10452  *      Return the current configuration of a HW Tx scheduler.
10453  */
10454 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
10455                      unsigned int *ipg, bool sleep_ok)
10456 {
10457         unsigned int v, addr, bpt, cpt;
10458 
10459         if (kbps) {
10460                 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
10461                 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10462                 if (sched & 1)
10463                         v >>= 16;
10464                 bpt = (v >> 8) & 0xff;
10465                 cpt = v & 0xff;
10466                 if (!cpt)
10467                         *kbps = 0;      /* scheduler disabled */
10468                 else {
10469                         v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10470                         *kbps = (v * bpt) / 125;
10471                 }
10472         }
10473         if (ipg) {
10474                 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
10475                 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10476                 if (sched & 1)
10477                         v >>= 16;
10478                 v &= 0xffff;
10479                 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10480         }
10481 }
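
/*
 * Illustrative usage sketch (not compiled, not part of the driver): query one
 * hardware Tx scheduler with t4_get_tx_sched().  The helper name is
 * hypothetical; sleep_ok is passed as true on the assumption that the caller
 * may sleep.
 */
#if 0
static void
example_show_tx_sched(struct adapter *sc, unsigned int sched)
{
        unsigned int kbps, ipg;

        t4_get_tx_sched(sc, sched, &kbps, &ipg, true);
        if (kbps == 0)
                CH_WARN(sc, "Tx scheduler %u is disabled\n", sched);
        else
                CH_WARN(sc, "Tx scheduler %u: %u Kbps, ipg %u (0.1 ns)\n",
                        sched, kbps, ipg);
}
#endif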
10482 
10483 /**
10484  *      t4_load_cfg - download config file
10485  *      @adap: the adapter
10486  *      @cfg_data: the cfg text file to write
10487  *      @size: text file size
10488  *
10489  *      Write the supplied config text file to the card's serial flash.
10490  */
10491 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10492 {
10493         int ret, i, n, cfg_addr;
10494         unsigned int addr;
10495         unsigned int flash_cfg_start_sec;
10496         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10497 
10498         cfg_addr = t4_flash_cfg_addr(adap);
10499         if (cfg_addr < 0)
10500                 return cfg_addr;
10501 
10502         addr = cfg_addr;
10503         flash_cfg_start_sec = addr / SF_SEC_SIZE;
10504 
10505         if (size > FLASH_CFG_MAX_SIZE) {
10506                 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
10507                        FLASH_CFG_MAX_SIZE);
10508                 return -EFBIG;
10509         }
10510 
10511         i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,    /* # of sectors spanned */
10512                          sf_sec_size);
10513         ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10514                                      flash_cfg_start_sec + i - 1);
10515         /*
10516          * If size == 0 then we're simply erasing the FLASH sectors associated
10517          * with the on-adapter Firmware Configuration File.
10518          */
10519         if (ret || size == 0)
10520                 goto out;
10521 
10522         /* this will write to the flash up to SF_PAGE_SIZE at a time */
10523         for (i = 0; i < size; i += SF_PAGE_SIZE) {
10524                 if ((size - i) < SF_PAGE_SIZE)
10525                         n = size - i;
10526                 else
10527                         n = SF_PAGE_SIZE;
10528                 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
10529                 if (ret)
10530                         goto out;
10531 
10532                 addr += SF_PAGE_SIZE;
10533                 cfg_data += SF_PAGE_SIZE;
10534         }
10535 
10536 out:
10537         if (ret)
10538                 CH_ERR(adap, "config file %s failed %d\n",
10539                        (size == 0 ? "clear" : "download"), ret);
10540         return ret;
10541 }
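
/*
 * Illustrative usage sketch (not compiled, not part of the driver): as the
 * code above notes, a size of 0 simply erases the FLASH sectors that hold
 * the on-adapter Firmware Configuration File, so cfg_data is never read.
 * The wrapper name is hypothetical.
 */
#if 0
static int
example_clear_flash_cfg(struct adapter *sc)
{
        return (t4_load_cfg(sc, NULL, 0));
}
#endif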
10542 
10543 /**
10544  *      t5_fw_init_extern_mem - initialize the external memory
10545  *      @adap: the adapter
10546  *
10547  *      Initializes the external memory on T5.
10548  */
10549 int t5_fw_init_extern_mem(struct adapter *adap)
10550 {
10551         u32 params[1], val[1];
10552         int ret;
10553 
10554         if (!is_t5(adap))
10555                 return 0;
10556 
10557         val[0] = 0xff; /* Initialize all MCs */
10558         params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10559                         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10560         ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10561                         FW_CMD_MAX_TIMEOUT);
10562 
10563         return ret;
10564 }
10565 
10566 /* BIOS boot headers */
10567 typedef struct pci_expansion_rom_header {
10568         u8      signature[2]; /* ROM Signature. Should be 0xaa55 */
10569         u8      reserved[22]; /* Reserved per processor Architecture data */
10570         u8      pcir_offset[2]; /* Offset to PCI Data Structure */
10571 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
10572 
10573 /* Legacy PCI Expansion ROM Header */
10574 typedef struct legacy_pci_expansion_rom_header {
10575         u8      signature[2]; /* ROM Signature. Should be 0xaa55 */
10576         u8      size512; /* Current Image Size in units of 512 bytes */
10577         u8      initentry_point[4];
10578         u8      cksum; /* Checksum computed on the entire Image */
10579         u8      reserved[16]; /* Reserved */
10580         u8      pcir_offset[2]; /* Offset to PCI Data Structure */
10581 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
10582 
10583 /* EFI PCI Expansion ROM Header */
10584 typedef struct efi_pci_expansion_rom_header {
10585         u8      signature[2]; /* ROM signature. The value 0xaa55 */
10586         u8      initialization_size[2]; /* Units 512. Includes this header */
10587         u8      efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
10588         u8      efi_subsystem[2]; /* Subsystem value for EFI image header */
10589         u8      efi_machine_type[2]; /* Machine type from EFI image header */
10590         u8      compression_type[2]; /* Compression type. */
10591                 /*
10592                  * Compression type definition
10593                  * 0x0: uncompressed
10594                  * 0x1: Compressed
10595                  * 0x2-0xFFFF: Reserved
10596                  */
10597         u8      reserved[8]; /* Reserved */
10598         u8      efi_image_header_offset[2]; /* Offset to EFI Image */
10599         u8      pcir_offset[2]; /* Offset to PCI Data Structure */
10600 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
10601 
10602 /* PCI Data Structure Format */
10603 typedef struct pcir_data_structure { /* PCI Data Structure */
10604         u8      signature[4]; /* Signature. The string "PCIR" */
10605         u8      vendor_id[2]; /* Vendor Identification */
10606         u8      device_id[2]; /* Device Identification */
10607         u8      vital_product[2]; /* Pointer to Vital Product Data */
10608         u8      length[2]; /* PCIR Data Structure Length */
10609         u8      revision; /* PCIR Data Structure Revision */
10610         u8      class_code[3]; /* Class Code */
10611         u8      image_length[2]; /* Image Length. Multiple of 512B */
10612         u8      code_revision[2]; /* Revision Level of Code/Data */
10613         u8      code_type; /* Code Type. */
10614                 /*
10615                  * PCI Expansion ROM Code Types
10616                  * 0x00: Intel IA-32, PC-AT compatible. Legacy
10617                  * 0x01: Open Firmware standard for PCI. FCODE
10618                  * 0x02: Hewlett-Packard PA RISC. HP reserved
10619                  * 0x03: EFI Image. EFI
10620                  * 0x04-0xFF: Reserved.
10621                  */
10622         u8      indicator; /* Indicator. Identifies the last image in the ROM */
10623         u8      reserved[2]; /* Reserved */
10624 } pcir_data_t; /* PCI_DATA_STRUCTURE */
10625 
10626 /* BOOT constants */
10627 enum {
10628         BOOT_FLASH_BOOT_ADDR = 0x0, /* start address of boot image in flash */
10629         BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
10630         BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
10631         BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
10632         BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 increments of 512B each */
10633         VENDOR_ID = 0x1425, /* Vendor ID */
10634         PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
10635 };
10636 
10637 /*
10638  *      modify_device_id - Modifies the device ID of the Boot BIOS image
10639  *      @device_id: the device ID to write.
10640  *      @boot_data: the boot image to modify.
10641  *
10642  *      Write the supplied device ID to the boot BIOS image.
10643  */
10644 static void modify_device_id(int device_id, u8 *boot_data)
10645 {
10646         legacy_pci_exp_rom_header_t *header;
10647         pcir_data_t *pcir_header;
10648         u32 cur_header = 0;
10649 
10650         /*
10651          * Loop through all chained images and change the device ID's
10652          */
10653         while (1) {
10654                 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
10655                 pcir_header = (pcir_data_t *) &boot_data[cur_header +
10656                               le16_to_cpu(*(u16*)header->pcir_offset)];
10657 
10658                 /*
10659                  * Only modify the Device ID if the code type is Legacy or EFI.
10660                  * 0x00: Legacy. Okay to modify
10661                  * 0x01: FCODE. Do not modify
10662                  * 0x03: EFI. Okay to modify
10663                  * 0x04-0xFF: Do not modify
10664                  */
10665                 if (pcir_header->code_type == 0x00) {
10666                         u8 csum = 0;
10667                         int i;
10668 
10669                         /*
10670                          * Modify Device ID to match current adapter
10671                          */
10672                         *(u16*) pcir_header->device_id = device_id;
10673 
10674                         /*
10675                          * Set checksum temporarily to 0.
10676                          * We will recalculate it later.
10677                          */
10678                         header->cksum = 0x0;
10679 
10680                         /*
10681                          * Calculate and update checksum
10682                          */
10683                         for (i = 0; i < (header->size512 * 512); i++)
10684                                 csum += (u8)boot_data[cur_header + i];
10685 
10686                         /*
10687                          * Negate the sum so the image bytes add up to zero and
10688                          * write the new checksum value directly into the boot data
10689                          */
10690                         boot_data[cur_header + 7] = -csum;
10691 
10692                 } else if (pcir_header->code_type == 0x03) {
10693 
10694                         /*
10695                          * Modify Device ID to match current adapter
10696                          */
10697                         *(u16*) pcir_header->device_id = device_id;
10698 
10699                 }
10700 
10701 
10702                 /*
10703                  * Check indicator element to identify if this is the last
10704                  * image in the ROM.
10705                  */
10706                 if (pcir_header->indicator & 0x80)
10707                         break;
10708 
10709                 /*
10710                  * Move header pointer up to the next image in the ROM.
10711                  */
10712                 cur_header += header->size512 * 512;
10713         }
10714 }
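
/*
 * Illustrative sketch (not compiled, not part of the driver): verify the
 * checksum rule that modify_device_id() maintains above, i.e. that the bytes
 * of a legacy ROM image sum to zero modulo 256 once the cksum byte is in
 * place.  The helper name is hypothetical.
 */
#if 0
static bool
example_legacy_rom_csum_ok(const u8 *image)
{
        const legacy_pci_exp_rom_header_t *hdr = (const void *)image;
        u8 sum = 0;
        int i;

        for (i = 0; i < hdr->size512 * 512; i++)
                sum += image[i];

        return (sum == 0);
}
#endif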
10715 
10716 /*
10717  *      t4_load_boot - download boot flash
10718  *      @adapter: the adapter
10719  *      @boot_data: the boot image to write
10720  *      @boot_addr: offset in flash to write boot_data
10721  *      @size: image size
10722  *
10723  *      Write the supplied boot image to the card's serial flash.
10724  *      The boot image has the following sections: a 28-byte header and the
10725  *      boot image.
10726  */
10727 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10728                  unsigned int boot_addr, unsigned int size)
10729 {
10730         pci_exp_rom_header_t *header;
10731         int pcir_offset;
10732         pcir_data_t *pcir_header;
10733         int ret, addr;
10734         uint16_t device_id;
10735         unsigned int i;
10736         unsigned int boot_sector = (boot_addr * 1024);
10737         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10738 
10739         /*
10740          * Make sure the boot image does not encroach on the firmware region
10741          */
10742         if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10743                 CH_ERR(adap, "boot image encroaching on firmware region\n");
10744                 return -EFBIG;
10745         }
10746 
10747         /*
10748          * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10749          * and Boot configuration data sections. These 3 boot sections span
10750          * sectors 0 to 7 in flash and live right before the FW image location.
10751          */
10752         i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
10753                         sf_sec_size);
10754         ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10755                                      (boot_sector >> 16) + i - 1);
10756 
10757         /*
10758          * If size == 0 then we're simply erasing the FLASH sectors associated
10759          * with the on-adapter option ROM file
10760          */
10761         if (ret || (size == 0))
10762                 goto out;
10763 
10764         /* Get boot header */
10765         header = (pci_exp_rom_header_t *)boot_data;
10766         pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
10767         /* PCIR Data Structure */
10768         pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
10769 
10770         /*
10771          * Perform some primitive sanity testing to avoid accidentally
10772          * writing garbage over the boot sectors.  We ought to check for
10773          * more but it's not worth it for now ...
10774          */
10775         if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10776                 CH_ERR(adap, "boot image too small/large\n");
10777                 return -EFBIG;
10778         }
10779 
10780 #ifndef CHELSIO_T4_DIAGS
10781         /*
10782          * Check BOOT ROM header signature
10783          */
10784         if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
10785                 CH_ERR(adap, "Boot image missing signature\n");
10786                 return -EINVAL;
10787         }
10788 
10789         /*
10790          * Check PCI header signature
10791          */
10792         if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
10793                 CH_ERR(adap, "PCI header missing signature\n");
10794                 return -EINVAL;
10795         }
10796 
10797         /*
10798          * Check Vendor ID matches Chelsio ID
10799          */
10800         if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
10801                 CH_ERR(adap, "Boot image Vendor ID mismatch\n");
10802                 return -EINVAL;
10803         }
10804 #endif
10805 
10806         /*
10807          * Retrieve adapter's device ID
10808          */
10809         t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10810         /* We want the PF 0 device ID, so strip off the PF indicator nibble */
10811         device_id = device_id & 0xf0ff;
10812 
10813         /*
10814          * Check PCIE Device ID
10815          */
10816         if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
10817                 /*
10818                  * Change the device ID in the Boot BIOS image to match
10819                  * the Device ID of the current adapter.
10820                  */
10821                 modify_device_id(device_id, boot_data);
10822         }
10823 
10824         /*
10825          * Skip over the first SF_PAGE_SIZE worth of data and write it after
10826          * we finish copying the rest of the boot image. This will ensure
10827          * that the BIOS boot header will only be written if the boot image
10828          * was written in full.
10829          */
10830         addr = boot_sector;
10831         for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10832                 addr += SF_PAGE_SIZE;
10833                 boot_data += SF_PAGE_SIZE;
10834                 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
10835                 if (ret)
10836                         goto out;
10837         }
10838 
10839         ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10840                              (const u8 *)header, 0);
10841 
10842 out:
10843         if (ret)
10844                 CH_ERR(adap, "boot image download failed, error %d\n", ret);
10845         return ret;
10846 }
10847 
10848 /*
10849  *      t4_flash_bootcfg_addr - return the address of the flash OptionROM configuration
10850  *      @adapter: the adapter
10851  *
10852  *      Return the address within the flash where the OptionROM Configuration
10853  *      is stored, or an error if the device FLASH is too small to contain
10854  *      an OptionROM Configuration.
10855  */
10856 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10857 {
10858         /*
10859          * If the device FLASH isn't large enough to hold an OptionROM
10860          * Configuration File, return an error.
10861          */
10862         if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10863                 return -ENOSPC;
10864 
10865         return FLASH_BOOTCFG_START;
10866 }
10867 
10868 int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10869 {
10870         int ret, i, n, cfg_addr;
10871         unsigned int addr;
10872         unsigned int flash_cfg_start_sec;
10873         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10874 
10875         cfg_addr = t4_flash_bootcfg_addr(adap);
10876         if (cfg_addr < 0)
10877                 return cfg_addr;
10878 
10879         addr = cfg_addr;
10880         flash_cfg_start_sec = addr / SF_SEC_SIZE;
10881 
10882         if (size > FLASH_BOOTCFG_MAX_SIZE) {
10883                 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10884                         FLASH_BOOTCFG_MAX_SIZE);
10885                 return -EFBIG;
10886         }
10887 
10888         i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10889                          sf_sec_size);
10890         ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10891                                         flash_cfg_start_sec + i - 1);
10892 
10893         /*
10894          * If size == 0 then we're simply erasing the FLASH sectors associated
10895          * with the on-adapter OptionROM Configuration File.
10896          */
10897         if (ret || size == 0)
10898                 goto out;
10899 
10900         /* this will write to the flash up to SF_PAGE_SIZE at a time */
10901         for (i = 0; i < size; i += SF_PAGE_SIZE) {
10902                 if ((size - i) < SF_PAGE_SIZE)
10903                         n = size - i;
10904                 else
10905                         n = SF_PAGE_SIZE;
10906                 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10907                 if (ret)
10908                         goto out;
10909 
10910                 addr += SF_PAGE_SIZE;
10911                 cfg_data += SF_PAGE_SIZE;
10912         }
10913 
10914 out:
10915         if (ret)
10916                 CH_ERR(adap, "boot config data %s failed %d\n",
10917                                 (size == 0 ? "clear" : "download"), ret);
10918         return ret;
10919 }
10920 
10921 /**
10922  *      t4_set_filter_cfg - set up filter mode/mask and ingress config.
10923  *      @adap: the adapter
10924  *      @mode: a bitmap selecting which optional filter components to enable
10925  *      @mask: a bitmap selecting which components to enable in filter mask
10926  *      @vnic_mode: the ingress config/vnic mode setting
10927  *
10928  *      Sets the filter mode and mask by selecting the optional components to
10929  *      enable in filter tuples.  Returns 0 on success and a negative error if
10930  *      the requested mode needs more bits than are available for optional
10931  *      components.  The filter mask must be a subset of the filter mode.
10932  */
10933 int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
10934 {
10935         static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
10936         int i, nbits, rc;
10937         uint32_t param, val;
10938         uint16_t fmode, fmask;
10939         const int maxbits = adap->chip_params->filter_opt_len;
10940 
10941         if (mode != -1 || mask != -1) {
10942                 if (mode != -1) {
10943                         fmode = mode;
10944                         nbits = 0;
10945                         for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
10946                                 if (fmode & (1 << i))
10947                                         nbits += width[i];
10948                         }
10949                         if (nbits > maxbits) {
10950                                 CH_ERR(adap, "optional fields in the filter "
10951                                     "mode (0x%x) add up to %d bits "
10952                                     "(must be <= %db).  Remove some fields and "
10953                                     "try again.\n", fmode, nbits, maxbits);
10954                                 return -E2BIG;
10955                         }
10956 
10957                         /*
10958                          * Hardware wants the bits to be maxed out.  Keep
10959                          * setting them until there's no room for more.
10960                          */
10961                         for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
10962                                 if (fmode & (1 << i))
10963                                         continue;
10964                                 if (nbits + width[i] <= maxbits) {
10965                                         fmode |= 1 << i;
10966                                         nbits += width[i];
10967                                         if (nbits == maxbits)
10968                                                 break;
10969                                 }
10970                         }
10971 
10972                         fmask = fmode & adap->params.tp.filter_mask;
10973                         if (fmask != adap->params.tp.filter_mask) {
10974                                 CH_WARN(adap,
10975                                     "filter mask will be changed from 0x%x to "
10976                                     "0x%x to comply with the filter mode (0x%x).\n",
10977                                     adap->params.tp.filter_mask, fmask, fmode);
10978                         }
10979                 } else {
10980                         fmode = adap->params.tp.filter_mode;
10981                         fmask = mask;
10982                         if ((fmode | fmask) != fmode) {
10983                                 CH_ERR(adap,
10984                                     "filter mask (0x%x) must be a subset of "
10985                                     "the filter mode (0x%x).\n", fmask, fmode);
10986                                 return -EINVAL;
10987                         }
10988                 }
10989 
10990                 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10991                     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
10992                     V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
10993                 val = V_FW_PARAMS_PARAM_FILTER_MODE(fmode) |
10994                     V_FW_PARAMS_PARAM_FILTER_MASK(fmask);
10995                 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param,
10996                     &val);
10997                 if (rc < 0)
10998                         return rc;
10999         }
11000 
11001         if (vnic_mode != -1) {
11002                 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11003                     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
11004                     V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
11005                 val = vnic_mode;
11006                 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param,
11007                     &val);
11008                 if (rc < 0)
11009                         return rc;
11010         }
11011 
11012         /* Refresh. */
11013         read_filter_mode_and_ingress_config(adap);
11014 
11015         return 0;
11016 }
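
/*
 * Illustrative usage sketch (not compiled, not part of the driver): set a
 * filter mode through t4_set_filter_cfg() above.  The two optional
 * components used here (FCOE and FRAGMENTATION) are simply the field names
 * already referenced in this file; passing -1 for @mask lets the routine
 * derive the mask from the mode, and -1 leaves the vnic mode untouched.
 * The wrapper name is hypothetical.
 */
#if 0
static int
example_set_filter_mode(struct adapter *sc)
{
        int mode = (1 << S_FCOE) | (1 << S_FRAGMENTATION);

        return (t4_set_filter_cfg(sc, mode, -1, -1));
}
#endif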
11017 
11018 /**
11019  *      t4_clr_port_stats - clear port statistics
11020  *      @adap: the adapter
11021  *      @idx: the port index
11022  *
11023  *      Clear HW statistics for the given port.
11024  */
11025 void t4_clr_port_stats(struct adapter *adap, int idx)
11026 {
11027         unsigned int i;
11028         u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
11029         u32 port_base_addr;
11030 
11031         if (is_t4(adap))
11032                 port_base_addr = PORT_BASE(idx);
11033         else
11034                 port_base_addr = T5_PORT_BASE(idx);
11035 
11036         for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
11037                         i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
11038                 t4_write_reg(adap, port_base_addr + i, 0);
11039         for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
11040                         i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
11041                 t4_write_reg(adap, port_base_addr + i, 0);
11042         for (i = 0; i < 4; i++)
11043                 if (bgmap & (1 << i)) {
11044                         t4_write_reg(adap,
11045                         A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
11046                         t4_write_reg(adap,
11047                         A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
11048                 }
11049 }
11050 
11051 /**
11052  *      t4_i2c_io - read/write I2C data from adapter
11053  *      @adap: the adapter
11054  *      @port: Port number if per-port device; <0 if not
11055  *      @devid: per-port device ID or absolute device ID
11056  *      @offset: byte offset into device I2C space
11057  *      @len: byte length of I2C space data
11058  *      @buf: buffer in which to return I2C data for read
11059  *            buffer which holds the I2C data for write
11060  *      @write: if true, do a write; else do a read
11061  *      Reads/Writes the I2C data from/to the indicated device and location.
11062  */
11063 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
11064               int port, unsigned int devid,
11065               unsigned int offset, unsigned int len,
11066               u8 *buf, bool write)
11067 {
11068         struct fw_ldst_cmd ldst_cmd, ldst_rpl;
11069         unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
11070         int ret = 0;
11071 
11072         if (len > I2C_PAGE_SIZE)
11073                 return -EINVAL;
11074 
11075         /* Don't allow accesses that span multiple pages */
11076         if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
11077                 return -EINVAL;
11078 
11079         memset(&ldst_cmd, 0, sizeof(ldst_cmd));
11080         ldst_cmd.op_to_addrspace =
11081                 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
11082                             F_FW_CMD_REQUEST |
11083                             (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
11084                             V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
11085         ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
11086         ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
11087         ldst_cmd.u.i2c.did = devid;
11088 
11089         while (len > 0) {
11090                 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
11091 
11092                 ldst_cmd.u.i2c.boffset = offset;
11093                 ldst_cmd.u.i2c.blen = i2c_len;
11094 
11095                 if (write)
11096                         memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
11097 
11098                 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
11099                                  write ? NULL : &ldst_rpl);
11100                 if (ret)
11101                         break;
11102 
11103                 if (!write)
11104                         memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
11105                 offset += i2c_len;
11106                 buf += i2c_len;
11107                 len -= i2c_len;
11108         }
11109 
11110         return ret;
11111 }
11112 
11113 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
11114               int port, unsigned int devid,
11115               unsigned int offset, unsigned int len,
11116               u8 *buf)
11117 {
11118         return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
11119 }
11120 
11121 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
11122               int port, unsigned int devid,
11123               unsigned int offset, unsigned int len,
11124               u8 *buf)
11125 {
11126         return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
11127 }
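
/*
 * Illustrative usage sketch (not compiled, not part of the driver): read the
 * beginning of a transceiver module EEPROM through t4_i2c_rd() above.  The
 * i2c device address 0xa0 is the conventional SFP/QSFP EEPROM address and is
 * an assumption here, as is the helper name; accesses larger than
 * I2C_PAGE_SIZE or spanning a page boundary are rejected by t4_i2c_io().
 */
#if 0
static int
example_read_module_eeprom(struct adapter *sc, int port, u8 *buf,
                           unsigned int len)
{
        return (t4_i2c_rd(sc, sc->mbox, port, 0xa0, 0, len, buf));
}
#endif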
11128 
11129 /**
11130  *      t4_sge_ctxt_rd - read an SGE context through FW
11131  *      @adap: the adapter
11132  *      @mbox: mailbox to use for the FW command
11133  *      @cid: the context id
11134  *      @ctype: the context type
11135  *      @data: where to store the context data
11136  *
11137  *      Issues a FW command through the given mailbox to read an SGE context.
11138  */
11139 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
11140                    enum ctxt_type ctype, u32 *data)
11141 {
11142         int ret;
11143         struct fw_ldst_cmd c;
11144 
11145         if (ctype == CTXT_EGRESS)
11146                 ret = FW_LDST_ADDRSPC_SGE_EGRC;
11147         else if (ctype == CTXT_INGRESS)
11148                 ret = FW_LDST_ADDRSPC_SGE_INGC;
11149         else if (ctype == CTXT_FLM)
11150                 ret = FW_LDST_ADDRSPC_SGE_FLMC;
11151         else
11152                 ret = FW_LDST_ADDRSPC_SGE_CONMC;
11153 
11154         memset(&c, 0, sizeof(c));
11155         c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
11156                                         F_FW_CMD_REQUEST | F_FW_CMD_READ |
11157                                         V_FW_LDST_CMD_ADDRSPACE(ret));
11158         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
11159         c.u.idctxt.physid = cpu_to_be32(cid);
11160 
11161         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11162         if (ret == 0) {
11163                 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
11164                 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
11165                 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
11166                 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
11167                 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
11168                 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
11169         }
11170         return ret;
11171 }
11172 
11173 /**
11174  *      t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
11175  *      @adap: the adapter
11176  *      @cid: the context id
11177  *      @ctype: the context type
11178  *      @data: where to store the context data
11179  *
11180  *      Reads an SGE context directly, bypassing FW.  This is only for
11181  *      debugging when FW is unavailable.
11182  */
11183 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
11184                       u32 *data)
11185 {
11186         int i, ret;
11187 
11188         t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
11189         ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
11190         if (!ret)
11191                 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
11192                         *data++ = t4_read_reg(adap, i);
11193         return ret;
11194 }
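
/*
 * Illustrative usage sketch (not compiled, not part of the driver): read an
 * egress context via the firmware and fall back to the direct (debug-only)
 * path if the mailbox call fails.  The helper name is hypothetical; @data
 * must have room for the six 32-bit words both routines above return.
 */
#if 0
static int
example_read_egress_ctxt(struct adapter *sc, unsigned int cid, u32 *data)
{
        int rc;

        rc = t4_sge_ctxt_rd(sc, sc->mbox, cid, CTXT_EGRESS, data);
        if (rc != 0)
                rc = t4_sge_ctxt_rd_bd(sc, cid, CTXT_EGRESS, data);

        return (rc);
}
#endif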
11195 
11196 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
11197     int sleep_ok)
11198 {
11199         struct fw_sched_cmd cmd;
11200 
11201         memset(&cmd, 0, sizeof(cmd));
11202         cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11203                                       F_FW_CMD_REQUEST |
11204                                       F_FW_CMD_WRITE);
11205         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11206 
11207         cmd.u.config.sc = FW_SCHED_SC_CONFIG;
11208         cmd.u.config.type = type;
11209         cmd.u.config.minmaxen = minmaxen;
11210 
11211         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
11212                                NULL, sleep_ok);
11213 }
11214 
11215 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
11216                     int rateunit, int ratemode, int channel, int cl,
11217                     int minrate, int maxrate, int weight, int pktsize,
11218                     int burstsize, int sleep_ok)
11219 {
11220         struct fw_sched_cmd cmd;
11221 
11222         memset(&cmd, 0, sizeof(cmd));
11223         cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11224                                       F_FW_CMD_REQUEST |
11225                                       F_FW_CMD_WRITE);
11226         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11227 
11228         cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11229         cmd.u.params.type = type;
11230         cmd.u.params.level = level;
11231         cmd.u.params.mode = mode;
11232         cmd.u.params.ch = channel;
11233         cmd.u.params.cl = cl;
11234         cmd.u.params.unit = rateunit;
11235         cmd.u.params.rate = ratemode;
11236         cmd.u.params.min = cpu_to_be32(minrate);
11237         cmd.u.params.max = cpu_to_be32(maxrate);
11238         cmd.u.params.weight = cpu_to_be16(weight);
11239         cmd.u.params.pktsize = cpu_to_be16(pktsize);
11240         cmd.u.params.burstsize = cpu_to_be16(burstsize);
11241 
11242         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
11243                                NULL, sleep_ok);
11244 }
11245 
11246 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
11247     unsigned int maxrate, int sleep_ok)
11248 {
11249         struct fw_sched_cmd cmd;
11250 
11251         memset(&cmd, 0, sizeof(cmd));
11252         cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11253                                       F_FW_CMD_REQUEST |
11254                                       F_FW_CMD_WRITE);
11255         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11256 
11257         cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11258         cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11259         cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
11260         cmd.u.params.ch = channel;
11261         cmd.u.params.rate = ratemode;           /* REL or ABS */
11262         cmd.u.params.max = cpu_to_be32(maxrate);/*  %  or kbps */
11263 
11264         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
11265                                NULL, sleep_ok);
11266 }
11267 
11268 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
11269     int weight, int sleep_ok)
11270 {
11271         struct fw_sched_cmd cmd;
11272 
11273         if (weight < 0 || weight > 100)
11274                 return -EINVAL;
11275 
11276         memset(&cmd, 0, sizeof(cmd));
11277         cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11278                                       F_FW_CMD_REQUEST |
11279                                       F_FW_CMD_WRITE);
11280         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11281 
11282         cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11283         cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11284         cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
11285         cmd.u.params.ch = channel;
11286         cmd.u.params.cl = cl;
11287         cmd.u.params.weight = cpu_to_be16(weight);
11288 
11289         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
11290                                NULL, sleep_ok);
11291 }
11292 
11293 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
11294     int mode, unsigned int maxrate, int pktsize, int sleep_ok)
11295 {
11296         struct fw_sched_cmd cmd;
11297 
11298         memset(&cmd, 0, sizeof(cmd));
11299         cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
11300                                       F_FW_CMD_REQUEST |
11301                                       F_FW_CMD_WRITE);
11302         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
11303 
11304         cmd.u.params.sc = FW_SCHED_SC_PARAMS;
11305         cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
11306         cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
11307         cmd.u.params.mode = mode;
11308         cmd.u.params.ch = channel;
11309         cmd.u.params.cl = cl;
11310         cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
11311         cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
11312         cmd.u.params.max = cpu_to_be32(maxrate);
11313         cmd.u.params.pktsize = cpu_to_be16(pktsize);
11314 
11315         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
11316                                NULL, sleep_ok);
11317 }
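
/*
 * Illustrative usage sketch (not compiled, not part of the driver): enable
 * min/max rate enforcement for the packet scheduler and then give one class
 * a weighted-round-robin weight, using only the helpers defined above.
 * FW_SCHED_TYPE_PKTSCHED is the type value those helpers already use; the
 * channel, class and weight values are caller-supplied and the wrapper name
 * is hypothetical.
 */
#if 0
static int
example_sched_cl_wrr(struct adapter *sc, int channel, int cl, int weight)
{
        int rc;

        rc = t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, 1, 1);
        if (rc == 0)
                rc = t4_sched_params_cl_wrr(sc, channel, cl, weight, 1);

        return (rc);
}
#endif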
11318 
11319 /*
11320  *      t4_config_watchdog - configure (enable/disable) a watchdog timer
11321  *      @adapter: the adapter
11322  *      @mbox: mailbox to use for the FW command
11323  *      @pf: the PF owning the queue
11324  *      @vf: the VF owning the queue
11325  *      @timeout: watchdog timeout in ms
11326  *      @action: watchdog timer / action
11327  *
11328  *      There are separate watchdog timers for each possible watchdog
11329  *      action.  Configure one of the watchdog timers by setting a non-zero
11330  *      timeout.  Disable a watchdog timer by using a timeout of zero.
11331  */
11332 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
11333                        unsigned int pf, unsigned int vf,
11334                        unsigned int timeout, unsigned int action)
11335 {
11336         struct fw_watchdog_cmd wdog;
11337         unsigned int ticks;
11338 
11339         /*
11340          * The watchdog command expects a timeout in units of 10ms so we need
11341          * to convert it here (via rounding) and force a minimum of one 10ms
11342          * "tick" if the timeout is non-zero but the conversion results in 0
11343          * ticks.
11344          */
11345         ticks = (timeout + 5)/10;
11346         if (timeout && !ticks)
11347                 ticks = 1;
11348 
11349         memset(&wdog, 0, sizeof wdog);
11350         wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
11351                                      F_FW_CMD_REQUEST |
11352                                      F_FW_CMD_WRITE |
11353                                      V_FW_PARAMS_CMD_PFN(pf) |
11354                                      V_FW_PARAMS_CMD_VFN(vf));
11355         wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
11356         wdog.timeout = cpu_to_be32(ticks);
11357         wdog.action = cpu_to_be32(action);
11358 
11359         return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
11360 }
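
/*
 * Illustrative usage sketch (not compiled, not part of the driver): arm or
 * disarm one watchdog timer.  A timeout of 0 disables the timer for the
 * given action; the action codes themselves come from the firmware
 * interface header and are not reproduced here.  The wrapper name is
 * hypothetical.
 */
#if 0
static int
example_watchdog(struct adapter *sc, unsigned int timeout_ms,
                 unsigned int action)
{
        return (t4_config_watchdog(sc, sc->mbox, sc->pf, 0, timeout_ms,
            action));
}
#endif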
11361 
11362 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
11363 {
11364         struct fw_devlog_cmd devlog_cmd;
11365         int ret;
11366 
11367         memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11368         devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11369                                              F_FW_CMD_REQUEST | F_FW_CMD_READ);
11370         devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11371         ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11372                          sizeof(devlog_cmd), &devlog_cmd);
11373         if (ret)
11374                 return ret;
11375 
11376         *level = devlog_cmd.level;
11377         return 0;
11378 }
11379 
11380 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
11381 {
11382         struct fw_devlog_cmd devlog_cmd;
11383 
11384         memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11385         devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11386                                              F_FW_CMD_REQUEST |
11387                                              F_FW_CMD_WRITE);
11388         devlog_cmd.level = level;
11389         devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11390         return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11391                           sizeof(devlog_cmd), &devlog_cmd);
11392 }
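
/*
 * Illustrative usage sketch (not compiled, not part of the driver): read the
 * current firmware device-log level and write it back unchanged; passing a
 * different level value would actually change it.  The wrapper name is
 * hypothetical.
 */
#if 0
static int
example_refresh_devlog_level(struct adapter *sc)
{
        unsigned int level;
        int rc;

        rc = t4_get_devlog_level(sc, &level);
        if (rc == 0)
                rc = t4_set_devlog_level(sc, level);

        return (rc);
}
#endif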
11393 
11394 int t4_configure_add_smac(struct adapter *adap)
11395 {
11396         unsigned int param, val;
11397         int ret = 0;
11398 
11399         adap->params.smac_add_support = 0;
11400         param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11401                   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
11402         /* Query the firmware to check whether it supports adding a source
11403          * MAC address to the TCAM.
11404          * If the firmware returns 1, the driver can use this feature; it then
11405          * needs to send a FW_PARAMS_PARAM_DEV_ADD_SMAC write command with
11406          * value 1 to enable adding the smac to the TCAM.
11407          */
11408         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
11409         if (ret)
11410                 return ret;
11411 
11412         if (val == 1) {
11413                 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
11414                                     &param, &val);
11415                 if (!ret)
11416                         /* Firmware allows adding explicit TCAM entries.
11417                          * Save this internally.
11418                          */
11419                         adap->params.smac_add_support = 1;
11420         }
11421 
11422         return ret;
11423 }
11424 
11425 int t4_configure_ringbb(struct adapter *adap)
11426 {
11427         unsigned int param, val;
11428         int ret = 0;
11429 
11430         param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11431                   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
11432         /* Query the firmware to check whether it supports the ring switch
11433          * feature.  If the firmware returns 1, the driver can use it and
11434          * needs to send a FW_PARAMS_PARAM_DEV_RING_BACKBONE write command
11435          * with value 1 to enable the ring backbone configuration.
11436          */
11437         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
11438         if (ret < 0) {
11439                 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
11440                         ret);
11441                 goto out;
11442         }
11443 
11444         if (val != 1) {
11445                 CH_ERR(adap, "FW does not support the ring backbone feature\n");
11446                 goto out;
11447         }
11448 
11449         ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
11450         if (ret < 0) {
11451                 CH_ERR(adap, "Could not set ring backbone, err=%d\n",
11452                         ret);
11453                 goto out;
11454         }
11455 
11456 out:
11457         return ret;
11458 }
11459 
11460 /*
11461  *      t4_set_vlan_acl - Set a VLAN id for the specified VF
11462  *      @adapter: the adapter
11463  *      @mbox: mailbox to use for the FW command
11464  *      @vf: one of the VFs instantiated by the specified PF
11465  *      @vlan: the VLAN id to be set, or 0 to disable the VLAN ACL
11466  *
11467  */
11468 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
11469                     u16 vlan)
11470 {
11471         struct fw_acl_vlan_cmd vlan_cmd;
11472         unsigned int enable;
11473 
11474         enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
11475         memset(&vlan_cmd, 0, sizeof(vlan_cmd));
11476         vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
11477                                          F_FW_CMD_REQUEST |
11478                                          F_FW_CMD_WRITE |
11479                                          F_FW_CMD_EXEC |
11480                                          V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
11481                                          V_FW_ACL_VLAN_CMD_VFN(vf));
11482         vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
11483         /* Drop all packets that do not match the VLAN id */
11484         vlan_cmd.dropnovlan_fm = (enable
11485                                   ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
11486                                      F_FW_ACL_VLAN_CMD_FM)
11487                                   : 0);
11488         if (enable != 0) {
11489                 vlan_cmd.nvlan = 1;
11490                 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
11491         }
11492 
11493         return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
11494 }
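
/*
 * Illustrative usage sketch (not compiled, not part of the driver): restrict
 * a VF to a single VLAN, or clear the restriction again by passing 0,
 * mirroring the enable test inside t4_set_vlan_acl() above.  The wrapper
 * name is hypothetical.
 */
#if 0
static int
example_vf_vlan_acl(struct adapter *sc, unsigned int vf, u16 vlan)
{
        return (t4_set_vlan_acl(sc, sc->mbox, vf, vlan));
}
#endif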
11495 
11496 /**
11497  *      t4_del_mac - Removes the exact-match filter for a MAC address
11498  *      @adap: the adapter
11499  *      @mbox: mailbox to use for the FW command
11500  *      @viid: the VI id
11501  *      @addr: the MAC address value
11502  *      @smac: if true, delete from only the smac region of MPS
11503  *
11504  *      Removes the exact-match filter entry for the given MAC address from
11505  *      the MPS TCAM, or, if @smac is true, from the smac region of MPS
11506  *      only.  The entry is looked up by MAC address rather than by an
11507  *      index into the TCAM.
11508  *
11509  *      Returns a negative error number on failure.
11510  */
11511 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
11512                const u8 *addr, bool smac)
11513 {
11514         int ret;
11515         struct fw_vi_mac_cmd c;
11516         struct fw_vi_mac_exact *p = c.u.exact;
11517         unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
11518 
11519         memset(&c, 0, sizeof(c));
11520         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
11521                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
11522                                    V_FW_VI_MAC_CMD_VIID(viid));
11523         c.freemacs_to_len16 = cpu_to_be32(
11524                                         V_FW_CMD_LEN16(1) |
11525                                         (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
11526 
11527         memcpy(p->macaddr, addr, sizeof(p->macaddr));
11528         p->valid_to_idx = cpu_to_be16(
11529                                 F_FW_VI_MAC_CMD_VALID |
11530                                 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
11531 
11532         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11533         if (ret == 0) {
11534                 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
11535                 if (ret < max_mac_addr)
11536                         return -ENOMEM;
11537         }
11538 
11539         return ret;
11540 }
11541 
11542 /**
11543  *      t4_add_mac - Adds an exact-match filter for a MAC address
11544  *      @adap: the adapter
11545  *      @mbox: mailbox to use for the FW command
11546  *      @viid: the VI id
11547  *      @idx: index of existing filter for old value of MAC address, or -1
11548  *      @addr: the new MAC address value
11549  *      @persist: whether a new MAC allocation should be persistent
11550  *      @add_smt: if true also add the address to the HW SMT
11551  *      @smac: if true, update only the smac region of MPS
11552  *
11553  *      Modifies an exact-match filter and sets it to the new MAC address if
11554  *      @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
11555  *      latter case the address is added persistently if @persist is %true.
11556  *
11557  *      Returns a negative error number or the index of the filter with the new
11558  *      MAC value.  Note that this index may differ from @idx.
11559  */
11560 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
11561                int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
11562 {
11563         int ret, mode;
11564         struct fw_vi_mac_cmd c;
11565         struct fw_vi_mac_exact *p = c.u.exact;
11566         unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
11567 
11568         if (idx < 0)            /* new allocation */
11569                 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
11570         mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
11571 
11572         memset(&c, 0, sizeof(c));
11573         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
11574                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
11575                                    V_FW_VI_MAC_CMD_VIID(viid));
11576         c.freemacs_to_len16 = cpu_to_be32(
11577                                 V_FW_CMD_LEN16(1) |
11578                                 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
11579         p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
11580                                       V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
11581                                       V_FW_VI_MAC_CMD_IDX(idx));
11582         memcpy(p->macaddr, addr, sizeof(p->macaddr));
11583 
11584         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11585         if (ret == 0) {
11586                 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
11587                 if (ret >= max_mac_addr)
11588                         return -ENOMEM;
11589                 if (smt_idx) {
11590                         /* Does the firmware support returning smt_idx? */
11591                         if (adap->params.viid_smt_extn_support)
11592                                 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
11593                         else {
11594                                 /* In T4/T5, SMT contains 256 SMAC entries
11595                                  * organized in 128 rows of 2 entries each.
11596                                  * In T6, SMT contains 256 SMAC entries in
11597                                  * 256 rows.
11598                                  */
11599                                 if (chip_id(adap) <= CHELSIO_T5)
11600                                         *smt_idx = ((viid & M_FW_VIID_VIN) << 1);
11601                                 else
11602                                         *smt_idx = (viid & M_FW_VIID_VIN);
11603                         }
11604                 }
11605         }
11606 
11607         return ret;
11608 }
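
/*
 * Illustrative usage sketch (not compiled, not part of the driver): replace
 * one unicast address on a VI using the two helpers above.  The old entry is
 * freed by MAC value and the new address is added as a fresh, non-persistent
 * MPS TCAM entry with no SMT binding.  The wrapper name, and the assumption
 * that @viid was already allocated by the caller, are not from this file.
 */
#if 0
static int
example_replace_mac(struct adapter *sc, unsigned int viid,
                    const u8 *old_mac, const u8 *new_mac)
{
        int idx;

        (void)t4_del_mac(sc, sc->mbox, viid, old_mac, false);
        idx = t4_add_mac(sc, sc->mbox, viid, -1, new_mac, false, NULL, false);

        return (idx < 0 ? idx : 0);
}
#endif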
