The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/common/cxgb_t3_hw.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /**************************************************************************
    2 
    3 Copyright (c) 2007, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/6.4/sys/dev/cxgb/common/cxgb_t3_hw.c 174319 2007-12-05 22:05:49Z kmacy $");
   32 
   33 
   34 #ifdef CONFIG_DEFINED
   35 #include <cxgb_include.h>
   36 #else
   37 #include <dev/cxgb/cxgb_include.h>
   38 #endif
   39 
   40 #undef msleep
   41 #define msleep t3_os_sleep
   42 
   43 /**
   44  *      t3_wait_op_done_val - wait until an operation is completed
   45  *      @adapter: the adapter performing the operation
   46  *      @reg: the register to check for completion
   47  *      @mask: a single-bit field within @reg that indicates completion
   48  *      @polarity: the value of the field when the operation is completed
   49  *      @attempts: number of check iterations
   50  *      @delay: delay in usecs between iterations
   51  *      @valp: where to store the value of the register at completion time
   52  *
   53  *      Wait until an operation is completed by checking a bit in a register
   54  *      up to @attempts times.  If @valp is not NULL the value of the register
   55  *      at the time it indicated completion is stored there.  Returns 0 if the
   56  *      operation completes and -EAGAIN otherwise.
   57  */
   58 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
   59                         int attempts, int delay, u32 *valp)
   60 {
   61         while (1) {
   62                 u32 val = t3_read_reg(adapter, reg);
   63 
   64                 if (!!(val & mask) == polarity) {
   65                         if (valp)
   66                                 *valp = val;
   67                         return 0;
   68                 }
   69                 if (--attempts == 0)
   70                         return -EAGAIN;
   71                 if (delay)
   72                         udelay(delay);
   73         }
   74 }
   75 
   76 /**
   77  *      t3_write_regs - write a bunch of registers
   78  *      @adapter: the adapter to program
   79  *      @p: an array of register address/register value pairs
   80  *      @n: the number of address/value pairs
   81  *      @offset: register address offset
   82  *
   83  *      Takes an array of register address/register value pairs and writes each
   84  *      value to the corresponding register.  Register addresses are adjusted
   85  *      by the supplied offset.
   86  */
   87 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
   88                    unsigned int offset)
   89 {
   90         while (n--) {
   91                 t3_write_reg(adapter, p->reg_addr + offset, p->val);
   92                 p++;
   93         }
   94 }
   95 
   96 /**
   97  *      t3_set_reg_field - set a register field to a value
   98  *      @adapter: the adapter to program
   99  *      @addr: the register address
  100  *      @mask: specifies the portion of the register to modify
  101  *      @val: the new value for the register field
  102  *
  103  *      Sets a register field specified by the supplied mask to the
  104  *      given value.
  105  */
  106 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
  107 {
  108         u32 v = t3_read_reg(adapter, addr) & ~mask;
  109 
  110         t3_write_reg(adapter, addr, v | val);
  111         (void) t3_read_reg(adapter, addr);      /* flush */
  112 }
  113 
/**
 *      t3_read_indirect - read indirectly addressed registers
 *      @adap: the adapter
 *      @addr_reg: register holding the indirect address
 *      @data_reg: register holding the value of the indirect register
 *      @vals: where the read register values are stored
 *      @nregs: how many indirect registers to read
 *      @start_idx: index of first indirect register to read
 *
 *      Reads registers that are accessed indirectly through an address/data
 *      register pair: for each of the @nregs consecutive indices starting
 *      at @start_idx, the index is written to @addr_reg and the value is
 *      fetched from @data_reg.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx)
{
        while (nregs--) {
                t3_write_reg(adap, addr_reg, start_idx);
                *vals++ = t3_read_reg(adap, data_reg);
                start_idx++;
        }
}
  136 
/**
 *      t3_mc7_bd_read - read from MC7 through backdoor accesses
 *      @mc7: identifies MC7 to read from
 *      @start: index of first 64-bit word to read
 *      @n: number of 64-bit words to read
 *      @buf: where to store the read result
 *
 *      Read n 64-bit words from MC7 starting at word start, using backdoor
 *      accesses.  Returns 0 on success, -EINVAL if the requested range
 *      exceeds the MC7 size, or -EIO if the hardware stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
        /* per-width tables for reassembling partial words, indexed by
         * mc7->width */
        static int shift[] = { 0, 0, 16, 24 };
        static int step[]  = { 0, 32, 16, 8 };

        unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
        adapter_t *adap = mc7->adapter;

        if (start >= size64 || start + n > size64)
                return -EINVAL;

        /* convert the 64-bit word index into a backdoor byte address */
        start *= (8 << mc7->width);
        while (n--) {
                int i;
                u64 val64 = 0;

                /* narrower memory interfaces need several backdoor reads
                 * per 64-bit word */
                for (i = (1 << mc7->width) - 1; i >= 0; --i) {
                        int attempts = 10;
                        u32 val;

                        /* issue the backdoor read and poll until the
                         * operation register clears F_BUSY */
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
                                       start);
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
                        while ((val & F_BUSY) && attempts--)
                                val = t3_read_reg(adap,
                                                  mc7->offset + A_MC7_BD_OP);
                        if (val & F_BUSY)
                                return -EIO;

                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
                        if (mc7->width == 0) {
                                /* full-width interface: DATA0 holds the low
                                 * 32 bits, DATA1 the high 32 bits */
                                val64 = t3_read_reg(adap,
                                                mc7->offset + A_MC7_BD_DATA0);
                                val64 |= (u64)val << 32;
                        } else {
                                /* assemble the word from partial reads */
                                if (mc7->width > 1)
                                        val >>= shift[mc7->width];
                                val64 |= (u64)val << (step[mc7->width] * i);
                        }
                        start += 8;
                }
                *buf++ = val64;
        }
        return 0;
}
  194 
/*
 * Initialize MI1 (the MDIO management interface).
 *
 * Programs A_MI1_CFG with the MDIO clock divider derived from the core
 * clock and the target MDC frequency from VPD, plus the board-specific
 * MDI invert/enable settings.  Boards without 10G support additionally
 * get the ST field set (NOTE(review): presumably selects the 1G-style
 * management frame format -- confirm against the MI1 register spec).
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
        /* divider encoding: cclk / (2 * mdc) - 1 */
        u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
        u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
                  V_CLKDIV(clkdiv);

        if (!(ai->caps & SUPPORTED_10000baseT_Full))
                val |= V_ST(1);
        t3_write_reg(adap, A_MI1_CFG, val);
}
  208 
  209 #define MDIO_ATTEMPTS 20
  210 
/*
 * MI1 read/write operations for direct-addressed PHYs.
 */

/*
 * Read a register of a directly addressed PHY.  @mmd_addr must be 0 for
 * direct addressing; otherwise -EINVAL is returned.  On success *valp
 * receives the value read from A_MI1_DATA.  The MDIO lock is held across
 * the address write, operation trigger and completion poll.
 * Returns 0 on success or a negative errno.
 */
static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
                    int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        MDIO_LOCK(adapter);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_MI1_DATA);
        MDIO_UNLOCK(adapter);
        return ret;
}
  232 
/*
 * Write a register of a directly addressed PHY.  @mmd_addr must be 0 for
 * direct addressing; otherwise -EINVAL is returned.  The MDIO lock is
 * held across the address/data writes, operation trigger and completion
 * poll.  Returns 0 on success or a negative errno.
 */
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
                     int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        MDIO_LOCK(adapter);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        MDIO_UNLOCK(adapter);
        return ret;
}
  250 
/* MDIO operation table for directly addressed PHYs. */
static struct mdio_ops mi1_mdio_ops = {
        mi1_read,
        mi1_write
};
  255 
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */

/*
 * Read a register of an indirectly addressed PHY.  The register number is
 * first latched via an address cycle (register number in A_MI1_DATA, op 0),
 * then a read cycle (op 3) fetches the value into A_MI1_DATA.  Both cycles
 * poll for completion; the MDIO lock is held across the whole sequence.
 * On success *valp receives the value read.  Returns 0 or a negative errno.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
                        int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        MDIO_LOCK(adapter);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
                if (!ret)
                        *valp = t3_read_reg(adapter, A_MI1_DATA);
        }
        MDIO_UNLOCK(adapter);
        return ret;
}
  280 
/*
 * Write a register of an indirectly addressed PHY.  The register number is
 * first latched via an address cycle (op 0), then the value is written with
 * a write cycle (op 1).  Both cycles poll for completion; the MDIO lock is
 * held across the whole sequence.  Returns 0 or a negative errno.
 */
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
                         int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        MDIO_LOCK(adapter);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_DATA, val);
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
        }
        MDIO_UNLOCK(adapter);
        return ret;
}
  301 
/* MDIO operation table for indirectly addressed PHYs. */
static struct mdio_ops mi1_mdio_ext_ops = {
        mi1_ext_read,
        mi1_ext_write
};
  306 
  307 /**
  308  *      t3_mdio_change_bits - modify the value of a PHY register
  309  *      @phy: the PHY to operate on
  310  *      @mmd: the device address
  311  *      @reg: the register address
  312  *      @clear: what part of the register value to mask off
  313  *      @set: what part of the register value to set
  314  *
  315  *      Changes the value of a PHY register by applying a mask to its current
  316  *      value and ORing the result with a new value.
  317  */
  318 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
  319                         unsigned int set)
  320 {
  321         int ret;
  322         unsigned int val;
  323 
  324         ret = mdio_read(phy, mmd, reg, &val);
  325         if (!ret) {
  326                 val &= ~clear;
  327                 ret = mdio_write(phy, mmd, reg, val | set);
  328         }
  329         return ret;
  330 }
  331 
  332 /**
  333  *      t3_phy_reset - reset a PHY block
  334  *      @phy: the PHY to operate on
  335  *      @mmd: the device address of the PHY block to reset
  336  *      @wait: how long to wait for the reset to complete in 1ms increments
  337  *
  338  *      Resets a PHY block and optionally waits for the reset to complete.
  339  *      @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
  340  *      for 10G PHYs.
  341  */
  342 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
  343 {
  344         int err;
  345         unsigned int ctl;
  346 
  347         err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
  348         if (err || !wait)
  349                 return err;
  350 
  351         do {
  352                 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
  353                 if (err)
  354                         return err;
  355                 ctl &= BMCR_RESET;
  356                 if (ctl)
  357                         msleep(1);
  358         } while (ctl && --wait);
  359 
  360         return ctl ? -1 : 0;
  361 }
  362 
  363 /**
  364  *      t3_phy_advertise - set the PHY advertisement registers for autoneg
  365  *      @phy: the PHY to operate on
  366  *      @advert: bitmap of capabilities the PHY should advertise
  367  *
  368  *      Sets a 10/100/1000 PHY's advertisement registers to advertise the
  369  *      requested capabilities.
  370  */
  371 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
  372 {
  373         int err;
  374         unsigned int val = 0;
  375 
  376         err = mdio_read(phy, 0, MII_CTRL1000, &val);
  377         if (err)
  378                 return err;
  379 
  380         val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
  381         if (advert & ADVERTISED_1000baseT_Half)
  382                 val |= ADVERTISE_1000HALF;
  383         if (advert & ADVERTISED_1000baseT_Full)
  384                 val |= ADVERTISE_1000FULL;
  385 
  386         err = mdio_write(phy, 0, MII_CTRL1000, val);
  387         if (err)
  388                 return err;
  389 
  390         val = 1;
  391         if (advert & ADVERTISED_10baseT_Half)
  392                 val |= ADVERTISE_10HALF;
  393         if (advert & ADVERTISED_10baseT_Full)
  394                 val |= ADVERTISE_10FULL;
  395         if (advert & ADVERTISED_100baseT_Half)
  396                 val |= ADVERTISE_100HALF;
  397         if (advert & ADVERTISED_100baseT_Full)
  398                 val |= ADVERTISE_100FULL;
  399         if (advert & ADVERTISED_Pause)
  400                 val |= ADVERTISE_PAUSE_CAP;
  401         if (advert & ADVERTISED_Asym_Pause)
  402                 val |= ADVERTISE_PAUSE_ASYM;
  403         return mdio_write(phy, 0, MII_ADVERTISE, val);
  404 }
  405 
  406 /**
  407  *      t3_set_phy_speed_duplex - force PHY speed and duplex
  408  *      @phy: the PHY to operate on
  409  *      @speed: requested PHY speed
  410  *      @duplex: requested PHY duplex
  411  *
  412  *      Force a 10/100/1000 PHY's speed and duplex.  This also disables
  413  *      auto-negotiation except for GigE, where auto-negotiation is mandatory.
  414  */
  415 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
  416 {
  417         int err;
  418         unsigned int ctl;
  419 
  420         err = mdio_read(phy, 0, MII_BMCR, &ctl);
  421         if (err)
  422                 return err;
  423 
  424         if (speed >= 0) {
  425                 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
  426                 if (speed == SPEED_100)
  427                         ctl |= BMCR_SPEED100;
  428                 else if (speed == SPEED_1000)
  429                         ctl |= BMCR_SPEED1000;
  430         }
  431         if (duplex >= 0) {
  432                 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
  433                 if (duplex == DUPLEX_FULL)
  434                         ctl |= BMCR_FULLDPLX;
  435         }
  436         if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
  437                 ctl |= BMCR_ANENABLE;
  438         return mdio_write(phy, 0, MII_BMCR, ctl);
  439 }
  440 
/*
 * Per-board adapter parameters, indexed by board id (see
 * t3_get_adapter_info).  Each entry carries GPIO output-enable/value
 * masks, GPIO interrupt masks, capability flags and the MDIO operation
 * table for the board's PHYs.  NOTE(review): field meanings inferred from
 * struct adapter_info elsewhere -- confirm against its declaration.
 */
static struct adapter_info t3_adap_info[] = {
        { 1, 1, 0, 0, 0,
          F_GPIO2_OEN | F_GPIO4_OEN |
          F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
          0,
          &mi1_mdio_ops, "Chelsio PE9000" },
        { 1, 1, 0, 0, 0,
          F_GPIO2_OEN | F_GPIO4_OEN |
          F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
          0,
          &mi1_mdio_ops, "Chelsio T302" },
        { 1, 0, 0, 0, 0,
          F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
          F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
          SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
          &mi1_mdio_ext_ops, "Chelsio T310" },
        { 1, 1, 0, 0, 0,
          F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
          F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
          F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
          SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
          &mi1_mdio_ext_ops, "Chelsio T320" },
        { 4, 0, 0, 0, 0,
          F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
          F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
          F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
          &mi1_mdio_ops, "Chelsio T304" },
};
  469 
  470 /*
  471  * Return the adapter_info structure with a given index.  Out-of-range indices
  472  * return NULL.
  473  */
  474 const struct adapter_info *t3_get_adapter_info(unsigned int id)
  475 {
  476         return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
  477 }
  478 
/* capability shorthands used only to build the port_types table below */
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
                 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/*
 * Per-port-type PHY setup function, capabilities and description, indexed
 * by the port type read from VPD (see get_vpd_params).  A NULL prep
 * function marks an entry with no PHY-specific initialization.
 */
static struct port_type_info port_types[] = {
        { NULL },
        { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
          "10GBASE-XR" },
        { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
          "10/100/1000BASE-T" },
        { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
          "10/100/1000BASE-T" },
        { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
        { NULL, CAPS_10G, "10GBASE-KX4" },
        { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
        { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
          "10GBASE-SR" },
        { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
};

#undef CAPS_1G
#undef CAPS_10G
  501 
/*
 * Expands to the three fields of one VPD-R keyword entry: the 2-byte
 * keyword, the length byte, and the data bytes.
 */
#define VPD_ENTRY(name, len) \
        u8 name##_kword[2]; u8 name##_len; char name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.  The layout must match the data written to the EEPROM;
 * get_vpd_params() reads it 4 bytes at a time.
 */
struct t3_vpd {
        u8  id_tag;
        u8  id_len[2];
        u8  id_data[16];
        u8  vpdr_tag;
        u8  vpdr_len[2];
        VPD_ENTRY(pn, 16);                     /* part number */
        VPD_ENTRY(ec, 16);                     /* EC level */
        VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
        VPD_ENTRY(na, 12);                     /* MAC address base */
        VPD_ENTRY(cclk, 6);                    /* core clock */
        VPD_ENTRY(mclk, 6);                    /* mem clock */
        VPD_ENTRY(uclk, 6);                    /* uP clk */
        VPD_ENTRY(mdc, 6);                     /* MDIO clk */
        VPD_ENTRY(mt, 2);                      /* mem timing */
        VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
        VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
        VPD_ENTRY(port0, 2);                   /* PHY0 complex */
        VPD_ENTRY(port1, 2);                   /* PHY1 complex */
        VPD_ENTRY(port2, 2);                   /* PHY2 complex */
        VPD_ENTRY(port3, 2);                   /* PHY3 complex */
        VPD_ENTRY(rv, 1);                      /* csum */
        u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
  533 
  534 #define EEPROM_MAX_POLL   4
  535 #define EEPROM_STAT_ADDR  0x4000
  536 #define VPD_BASE          0xc00
  537 
/**
 *      t3_seeprom_read - read a VPD EEPROM location
 *      @adapter: adapter to read
 *      @addr: EEPROM address (must be 4-byte aligned)
 *      @data: where to store the read data
 *
 *      Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *      VPD ROM capability.  A zero is written to the flag bit when the
 *      address is written to the control register.  The hardware device will
 *      set the flag to 1 when 4 bytes have been read into the data register.
 *      Returns 0 on success, -EINVAL for an out-of-range or misaligned
 *      address, or -EIO if the flag never sets.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
        do {
                udelay(10);
                t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
        /* VPD data is little-endian; convert to host order */
        *data = le32_to_cpu(*data);
        return 0;
}
  572 
/**
 *      t3_seeprom_write - write a VPD EEPROM location
 *      @adapter: adapter to write
 *      @addr: EEPROM address (must be 4-byte aligned)
 *      @data: value to write
 *
 *      Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *      VPD ROM capability.  The flag bit is set along with the address; the
 *      hardware clears it when the write completes.  Returns 0 on success,
 *      -EINVAL for an out-of-range or misaligned address, or -EIO if the
 *      flag never clears.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
                                 cpu_to_le32(data));
        t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
                                 (u16)addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        return 0;
}
  606 
  607 /**
  608  *      t3_seeprom_wp - enable/disable EEPROM write protection
  609  *      @adapter: the adapter
  610  *      @enable: 1 to enable write protection, 0 to disable it
  611  *
  612  *      Enables or disables write protection on the serial EEPROM.
  613  */
  614 int t3_seeprom_wp(adapter_t *adapter, int enable)
  615 {
  616         return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
  617 }
  618 
  619 /*
  620  * Convert a character holding a hex digit to a number.
  621  */
  622 static unsigned int hex2int(unsigned char c)
  623 {
  624         return isdigit(c) ? c - '' : toupper(c) - 'A' + 10;
  625 }
  626 
/**
 *      get_vpd_params - read VPD parameters from VPD EEPROM
 *      @adapter: adapter to read
 *      @p: where to store the parameters
 *
 *      Reads card parameters stored in VPD EEPROM: clocks, memory timing,
 *      serial number, per-port PHY types, XAUI configs and the base MAC
 *      address.  Returns 0 on success or the error from t3_seeprom_read.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
        int i, addr, ret;
        struct t3_vpd vpd;

        /*
         * Card information is normally at VPD_BASE but some early cards had
         * it at 0.  Probe the first word to decide which base to use.
         */
        ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
        if (ret)
                return ret;
        /* 0x82 is the VPD ID-string tag */
        addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

        /* read the whole structure, one 32-bit word at a time */
        for (i = 0; i < sizeof(vpd); i += 4) {
                ret = t3_seeprom_read(adapter, addr + i,
                                      (u32 *)((u8 *)&vpd + i));
                if (ret)
                        return ret;
        }

        /* clocks and memory timing are stored as decimal ASCII strings */
        p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
        p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
        p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
        p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
        p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
        memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

        /* Old eeproms didn't have port information */
        if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
                /* port types are single hex digits indexing port_types[] */
                p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
                p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
                p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
                p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
                p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }

        /* MAC address base is stored as 12 hex digits */
        for (i = 0; i < 6; i++)
                p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
                                 hex2int(vpd.na_data[2 * i + 1]);
        return 0;
}
  680 
/* serial flash and firmware constants (see sf1_read/sf1_write below) */
enum {
        SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
        SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

        /* flash command opcodes */
        SF_PROG_PAGE    = 2,       /* program page */
        SF_WR_DISABLE   = 4,       /* disable writes */
        SF_RD_STATUS    = 5,       /* read status register */
        SF_WR_ENABLE    = 6,       /* enable writes */
        SF_RD_DATA_FAST = 0xb,     /* read flash */
        SF_ERASE_SECTOR = 0xd8,    /* erase sector */

        FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
        FW_VERS_ADDR = 0x77ffc,    /* flash address holding FW version */
        FW_MIN_SIZE = 8            /* at least version and csum */
};
  699 
/**
 *      sf1_read - read data from the serial flash
 *      @adapter: the adapter
 *      @byte_cnt: number of bytes to read (1-4)
 *      @cont: whether another operation will be chained
 *      @valp: where to store the read data
 *
 *      Reads up to 4 bytes of data from the serial flash.  The location of
 *      the read needs to be specified prior to calling this by issuing the
 *      appropriate commands to the serial flash.  Returns 0 on success,
 *      -EINVAL for a bad byte count, -EBUSY if the flash interface is
 *      already busy, or the completion-wait error.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
                    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_SF_DATA);
        return ret;
}
  726 
/**
 *      sf1_write - write data to the serial flash
 *      @adapter: the adapter
 *      @byte_cnt: number of bytes to write (1-4)
 *      @cont: whether another operation will be chained
 *      @val: value to write
 *
 *      Writes up to 4 bytes of data to the serial flash.  The location of
 *      the write needs to be specified prior to calling this by issuing the
 *      appropriate commands to the serial flash.  Returns 0 on success,
 *      -EINVAL for a bad byte count, -EBUSY if the flash interface is
 *      already busy, or the completion-wait error.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
                     u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_DATA, val);
        t3_write_reg(adapter, A_SF_OP,
                     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
  750 
  751 /**
  752  *      flash_wait_op - wait for a flash operation to complete
  753  *      @adapter: the adapter
  754  *      @attempts: max number of polls of the status register
  755  *      @delay: delay between polls in ms
  756  *
  757  *      Wait for a flash operation to complete by polling the status register.
  758  */
  759 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
  760 {
  761         int ret;
  762         u32 status;
  763 
  764         while (1) {
  765                 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
  766                     (ret = sf1_read(adapter, 1, 0, &status)) != 0)
  767                         return ret;
  768                 if (!(status & 1))
  769                         return 0;
  770                 if (--attempts == 0)
  771                         return -EAGAIN;
  772                 if (delay)
  773                         msleep(delay);
  774         }
  775 }
  776 
  777 /**
  778  *      t3_read_flash - read words from serial flash
  779  *      @adapter: the adapter
  780  *      @addr: the start address for the read
  781  *      @nwords: how many 32-bit words to read
  782  *      @data: where to store the read data
  783  *      @byte_oriented: whether to store data as bytes or as words
  784  *
  785  *      Read the specified number of 32-bit words from the serial flash.
  786  *      If @byte_oriented is set the read data is stored as a byte array
  787  *      (i.e., big-endian), otherwise as 32-bit words in the platform's
  788  *      natural endianess.
  789  */
  790 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
  791                   u32 *data, int byte_oriented)
  792 {
  793         int ret;
  794 
  795         if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
  796                 return -EINVAL;
  797 
  798         addr = swab32(addr) | SF_RD_DATA_FAST;
  799 
  800         if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
  801             (ret = sf1_read(adapter, 1, 1, data)) != 0)
  802                 return ret;
  803 
  804         for ( ; nwords; nwords--, data++) {
  805                 ret = sf1_read(adapter, 4, nwords > 1, data);
  806                 if (ret)
  807                         return ret;
  808                 if (byte_oriented)
  809                         *data = htonl(*data);
  810         }
  811         return 0;
  812 }
  813 
  814 /**
  815  *      t3_write_flash - write up to a page of data to the serial flash
  816  *      @adapter: the adapter
  817  *      @addr: the start address to write
  818  *      @n: length of data to write
  819  *      @data: the data to write
  820  *
  821  *      Writes up to a page of data (256 bytes) to the serial flash starting
  822  *      at the given address.
  823  */
  824 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
  825                           unsigned int n, const u8 *data)
  826 {
  827         int ret;
  828         u32 buf[64];
  829         unsigned int i, c, left, val, offset = addr & 0xff;
  830 
  831         if (addr + n > SF_SIZE || offset + n > 256)
  832                 return -EINVAL;
  833 
  834         val = swab32(addr) | SF_PROG_PAGE;
  835 
  836         if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
  837             (ret = sf1_write(adapter, 4, 1, val)) != 0)
  838                 return ret;
  839 
  840         for (left = n; left; left -= c) {
  841                 c = min(left, 4U);
  842                 for (val = 0, i = 0; i < c; ++i)
  843                         val = (val << 8) + *data++;
  844 
  845                 ret = sf1_write(adapter, c, c != left, val);
  846                 if (ret)
  847                         return ret;
  848         }
  849         if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
  850                 return ret;
  851 
  852         /* Read the page to verify the write succeeded */
  853         ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
  854         if (ret)
  855                 return ret;
  856 
  857         if (memcmp(data - n, (u8 *)buf + offset, n))
  858                 return -EIO;
  859         return 0;
  860 }
  861 
  862 /**
  863  *      t3_get_tp_version - read the tp sram version
  864  *      @adapter: the adapter
  865  *      @vers: where to place the version
  866  *
  867  *      Reads the protocol sram version from sram.
  868  */
  869 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
  870 {
  871         int ret;
  872 
  873         /* Get version loaded in SRAM */
  874         t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
  875         ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
  876                               1, 1, 5, 1);
  877         if (ret)
  878                 return ret;
  879         
  880         *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
  881 
  882         return 0;
  883 }
  884 
  885 /**
  886  *      t3_check_tpsram_version - read the tp sram version
  887  *      @adapter: the adapter
  888  *
  889  */
  890 int t3_check_tpsram_version(adapter_t *adapter)
  891 {
  892         int ret;
  893         u32 vers;
  894         unsigned int major, minor;
  895 
  896         /* Get version loaded in SRAM */
  897         t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
  898         ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
  899                               1, 1, 5, 1);
  900         if (ret)
  901                 return ret;
  902         
  903         vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
  904 
  905         major = G_TP_VERSION_MAJOR(vers);
  906         minor = G_TP_VERSION_MINOR(vers);
  907 
  908         if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) 
  909                 return 0;
  910 
  911         CH_WARN(adapter, "found wrong TP version (%u.%u), "
  912                "driver needs version %d.%d\n", major, minor,
  913                TP_VERSION_MAJOR, TP_VERSION_MINOR);
  914         return -EINVAL;
  915 }
  916 
  917 /**
  918  *      t3_check_tpsram - check if provided protocol SRAM 
  919  *                        is compatible with this driver
  920  *      @adapter: the adapter
  921  *      @tp_sram: the firmware image to write
  922  *      @size: image size
  923  *
  924  *      Checks if an adapter's tp sram is compatible with the driver.
  925  *      Returns 0 if the versions are compatible, a negative error otherwise.
  926  */
  927 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
  928 {
  929         u32 csum;
  930         unsigned int i;
  931         const u32 *p = (const u32 *)tp_sram;
  932 
  933         /* Verify checksum */
  934         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
  935                 csum += ntohl(p[i]);
  936         if (csum != 0xffffffff) {
  937                 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
  938                        csum);
  939                 return -EINVAL;
  940         }
  941 
  942         return 0;
  943 }
  944 
/*
 * FW image type as stored in the type field of the FW version word (see
 * G_FW_VERSION_TYPE); this driver requires FW_VERSION_T3.
 */
enum fw_version_type {
        FW_VERSION_N3,
        FW_VERSION_T3
};
  949 
  950 /**
  951  *      t3_get_fw_version - read the firmware version
  952  *      @adapter: the adapter
  953  *      @vers: where to place the version
  954  *
  955  *      Reads the FW version from flash.
  956  */
  957 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
  958 {
  959         return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
  960 }
  961 
  962 /**
  963  *      t3_check_fw_version - check if the FW is compatible with this driver
  964  *      @adapter: the adapter
  965  *
  966  *      Checks if an adapter's FW is compatible with the driver.  Returns 0
  967  *      if the versions are compatible, a negative error otherwise.
  968  */
  969 int t3_check_fw_version(adapter_t *adapter)
  970 {
  971         int ret;
  972         u32 vers;
  973         unsigned int type, major, minor;
  974 
  975         ret = t3_get_fw_version(adapter, &vers);
  976         if (ret)
  977                 return ret;
  978 
  979         type = G_FW_VERSION_TYPE(vers);
  980         major = G_FW_VERSION_MAJOR(vers);
  981         minor = G_FW_VERSION_MINOR(vers);
  982 
  983         if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
  984             minor == FW_VERSION_MINOR)
  985                 return 0;
  986 
  987         CH_WARN(adapter, "found wrong FW version (%u.%u), "
  988                "driver needs version %d.%d\n", major, minor,
  989                FW_VERSION_MAJOR, FW_VERSION_MINOR);
  990         return -EINVAL;
  991 }
  992 
  993 /**
  994  *      t3_flash_erase_sectors - erase a range of flash sectors
  995  *      @adapter: the adapter
  996  *      @start: the first sector to erase
  997  *      @end: the last sector to erase
  998  *
  999  *      Erases the sectors in the given range.
 1000  */
 1001 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
 1002 {
 1003         while (start <= end) {
 1004                 int ret;
 1005 
 1006                 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
 1007                     (ret = sf1_write(adapter, 4, 0,
 1008                                      SF_ERASE_SECTOR | (start << 8))) != 0 ||
 1009                     (ret = flash_wait_op(adapter, 5, 500)) != 0)
 1010                         return ret;
 1011                 start++;
 1012         }
 1013         return 0;
 1014 }
 1015 
 1016 /*
 1017  *      t3_load_fw - download firmware
 1018  *      @adapter: the adapter
 1019  *      @fw_data: the firmware image to write
 1020  *      @size: image size
 1021  *
 1022  *      Write the supplied firmware image to the card's serial flash.
 1023  *      The FW image has the following sections: @size - 8 bytes of code and
 1024  *      data, followed by 4 bytes of FW version, followed by the 32-bit
 1025  *      1's complement checksum of the whole image.
 1026  */
 1027 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
 1028 {
 1029         u32 csum;
 1030         unsigned int i;
 1031         const u32 *p = (const u32 *)fw_data;
 1032         int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
 1033 
 1034         if ((size & 3) || size < FW_MIN_SIZE)
 1035                 return -EINVAL;
 1036         if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
 1037                 return -EFBIG;
 1038 
 1039         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
 1040                 csum += ntohl(p[i]);
 1041         if (csum != 0xffffffff) {
 1042                 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
 1043                        csum);
 1044                 return -EINVAL;
 1045         }
 1046 
 1047         ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
 1048         if (ret)
 1049                 goto out;
 1050 
 1051         size -= 8;  /* trim off version and checksum */
 1052         for (addr = FW_FLASH_BOOT_ADDR; size; ) {
 1053                 unsigned int chunk_size = min(size, 256U);
 1054 
 1055                 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
 1056                 if (ret)
 1057                         goto out;
 1058 
 1059                 addr += chunk_size;
 1060                 fw_data += chunk_size;
 1061                 size -= chunk_size;
 1062         }
 1063 
 1064         ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
 1065 out:
 1066         if (ret)
 1067                 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
 1068         return ret;
 1069 }
 1070 
/* Base offset added to addresses in the CIM control region
 * (see t3_cim_ctl_blk_read). */
#define CIM_CTL_BASE 0x2000
 1072 
 1073 /**
 1074  *      t3_cim_ctl_blk_read - read a block from CIM control region
 1075  *      @adap: the adapter
 1076  *      @addr: the start address within the CIM control region
 1077  *      @n: number of words to read
 1078  *      @valp: where to store the result
 1079  *
 1080  *      Reads a block of 4-byte words from the CIM control region.
 1081  */
 1082 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
 1083                         unsigned int *valp)
 1084 {
 1085         int ret = 0;
 1086 
 1087         if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
 1088                 return -EBUSY;
 1089 
 1090         for ( ; !ret && n--; addr += 4) {
 1091                 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
 1092                 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
 1093                                       0, 5, 2);
 1094                 if (!ret)
 1095                         *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
 1096         }
 1097         return ret;
 1098 }
 1099 
 1100 /**
 1101  *      t3_link_changed - handle interface link changes
 1102  *      @adapter: the adapter
 1103  *      @port_id: the port index that changed link state
 1104  *
 1105  *      Called when a port's link settings change to propagate the new values
 1106  *      to the associated PHY and MAC.  After performing the common tasks it
 1107  *      invokes an OS-specific handler.
 1108  */
 1109 void t3_link_changed(adapter_t *adapter, int port_id)
 1110 {
 1111         int link_ok, speed, duplex, fc;
 1112         struct port_info *pi = adap2pinfo(adapter, port_id);
 1113         struct cphy *phy = &pi->phy;
 1114         struct cmac *mac = &pi->mac;
 1115         struct link_config *lc = &pi->link_config;
 1116 
 1117         phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
 1118 
 1119         if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
 1120             uses_xaui(adapter)) {
 1121                 if (link_ok)
 1122                         t3b_pcs_reset(mac);
 1123                 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
 1124                              link_ok ? F_TXACTENABLE | F_RXEN : 0);
 1125         }
 1126         lc->link_ok = (unsigned char)link_ok;
 1127         lc->speed = speed < 0 ? SPEED_INVALID : speed;
 1128         lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
 1129         if (lc->requested_fc & PAUSE_AUTONEG)
 1130                 fc &= lc->requested_fc;
 1131         else
 1132                 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 1133 
 1134         if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
 1135                 /* Set MAC speed, duplex, and flow control to match PHY. */
 1136                 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
 1137                 lc->fc = (unsigned char)fc;
 1138         }
 1139 
 1140         t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
 1141 }
 1142 
 1143 /**
 1144  *      t3_link_start - apply link configuration to MAC/PHY
 1145  *      @phy: the PHY to setup
 1146  *      @mac: the MAC to setup
 1147  *      @lc: the requested link configuration
 1148  *
 1149  *      Set up a port's MAC and PHY according to a desired link configuration.
 1150  *      - If the PHY can auto-negotiate first decide what to advertise, then
 1151  *        enable/disable auto-negotiation as desired, and reset.
 1152  *      - If the PHY does not auto-negotiate just reset it.
 1153  *      - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 1154  *        otherwise do it later based on the outcome of auto-negotiation.
 1155  */
 1156 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
 1157 {
 1158         unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 1159 
 1160         lc->link_ok = 0;
 1161         if (lc->supported & SUPPORTED_Autoneg) {
 1162                 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
 1163                 if (fc) {
 1164                         lc->advertising |= ADVERTISED_Asym_Pause;
 1165                         if (fc & PAUSE_RX)
 1166                                 lc->advertising |= ADVERTISED_Pause;
 1167                 }
 1168                 phy->ops->advertise(phy, lc->advertising);
 1169 
 1170                 if (lc->autoneg == AUTONEG_DISABLE) {
 1171                         lc->speed = lc->requested_speed;
 1172                         lc->duplex = lc->requested_duplex;
 1173                         lc->fc = (unsigned char)fc;
 1174                         t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
 1175                                                    fc);
 1176                         /* Also disables autoneg */
 1177                         phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
 1178                         phy->ops->reset(phy, 0);
 1179                 } else
 1180                         phy->ops->autoneg_enable(phy);
 1181         } else {
 1182                 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
 1183                 lc->fc = (unsigned char)fc;
 1184                 phy->ops->reset(phy, 0);
 1185         }
 1186         return 0;
 1187 }
 1188 
 1189 /**
 1190  *      t3_set_vlan_accel - control HW VLAN extraction
 1191  *      @adapter: the adapter
 1192  *      @ports: bitmap of adapter ports to operate on
 1193  *      @on: enable (1) or disable (0) HW VLAN extraction
 1194  *
 1195  *      Enables or disables HW extraction of VLAN tags for the given port.
 1196  */
 1197 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
 1198 {
 1199         t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
 1200                          ports << S_VLANEXTRACTIONENABLE,
 1201                          on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
 1202 }
 1203 
/*
 * One entry of an interrupt-cause table consumed by the table-driven
 * handler t3_handle_intr_status().
 */
struct intr_info {
        unsigned int mask;       /* bits to check in interrupt status */
        const char *msg;         /* message to print or NULL */
        short stat_idx;          /* stat counter to increment or -1 */
        unsigned short fatal:1;  /* whether the condition reported is fatal */
};
 1210 
 1211 /**
 1212  *      t3_handle_intr_status - table driven interrupt handler
 1213  *      @adapter: the adapter that generated the interrupt
 1214  *      @reg: the interrupt status register to process
 1215  *      @mask: a mask to apply to the interrupt status
 1216  *      @acts: table of interrupt actions
 1217  *      @stats: statistics counters tracking interrupt occurences
 1218  *
 1219  *      A table driven interrupt handler that applies a set of masks to an
 1220  *      interrupt status word and performs the corresponding actions if the
 1221  *      interrupts described by the mask have occured.  The actions include
 1222  *      optionally printing a warning or alert message, and optionally
 1223  *      incrementing a stat counter.  The table is terminated by an entry
 1224  *      specifying mask 0.  Returns the number of fatal interrupt conditions.
 1225  */
 1226 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
 1227                                  unsigned int mask,
 1228                                  const struct intr_info *acts,
 1229                                  unsigned long *stats)
 1230 {
 1231         int fatal = 0;
 1232         unsigned int status = t3_read_reg(adapter, reg) & mask;
 1233 
 1234         for ( ; acts->mask; ++acts) {
 1235                 if (!(status & acts->mask)) continue;
 1236                 if (acts->fatal) {
 1237                         fatal++;
 1238                         CH_ALERT(adapter, "%s (0x%x)\n",
 1239                                  acts->msg, status & acts->mask);
 1240                 } else if (acts->msg)
 1241                         CH_WARN(adapter, "%s (0x%x)\n",
 1242                                 acts->msg, status & acts->mask);
 1243                 if (acts->stat_idx >= 0)
 1244                         stats[acts->stat_idx]++;
 1245         }
 1246         if (status)                           /* clear processed interrupts */
 1247                 t3_write_reg(adapter, reg, status);
 1248         return fatal;
 1249 }
 1250 
/*
 * Per-module interrupt cause masks.  Bits commented out inside a mask are
 * deliberately excluded (see the matching handler tables below).
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
                       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
                       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
                       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
                        F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
                        F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
                        F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
                        V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
                        V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
                        F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
                        /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
                        V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
                         F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
                         F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
                       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
                       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
                       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
                        V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
                        V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
                        V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
                        V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
                       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
                       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
                       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
                      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
                      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
                      F_MPS0 | F_CPL_SWITCH)
 1292 
 1293 /*
 1294  * Interrupt handler for the PCIX1 module.
 1295  */
 1296 static void pci_intr_handler(adapter_t *adapter)
 1297 {
 1298         static struct intr_info pcix1_intr_info[] = {
 1299                 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
 1300                 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
 1301                 { F_RCVTARABT, "PCI received target abort", -1, 1 },
 1302                 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
 1303                 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
 1304                 { F_DETPARERR, "PCI detected parity error", -1, 1 },
 1305                 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
 1306                 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
 1307                 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
 1308                   1 },
 1309                 { F_DETCORECCERR, "PCI correctable ECC error",
 1310                   STAT_PCI_CORR_ECC, 0 },
 1311                 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
 1312                 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
 1313                 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
 1314                   1 },
 1315                 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
 1316                   1 },
 1317                 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
 1318                   1 },
 1319                 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
 1320                   "error", -1, 1 },
 1321                 { 0 }
 1322         };
 1323 
 1324         if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
 1325                                   pcix1_intr_info, adapter->irq_stats))
 1326                 t3_fatal_err(adapter);
 1327 }
 1328 
 1329 /*
 1330  * Interrupt handler for the PCIE module.
 1331  */
 1332 static void pcie_intr_handler(adapter_t *adapter)
 1333 {
 1334         static struct intr_info pcie_intr_info[] = {
 1335                 { F_PEXERR, "PCI PEX error", -1, 1 },
 1336                 { F_UNXSPLCPLERRR,
 1337                   "PCI unexpected split completion DMA read error", -1, 1 },
 1338                 { F_UNXSPLCPLERRC,
 1339                   "PCI unexpected split completion DMA command error", -1, 1 },
 1340                 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
 1341                 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
 1342                 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
 1343                 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
 1344                 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
 1345                   "PCI MSI-X table/PBA parity error", -1, 1 },
 1346                 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
 1347                 { 0 }
 1348         };
 1349 
 1350         if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
 1351                 CH_ALERT(adapter, "PEX error code 0x%x\n",
 1352                          t3_read_reg(adapter, A_PCIE_PEX_ERR));
 1353 
 1354         if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
 1355                                   pcie_intr_info, adapter->irq_stats))
 1356                 t3_fatal_err(adapter);
 1357 }
 1358 
 1359 /*
 1360  * TP interrupt handler.
 1361  */
 1362 static void tp_intr_handler(adapter_t *adapter)
 1363 {
 1364         static struct intr_info tp_intr_info[] = {
 1365                 { 0xffffff,  "TP parity error", -1, 1 },
 1366                 { 0x1000000, "TP out of Rx pages", -1, 1 },
 1367                 { 0x2000000, "TP out of Tx pages", -1, 1 },
 1368                 { 0 }
 1369         };
 1370 
 1371         if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
 1372                                   tp_intr_info, NULL))
 1373                 t3_fatal_err(adapter);
 1374 }
 1375 
 1376 /*
 1377  * CIM interrupt handler.
 1378  */
 1379 static void cim_intr_handler(adapter_t *adapter)
 1380 {
 1381         static struct intr_info cim_intr_info[] = {
 1382                 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
 1383                 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
 1384                 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
 1385                 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
 1386                 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
 1387                 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
 1388                 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
 1389                 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
 1390                 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
 1391                 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
 1392                 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
 1393                 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
 1394                 { 0 }
 1395         };
 1396 
 1397         if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
 1398                                   cim_intr_info, NULL))
 1399                 t3_fatal_err(adapter);
 1400 }
 1401 
 1402 /*
 1403  * ULP RX interrupt handler.
 1404  */
 1405 static void ulprx_intr_handler(adapter_t *adapter)
 1406 {
 1407         static struct intr_info ulprx_intr_info[] = {
 1408                 { F_PARERR, "ULP RX parity error", -1, 1 },
 1409                 { 0 }
 1410         };
 1411 
 1412         if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
 1413                                   ulprx_intr_info, NULL))
 1414                 t3_fatal_err(adapter);
 1415 }
 1416 
 1417 /*
 1418  * ULP TX interrupt handler.
 1419  */
 1420 static void ulptx_intr_handler(adapter_t *adapter)
 1421 {
 1422         static struct intr_info ulptx_intr_info[] = {
 1423                 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
 1424                   STAT_ULP_CH0_PBL_OOB, 0 },
 1425                 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
 1426                   STAT_ULP_CH1_PBL_OOB, 0 },
 1427                 { 0 }
 1428         };
 1429 
 1430         if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
 1431                                   ulptx_intr_info, adapter->irq_stats))
 1432                 t3_fatal_err(adapter);
 1433 }
 1434 
/* Aggregate ingress/egress SPI framing-error bits used by the PM TX path. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
        F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
        F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
        F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
        F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
 1443 
 1444 /*
 1445  * PM TX interrupt handler.
 1446  */
 1447 static void pmtx_intr_handler(adapter_t *adapter)
 1448 {
 1449         static struct intr_info pmtx_intr_info[] = {
 1450                 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
 1451                 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
 1452                 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
 1453                 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
 1454                   "PMTX ispi parity error", -1, 1 },
 1455                 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
 1456                   "PMTX ospi parity error", -1, 1 },
 1457                 { 0 }
 1458         };
 1459 
 1460         if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
 1461                                   pmtx_intr_info, NULL))
 1462                 t3_fatal_err(adapter);
 1463 }
 1464 
/* Aggregate ingress/egress SPI framing-error bits used by the PM RX path. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
        F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
        F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
        F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
        F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
 1473 
 1474 /*
 1475  * PM RX interrupt handler.
 1476  */
 1477 static void pmrx_intr_handler(adapter_t *adapter)
 1478 {
 1479         static struct intr_info pmrx_intr_info[] = {
 1480                 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
 1481                 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
 1482                 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
 1483                 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
 1484                   "PMRX ispi parity error", -1, 1 },
 1485                 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
 1486                   "PMRX ospi parity error", -1, 1 },
 1487                 { 0 }
 1488         };
 1489 
 1490         if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
 1491                                   pmrx_intr_info, NULL))
 1492                 t3_fatal_err(adapter);
 1493 }
 1494 
 1495 /*
 1496  * CPL switch interrupt handler.
 1497  */
 1498 static void cplsw_intr_handler(adapter_t *adapter)
 1499 {
 1500         static struct intr_info cplsw_intr_info[] = {
 1501 //              { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
 1502                 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
 1503                 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
 1504                 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
 1505                 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
 1506                 { 0 }
 1507         };
 1508 
 1509         if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
 1510                                   cplsw_intr_info, NULL))
 1511                 t3_fatal_err(adapter);
 1512 }
 1513 
 1514 /*
 1515  * MPS interrupt handler.
 1516  */
 1517 static void mps_intr_handler(adapter_t *adapter)
 1518 {
 1519         static struct intr_info mps_intr_info[] = {
 1520                 { 0x1ff, "MPS parity error", -1, 1 },
 1521                 { 0 }
 1522         };
 1523 
 1524         if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
 1525                                   mps_intr_info, NULL))
 1526                 t3_fatal_err(adapter);
 1527 }
 1528 
 1529 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
 1530 
 1531 /*
 1532  * MC7 interrupt handler.
 1533  */
static void mc7_intr_handler(struct mc7 *mc7)
{
        adapter_t *adapter = mc7->adapter;
        u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

        /* Correctable error: count it and log the failing address/data. */
        if (cause & F_CE) {
                mc7->stats.corr_err++;
                CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
                        "data 0x%x 0x%x 0x%x\n", mc7->name,
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
        }

        /* Uncorrectable error: count it and log address/data at ALERT level. */
        if (cause & F_UE) {
                mc7->stats.uncorr_err++;
                CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
                         "data 0x%x 0x%x 0x%x\n", mc7->name,
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
        }

        /* Parity error: G_PE extracts the PE field from the cause word. */
        if (G_PE(cause)) {
                mc7->stats.parity_err++;
                CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
                         mc7->name, G_PE(cause));
        }

        /* Address error: the error-address register only exists on
         * rev > 0 parts, so rev 0 reports address 0. */
        if (cause & F_AE) {
                u32 addr = 0;

                if (adapter->params.rev > 0)
                        addr = t3_read_reg(adapter,
                                           mc7->offset + A_MC7_ERR_ADDR);
                mc7->stats.addr_err++;
                CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
                         mc7->name, addr);
        }

        if (cause & MC7_INTR_FATAL)
                t3_fatal_err(adapter);

        /* Ack (clear) exactly the causes handled above. */
        t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
 1581 
/* XGMAC causes treated as fatal: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                        V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
 1584 /*
 1585  * XGMAC interrupt handler.
 1586  */
static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
        u32 cause;
        struct cmac *mac;

        /* MAC 1, when present, serves the first port of the second group. */
        idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
        mac = &adap2pinfo(adap, idx)->mac;
        cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

        /* Parity errors are logged; the remaining causes only bump stats. */
        if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
                mac->stats.tx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
        }
        if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
                mac->stats.rx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
        }
        if (cause & F_TXFIFO_UNDERRUN)
                mac->stats.tx_fifo_urun++;
        if (cause & F_RXFIFO_OVERFLOW)
                mac->stats.rx_fifo_ovfl++;
        if (cause & V_SERDES_LOS(M_SERDES_LOS))
                mac->stats.serdes_signal_loss++;
        if (cause & F_XAUIPCSCTCERR)
                mac->stats.xaui_pcs_ctc_err++;
        if (cause & F_XAUIPCSALIGNCHANGE)
                mac->stats.xaui_pcs_align_change++;

        /* Ack all observed causes, then escalate FIFO parity errors. */
        t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
        if (cause & XGM_INTR_FATAL)
                t3_fatal_err(adap);
        return cause != 0;
}
 1620 
 1621 /*
 1622  * Interrupt handler for PHY events.
 1623  */
int t3_phy_intr_handler(adapter_t *adapter)
{
        u32 mask, gpi = adapter_info(adapter)->gpio_intr;
        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

        for_each_port(adapter, i) {
                struct port_info *p = adap2pinfo(adapter, i);

                /*
                 * Peel off the lowest set bit of the board's GPIO interrupt
                 * mask; ports are assumed to own GPIO bits in ascending
                 * port order.
                 */
                mask = gpi - (gpi & (gpi - 1));
                gpi -= mask;

                if (!(p->port_type->caps & SUPPORTED_IRQ))
                        continue;

                if (cause & mask) {
                        int phy_cause = p->phy.ops->intr_handler(&p->phy);

                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
                                p->phy.fifo_errors++;
                }
        }

        /* Ack all T3DBG (GPIO) causes observed above. */
        t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
        return 0;
}
 1651 
 1652 /**
 1653  *      t3_slow_intr_handler - control path interrupt handler
 1654  *      @adapter: the adapter
 1655  *
 1656  *      T3 interrupt handler for non-data interrupt events, e.g., errors.
 1657  *      The designation 'slow' is because it involves register reads, while
 1658  *      data interrupts typically don't involve any MMIOs.
 1659  */
int t3_slow_intr_handler(adapter_t *adapter)
{
        u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

        /* Only service the causes we actually enabled; return 0 if none. */
        cause &= adapter->slow_intr_mask;
        if (!cause)
                return 0;
        /* PCI handling depends on whether this is a PCIe or PCI-X part. */
        if (cause & F_PCIM0) {
                if (is_pcie(adapter))
                        pcie_intr_handler(adapter);
                else
                        pci_intr_handler(adapter);
        }
        /* Dispatch each remaining cause bit to its module handler. */
        if (cause & F_SGE3)
                t3_sge_err_intr_handler(adapter);
        if (cause & F_MC7_PMRX)
                mc7_intr_handler(&adapter->pmrx);
        if (cause & F_MC7_PMTX)
                mc7_intr_handler(&adapter->pmtx);
        if (cause & F_MC7_CM)
                mc7_intr_handler(&adapter->cm);
        if (cause & F_CIM)
                cim_intr_handler(adapter);
        if (cause & F_TP1)
                tp_intr_handler(adapter);
        if (cause & F_ULP2_RX)
                ulprx_intr_handler(adapter);
        if (cause & F_ULP2_TX)
                ulptx_intr_handler(adapter);
        if (cause & F_PM1_RX)
                pmrx_intr_handler(adapter);
        if (cause & F_PM1_TX)
                pmtx_intr_handler(adapter);
        if (cause & F_CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & F_MPS0)
                mps_intr_handler(adapter);
        if (cause & F_MC5A)
                t3_mc5_intr_handler(&adapter->mc5);
        if (cause & F_XGMAC0_0)
                mac_intr_handler(adapter, 0);
        if (cause & F_XGMAC0_1)
                mac_intr_handler(adapter, 1);
        if (cause & F_T3DBG)
                t3_os_ext_intr_handler(adapter);

        /* Clear the interrupts just processed. */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
        (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
        return 1;
}
 1711 
 1712 /**
 1713  *      t3_intr_enable - enable interrupts
 1714  *      @adapter: the adapter whose interrupts should be enabled
 1715  *
 1716  *      Enable interrupts by setting the interrupt enable registers of the
 1717  *      various HW modules and then enabling the top-level interrupt
 1718  *      concentrator.
 1719  */
void t3_intr_enable(adapter_t *adapter)
{
        /* Per-module interrupt-enable registers and their masks.  The MC7
         * enable register is written three times, rebased to the PMRX,
         * PMTX, and CM MC7 instances. */
        static struct addr_val_pair intr_en_avp[] = {
                { A_SG_INT_ENABLE, SGE_INTR_MASK },
                { A_MC7_INT_ENABLE, MC7_INTR_MASK },
                { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                        MC7_INTR_MASK },
                { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                        MC7_INTR_MASK },
                { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
                { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
                { A_TP_INT_ENABLE, 0x3bfffff },
                { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
                { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
                { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
                { A_MPS_INT_ENABLE, MPS_INTR_MASK },
        };

        /* Remember what we enabled; t3_slow_intr_handler filters on this. */
        adapter->slow_intr_mask = PL_INTR_MASK;

        t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

        /* Rev > 0 parts get additional CPL switch and ULP TX causes. */
        if (adapter->params.rev > 0) {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                             CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                             ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                             F_PBL_BOUND_ERR_CH1);
        } else {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
        }

        /* Enable the board's PHY GPIO interrupt bits (active-low config
         * and enable mask use the same per-board bit set). */
        t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
                     adapter_info(adapter)->gpio_intr);
        t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
                     adapter_info(adapter)->gpio_intr);
        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
        else
                t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
        /* Finally unmask everything at the top-level concentrator. */
        t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
        (void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
}
 1764 
 1765 /**
 1766  *      t3_intr_disable - disable a card's interrupts
 1767  *      @adapter: the adapter whose interrupts should be disabled
 1768  *
 1769  *      Disable interrupts.  We only disable the top-level interrupt
 1770  *      concentrator and the SGE data interrupts.
 1771  */
void t3_intr_disable(adapter_t *adapter)
{
        /* Mask everything at the top-level concentrator and flush the
         * write, then zero the cached mask so the slow handler sees it. */
        t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
        (void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
        adapter->slow_intr_mask = 0;
}
 1778 
 1779 /**
 1780  *      t3_intr_clear - clear all interrupts
 1781  *      @adapter: the adapter whose interrupts should be cleared
 1782  *
 1783  *      Clears all interrupts.
 1784  */
void t3_intr_clear(adapter_t *adapter)
{
        /* Every per-module interrupt-cause register to be acked with 1s. */
        static const unsigned int cause_reg_addr[] = {
                A_SG_INT_CAUSE,
                A_SG_RSPQ_FL_STATUS,
                A_PCIX_INT_CAUSE,
                A_MC7_INT_CAUSE,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                A_CIM_HOST_INT_CAUSE,
                A_TP_INT_CAUSE,
                A_MC5_DB_INT_CAUSE,
                A_ULPRX_INT_CAUSE,
                A_ULPTX_INT_CAUSE,
                A_CPL_INTR_CAUSE,
                A_PM1_TX_INT_CAUSE,
                A_PM1_RX_INT_CAUSE,
                A_MPS_INT_CAUSE,
                A_T3DBG_INT_CAUSE,
        };
        unsigned int i;

        /* Clear PHY and MAC interrupts for each port. */
        for_each_port(adapter, i)
                t3_port_intr_clear(adapter, i);

        for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
                t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

        /* PCIe parts have an extra PEX error register to clear. */
        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
        t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
        (void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
}
 1819 
 1820 /**
 1821  *      t3_port_intr_enable - enable port-specific interrupts
 1822  *      @adapter: associated adapter
 1823  *      @idx: index of port whose interrupts should be enabled
 1824  *
 1825  *      Enable port-specific (i.e., MAC and PHY) interrupts for the given
 1826  *      adapter port.
 1827  */
 1828 void t3_port_intr_enable(adapter_t *adapter, int idx)
 1829 {
 1830         struct port_info *pi = adap2pinfo(adapter, idx);
 1831 
 1832         t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
 1833         pi->phy.ops->intr_enable(&pi->phy);
 1834 }
 1835 
 1836 /**
 1837  *      t3_port_intr_disable - disable port-specific interrupts
 1838  *      @adapter: associated adapter
 1839  *      @idx: index of port whose interrupts should be disabled
 1840  *
 1841  *      Disable port-specific (i.e., MAC and PHY) interrupts for the given
 1842  *      adapter port.
 1843  */
 1844 void t3_port_intr_disable(adapter_t *adapter, int idx)
 1845 {
 1846         struct port_info *pi = adap2pinfo(adapter, idx);
 1847 
 1848         t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
 1849         pi->phy.ops->intr_disable(&pi->phy);
 1850 }
 1851 
 1852 /**
 1853  *      t3_port_intr_clear - clear port-specific interrupts
 1854  *      @adapter: associated adapter
 1855  *      @idx: index of port whose interrupts to clear
 1856  *
 1857  *      Clear port-specific (i.e., MAC and PHY) interrupts for the given
 1858  *      adapter port.
 1859  */
 1860 void t3_port_intr_clear(adapter_t *adapter, int idx)
 1861 {
 1862         struct port_info *pi = adap2pinfo(adapter, idx);
 1863 
 1864         t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
 1865         pi->phy.ops->intr_clear(&pi->phy);
 1866 }
 1867 
 1868 #define SG_CONTEXT_CMD_ATTEMPTS 100
 1869 
 1870 /**
 1871  *      t3_sge_write_context - write an SGE context
 1872  *      @adapter: the adapter
 1873  *      @id: the context id
 1874  *      @type: the context type
 1875  *
 1876  *      Program an SGE context with the values already loaded in the
 1877  *      CONTEXT_DATA? registers.
 1878  */
static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
                                unsigned int type)
{
        /* All-ones masks: commit every bit of all four context data words. */
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
        /* Opcode 1 = write; then poll until the command engine is idle. */
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
 1891 
 1892 /**
 1893  *      t3_sge_init_ecntxt - initialize an SGE egress context
 1894  *      @adapter: the adapter to configure
 1895  *      @id: the context id
 1896  *      @gts_enable: whether to enable GTS for the context
 1897  *      @type: the egress context type
 1898  *      @respq: associated response queue
 1899  *      @base_addr: base address of queue
 1900  *      @size: number of queue entries
 1901  *      @token: uP token
 1902  *      @gen: initial generation value for the context
 1903  *      @cidx: consumer pointer
 1904  *
 1905  *      Initialize an SGE egress context and make it ready for use.  If the
 1906  *      platform allows concurrent context operations, the caller is
 1907  *      responsible for appropriate locking.
 1908  */
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
                       enum sge_context_type type, int respq, u64 base_addr,
                       unsigned int size, unsigned int token, int gen,
                       unsigned int cidx)
{
        /* Offload contexts start with 0 credits, all others with FW_WR_NUM. */
        unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

        if (base_addr & 0xfff)     /* must be 4K aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Spread base address bits 63:12 across the data words:
         * 16 bits into DATA1, 32 into DATA2, and the top 4 into DATA3. */
        base_addr >>= 12;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
                     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
                     V_EC_BASE_LO((u32)base_addr & 0xffff));
        base_addr >>= 16;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
                     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
                     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
                     F_EC_VALID);
        return t3_sge_write_context(adapter, id, F_EGRESS);
}
 1935 
 1936 /**
 1937  *      t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 1938  *      @adapter: the adapter to configure
 1939  *      @id: the context id
 1940  *      @gts_enable: whether to enable GTS for the context
 1941  *      @base_addr: base address of queue
 1942  *      @size: number of queue entries
 1943  *      @bsize: size of each buffer for this queue
 1944  *      @cong_thres: threshold to signal congestion to upstream producers
 1945  *      @gen: initial generation value for the context
 1946  *      @cidx: consumer pointer
 1947  *
 1948  *      Initialize an SGE free list context and make it ready for use.  The
 1949  *      caller is responsible for ensuring only one context operation occurs
 1950  *      at a time.
 1951  */
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
                        u64 base_addr, unsigned int size, unsigned int bsize,
                        unsigned int cong_thres, int gen, unsigned int cidx)
{
        if (base_addr & 0xfff)     /* must be 4K aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Base address bits 43:12 go in DATA0, the remainder in DATA1. */
        base_addr >>= 12;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
                     V_FL_BASE_HI((u32)base_addr) |
                     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
        /* cidx and bsize each straddle a word boundary and are split into
         * LO/HI fields across DATA2/DATA3. */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
                     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
                     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
                     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
                     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
        return t3_sge_write_context(adapter, id, F_FREELIST);
}
 1975 
 1976 /**
 1977  *      t3_sge_init_rspcntxt - initialize an SGE response queue context
 1978  *      @adapter: the adapter to configure
 1979  *      @id: the context id
 1980  *      @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 1981  *      @base_addr: base address of queue
 1982  *      @size: number of queue entries
 1983  *      @fl_thres: threshold for selecting the normal or jumbo free list
 1984  *      @gen: initial generation value for the context
 1985  *      @cidx: consumer pointer
 1986  *
 1987  *      Initialize an SGE response queue context and make it ready for use.
 1988  *      The caller is responsible for ensuring only one context operation
 1989  *      occurs at a time.
 1990  */
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
                         u64 base_addr, unsigned int size,
                         unsigned int fl_thres, int gen, unsigned int cidx)
{
        unsigned int intr = 0;

        if (base_addr & 0xfff)     /* must be 4K aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Base address bits 43:12 go in DATA1, the remainder in DATA2. */
        base_addr >>= 12;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
                     V_CQ_INDEX(cidx));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
        base_addr >>= 32;
        /* irq_vec_idx < 0 means no interrupt for this queue. */
        if (irq_vec_idx >= 0)
                intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
                     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
        return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
 2014 
 2015 /**
 2016  *      t3_sge_init_cqcntxt - initialize an SGE completion queue context
 2017  *      @adapter: the adapter to configure
 2018  *      @id: the context id
 2019  *      @base_addr: base address of queue
 2020  *      @size: number of queue entries
 2021  *      @rspq: response queue for async notifications
 2022  *      @ovfl_mode: CQ overflow mode
 2023  *      @credits: completion queue credits
 2024  *      @credit_thres: the credit threshold
 2025  *
 2026  *      Initialize an SGE completion queue context and make it ready for use.
 2027  *      The caller is responsible for ensuring only one context operation
 2028  *      occurs at a time.
 2029  */
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
                        unsigned int size, int rspq, int ovfl_mode,
                        unsigned int credits, unsigned int credit_thres)
{
        if (base_addr & 0xfff)     /* must be 4K aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Base address bits 43:12 go in DATA1, the remainder in DATA2. */
        base_addr >>= 12;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
        base_addr >>= 32;
        /* Generation starts at 1; CQ_ERR mirrors the overflow mode. */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
                     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
                     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
                     V_CQ_ERR(ovfl_mode));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
                     V_CQ_CREDIT_THRES(credit_thres));
        return t3_sge_write_context(adapter, id, F_CQ);
}
 2051 
 2052 /**
 2053  *      t3_sge_enable_ecntxt - enable/disable an SGE egress context
 2054  *      @adapter: the adapter
 2055  *      @id: the egress context id
 2056  *      @enable: enable (1) or disable (0) the context
 2057  *
 2058  *      Enable or disable an SGE egress context.  The caller is responsible for
 2059  *      ensuring only one context operation occurs at a time.
 2060  */
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Masks select only the EC_VALID bit, leaving the rest of the
         * context untouched by this partial write. */
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
 2076 
 2077 /**
 2078  *      t3_sge_disable_fl - disable an SGE free-buffer list
 2079  *      @adapter: the adapter
 2080  *      @id: the free list context id
 2081  *
 2082  *      Disable an SGE free-buffer list.  The caller is responsible for
 2083  *      ensuring only one context operation occurs at a time.
 2084  */
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Partial write that zeroes only the FL_SIZE field, disabling the
         * free list without touching the rest of the context. */
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
 2100 
 2101 /**
 2102  *      t3_sge_disable_rspcntxt - disable an SGE response queue
 2103  *      @adapter: the adapter
 2104  *      @id: the response queue context id
 2105  *
 2106  *      Disable an SGE response queue.  The caller is responsible for
 2107  *      ensuring only one context operation occurs at a time.
 2108  */
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Partial write that zeroes only the CQ_SIZE field, disabling the
         * response queue without touching the rest of the context. */
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
 2124 
 2125 /**
 2126  *      t3_sge_disable_cqcntxt - disable an SGE completion queue
 2127  *      @adapter: the adapter
 2128  *      @id: the completion queue context id
 2129  *
 2130  *      Disable an SGE completion queue.  The caller is responsible for
 2131  *      ensuring only one context operation occurs at a time.
 2132  */
int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Partial write that zeroes only the CQ_SIZE field, disabling the
         * completion queue without touching the rest of the context. */
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
 2148 
 2149 /**
 2150  *      t3_sge_cqcntxt_op - perform an operation on a completion queue context
 2151  *      @adapter: the adapter
 2152  *      @id: the context id
 2153  *      @op: the operation to perform
 2154  *      @credits: credits to return to the CQ
 2155  *
 2156  *      Perform the selected operation on an SGE completion queue context.
 2157  *      The caller is responsible for ensuring only one context operation
 2158  *      occurs at a time.
 2159  *
 2160  *      For most operations the function returns the current HW position in
 2161  *      the completion queue.
 2162  */
int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
                      unsigned int credits)
{
        u32 val;

        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        /* Returned credits ride in the upper 16 bits of DATA0. */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
                     V_CONTEXT(id) | F_CQ);
        if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                                0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
                return -EIO;

        /* Opcodes 2-6 report the current CQ index.  Rev > 0 parts return
         * it with the command; rev 0 parts need an explicit context read
         * (opcode 0) to fetch the index from DATA0. */
        if (op >= 2 && op < 7) {
                if (adapter->params.rev > 0)
                        return G_CQ_INDEX(val);

                t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                             V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
                if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
                                    F_CONTEXT_CMD_BUSY, 0,
                                    SG_CONTEXT_CMD_ATTEMPTS, 1))
                        return -EIO;
                return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
        }
        return 0;
}
 2192 
 2193 /**
 2194  *      t3_sge_read_context - read an SGE context
 2195  *      @type: the context type
 2196  *      @adapter: the adapter
 2197  *      @id: the context id
 2198  *      @data: holds the retrieved context
 2199  *
 2200  *      Read an SGE egress context.  The caller is responsible for ensuring
 2201  *      only one context operation occurs at a time.
 2202  */
 2203 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
 2204                                unsigned int id, u32 data[4])
 2205 {
 2206         if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
 2207                 return -EBUSY;
 2208 
 2209         t3_write_reg(adapter, A_SG_CONTEXT_CMD,
 2210                      V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
 2211         if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
 2212                             SG_CONTEXT_CMD_ATTEMPTS, 1))
 2213                 return -EIO;
 2214         data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
 2215         data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
 2216         data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
 2217         data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
 2218         return 0;
 2219 }
 2220 
 2221 /**
 2222  *      t3_sge_read_ecntxt - read an SGE egress context
 2223  *      @adapter: the adapter
 2224  *      @id: the context id
 2225  *      @data: holds the retrieved context
 2226  *
 2227  *      Read an SGE egress context.  The caller is responsible for ensuring
 2228  *      only one context operation occurs at a time.
 2229  */
 2230 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
 2231 {
 2232         if (id >= 65536)
 2233                 return -EINVAL;
 2234         return t3_sge_read_context(F_EGRESS, adapter, id, data);
 2235 }
 2236 
 2237 /**
 2238  *      t3_sge_read_cq - read an SGE CQ context
 2239  *      @adapter: the adapter
 2240  *      @id: the context id
 2241  *      @data: holds the retrieved context
 2242  *
 2243  *      Read an SGE CQ context.  The caller is responsible for ensuring
 2244  *      only one context operation occurs at a time.
 2245  */
 2246 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
 2247 {
 2248         if (id >= 65536)
 2249                 return -EINVAL;
 2250         return t3_sge_read_context(F_CQ, adapter, id, data);
 2251 }
 2252 
 2253 /**
 2254  *      t3_sge_read_fl - read an SGE free-list context
 2255  *      @adapter: the adapter
 2256  *      @id: the context id
 2257  *      @data: holds the retrieved context
 2258  *
 2259  *      Read an SGE free-list context.  The caller is responsible for ensuring
 2260  *      only one context operation occurs at a time.
 2261  */
 2262 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
 2263 {
 2264         if (id >= SGE_QSETS * 2)
 2265                 return -EINVAL;
 2266         return t3_sge_read_context(F_FREELIST, adapter, id, data);
 2267 }
 2268 
 2269 /**
 2270  *      t3_sge_read_rspq - read an SGE response queue context
 2271  *      @adapter: the adapter
 2272  *      @id: the context id
 2273  *      @data: holds the retrieved context
 2274  *
 2275  *      Read an SGE response queue context.  The caller is responsible for
 2276  *      ensuring only one context operation occurs at a time.
 2277  */
 2278 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
 2279 {
 2280         if (id >= SGE_QSETS)
 2281                 return -EINVAL;
 2282         return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
 2283 }
 2284 
 2285 /**
 2286  *      t3_config_rss - configure Rx packet steering
 2287  *      @adapter: the adapter
 2288  *      @rss_config: RSS settings (written to TP_RSS_CONFIG)
 2289  *      @cpus: values for the CPU lookup table (0xff terminated)
 2290  *      @rspq: values for the response queue lookup table (0xffff terminated)
 2291  *
 2292  *      Programs the receive packet steering logic.  @cpus and @rspq provide
 2293  *      the values for the CPU and response queue lookup tables.  If they
 2294  *      provide fewer values than the size of the tables the supplied values
 2295  *      are used repeatedly until the tables are fully populated.
 2296  */
 2297 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
 2298                    const u16 *rspq)
 2299 {
 2300         int i, j, cpu_idx = 0, q_idx = 0;
 2301 
 2302         if (cpus)
 2303                 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
 2304                         u32 val = i << 16;
 2305 
 2306                         for (j = 0; j < 2; ++j) {
 2307                                 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
 2308                                 if (cpus[cpu_idx] == 0xff)
 2309                                         cpu_idx = 0;
 2310                         }
 2311                         t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
 2312                 }
 2313 
 2314         if (rspq)
 2315                 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
 2316                         t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
 2317                                      (i << 16) | rspq[q_idx++]);
 2318                         if (rspq[q_idx] == 0xffff)
 2319                                 q_idx = 0;
 2320                 }
 2321 
 2322         t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
 2323 }
 2324 
 2325 /**
 2326  *      t3_read_rss - read the contents of the RSS tables
 2327  *      @adapter: the adapter
 2328  *      @lkup: holds the contents of the RSS lookup table
 2329  *      @map: holds the contents of the RSS map table
 2330  *
 2331  *      Reads the contents of the receive packet steering tables.
 2332  */
 2333 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
 2334 {
 2335         int i;
 2336         u32 val;
 2337 
 2338         if (lkup)
 2339                 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
 2340                         t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
 2341                                      0xffff0000 | i);
 2342                         val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
 2343                         if (!(val & 0x80000000))
 2344                                 return -EAGAIN;
 2345                         *lkup++ = (u8)val;
 2346                         *lkup++ = (u8)(val >> 8);
 2347                 }
 2348 
 2349         if (map)
 2350                 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
 2351                         t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
 2352                                      0xffff0000 | i);
 2353                         val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
 2354                         if (!(val & 0x80000000))
 2355                                 return -EAGAIN;
 2356                         *map++ = (u16)val;
 2357                 }
 2358         return 0;
 2359 }
 2360 
 2361 /**
 2362  *      t3_tp_set_offload_mode - put TP in NIC/offload mode
 2363  *      @adap: the adapter
 2364  *      @enable: 1 to select offload mode, 0 for regular NIC
 2365  *
 2366  *      Switches TP to NIC/offload mode.
 2367  */
 2368 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
 2369 {
 2370         if (is_offload(adap) || !enable)
 2371                 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
 2372                                  V_NICMODE(!enable));
 2373 }
 2374 
 2375 /**
 2376  *      tp_wr_bits_indirect - set/clear bits in an indirect TP register
 2377  *      @adap: the adapter
 2378  *      @addr: the indirect TP register address
 2379  *      @mask: specifies the field within the register to modify
 2380  *      @val: new value for the field
 2381  *
 2382  *      Sets a field of an indirect TP register to the given value.
 2383  */
 2384 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
 2385                                 unsigned int mask, unsigned int val)
 2386 {
 2387         t3_write_reg(adap, A_TP_PIO_ADDR, addr);
 2388         val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
 2389         t3_write_reg(adap, A_TP_PIO_DATA, val);
 2390 }
 2391 
 2392 /**
 2393  *      t3_enable_filters - enable the HW filters
 2394  *      @adap: the adapter
 2395  *
 2396  *      Enables the HW filters for NIC traffic.
 2397  */
 2398 void t3_enable_filters(adapter_t *adap)
 2399 {
 2400         t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
 2401         t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
 2402         t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
 2403         tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
 2404 }
 2405 
 2406 /**
 2407  *      pm_num_pages - calculate the number of pages of the payload memory
 2408  *      @mem_size: the size of the payload memory
 2409  *      @pg_size: the size of each payload memory page
 2410  *
 2411  *      Calculate the number of pages, each of the given size, that fit in a
 2412  *      memory of the specified size, respecting the HW requirement that the
 2413  *      number of pages must be a multiple of 24.
 2414  */
 2415 static inline unsigned int pm_num_pages(unsigned int mem_size,
 2416                                         unsigned int pg_size)
 2417 {
 2418         unsigned int n = mem_size / pg_size;
 2419 
 2420         return n - n % 24;
 2421 }
 2422 
 2423 #define mem_region(adap, start, size, reg) \
 2424         t3_write_reg((adap), A_ ## reg, (start)); \
 2425         start += size
 2426 
 2427 /**
 2428  *      partition_mem - partition memory and configure TP memory settings
 2429  *      @adap: the adapter
 2430  *      @p: the TP parameters
 2431  *
 2432  *      Partitions context and payload memory and configures TP's memory
 2433  *      registers.
 2434  */
 2435 static void partition_mem(adapter_t *adap, const struct tp_params *p)
 2436 {
 2437         unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
 2438         unsigned int timers = 0, timers_shift = 22;
 2439 
 2440         if (adap->params.rev > 0) {
 2441                 if (tids <= 16 * 1024) {
 2442                         timers = 1;
 2443                         timers_shift = 16;
 2444                 } else if (tids <= 64 * 1024) {
 2445                         timers = 2;
 2446                         timers_shift = 18;
 2447                 } else if (tids <= 256 * 1024) {
 2448                         timers = 3;
 2449                         timers_shift = 20;
 2450                 }
 2451         }
 2452 
 2453         t3_write_reg(adap, A_TP_PMM_SIZE,
 2454                      p->chan_rx_size | (p->chan_tx_size >> 16));
 2455 
 2456         t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
 2457         t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
 2458         t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
 2459         t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
 2460                          V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
 2461 
 2462         t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
 2463         t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
 2464         t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
 2465 
 2466         pstructs = p->rx_num_pgs + p->tx_num_pgs;
 2467         /* Add a bit of headroom and make multiple of 24 */
 2468         pstructs += 48;
 2469         pstructs -= pstructs % 24;
 2470         t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
 2471 
 2472         m = tids * TCB_SIZE;
 2473         mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
 2474         mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
 2475         t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
 2476         m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
 2477         mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
 2478         mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
 2479         mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
 2480         mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
 2481 
 2482         m = (m + 4095) & ~0xfff;
 2483         t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
 2484         t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
 2485 
 2486         tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
 2487         m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
 2488             adap->params.mc5.nfilters - adap->params.mc5.nroutes;
 2489         if (tids < m)
 2490                 adap->params.mc5.nservers += m - tids;
 2491 }
 2492 
/* Write @val to indirect TP register @addr via the PIO address/data pair.
 * The address write must precede the data write. */
static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
        t3_write_reg(adap, A_TP_PIO_ADDR, addr);
        t3_write_reg(adap, A_TP_PIO_DATA, val);
}
 2498 
/*
 * tp_config - set TP's static, mostly revision-dependent configuration.
 * @adap: the adapter
 * @p: the TP parameters (currently unused here; kept for symmetry with the
 *     other TP setup helpers)
 *
 * Programs checksum offload, TCP option handling, delayed-ACK behaviour,
 * pacing and the per-revision/per-port-count quirks.
 */
static void tp_config(adapter_t *adap, const struct tp_params *p)
{
        t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
                     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
                     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
        t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
                     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
                     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
        t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
                     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
                     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
                     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
        t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
                         F_IPV6ENABLE | F_NICMODE);
        t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
        t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
        /* The "early send" enable bit moved between rev 0 and later parts. */
        t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
                         adap->params.rev > 0 ? F_ENABLEESND :
                                                F_T3A_ENABLEESND);
        t3_set_reg_field(adap, A_TP_PC_CONFIG,
                         F_ENABLEEPCMDAFULL,
                         F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
                         F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
        t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
        /* NOTE(review): two back-to-back writes of A_TP_PROXY_FLOW_CNTL with
         * different values; presumably programming two thresholds through
         * one register -- confirm against the register description. */
        t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
        t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

        if (adap->params.rev > 0) {
                tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
                t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
                                 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
                t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
                tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
                tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
                tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
        } else
                /* Rev 0 parts only support fixed-rate pacing. */
                t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
        t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
        t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);

        /* Extra steering setup for 4-port adapters. */
        if (adap->params.nports > 2) {
                t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
                                 F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
                tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
                                    V_RXMAPMODE(M_RXMAPMODE), 0);
                tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
                               V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
                               F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
                               F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
                tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
                tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
                tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
        }
}
 2556 
/* TCP timer values in ms */
#define TP_DACK_TIMER 50        /* delayed-ACK timer */
#define TP_RTO_MIN    250       /* minimum retransmit timeout */
 2560 
 2561 /**
 2562  *      tp_set_timers - set TP timing parameters
 2563  *      @adap: the adapter to set
 2564  *      @core_clk: the core clock frequency in Hz
 2565  *
 2566  *      Set TP's timing parameters, such as the various timer resolutions and
 2567  *      the TCP timer values.
 2568  */
 2569 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
 2570 {
 2571         unsigned int tre = adap->params.tp.tre;
 2572         unsigned int dack_re = adap->params.tp.dack_re;
 2573         unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
 2574         unsigned int tps = core_clk >> tre;
 2575 
 2576         t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
 2577                      V_DELAYEDACKRESOLUTION(dack_re) |
 2578                      V_TIMESTAMPRESOLUTION(tstamp_re));
 2579         t3_write_reg(adap, A_TP_DACK_TIMER,
 2580                      (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
 2581         t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
 2582         t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
 2583         t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
 2584         t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
 2585         t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
 2586                      V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
 2587                      V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
 2588                      V_KEEPALIVEMAX(9));
 2589 
 2590 #define SECONDS * tps
 2591 
 2592         t3_write_reg(adap, A_TP_MSL,
 2593                      adap->params.rev > 0 ? 0 : 2 SECONDS);
 2594         t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
 2595         t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
 2596         t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
 2597         t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
 2598         t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
 2599         t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
 2600         t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
 2601         t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
 2602 
 2603 #undef SECONDS
 2604 }
 2605 
 2606 #ifdef CONFIG_CHELSIO_T3_CORE
 2607 /**
 2608  *      t3_tp_set_coalescing_size - set receive coalescing size
 2609  *      @adap: the adapter
 2610  *      @size: the receive coalescing size
 2611  *      @psh: whether a set PSH bit should deliver coalesced data
 2612  *
 2613  *      Set the receive coalescing size and PSH bit handling.
 2614  */
 2615 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
 2616 {
 2617         u32 val;
 2618 
 2619         if (size > MAX_RX_COALESCING_LEN)
 2620                 return -EINVAL;
 2621 
 2622         val = t3_read_reg(adap, A_TP_PARA_REG3);
 2623         val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
 2624 
 2625         if (size) {
 2626                 val |= F_RXCOALESCEENABLE;
 2627                 if (psh)
 2628                         val |= F_RXCOALESCEPSHEN;
 2629                 size = min(MAX_RX_COALESCING_LEN, size);
 2630                 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
 2631                              V_MAXRXDATA(MAX_RX_COALESCING_LEN));
 2632         }
 2633         t3_write_reg(adap, A_TP_PARA_REG3, val);
 2634         return 0;
 2635 }
 2636 
 2637 /**
 2638  *      t3_tp_set_max_rxsize - set the max receive size
 2639  *      @adap: the adapter
 2640  *      @size: the max receive size
 2641  *
 2642  *      Set TP's max receive size.  This is the limit that applies when
 2643  *      receive coalescing is disabled.
 2644  */
 2645 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
 2646 {
 2647         t3_write_reg(adap, A_TP_PARA_REG7,
 2648                      V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
 2649 }
 2650 
 2651 static void __devinit init_mtus(unsigned short mtus[])
 2652 {
 2653         /*
 2654          * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
 2655          * it can accomodate max size TCP/IP headers when SACK and timestamps
 2656          * are enabled and still have at least 8 bytes of payload.
 2657          */
 2658         mtus[0] = 88;
 2659         mtus[1] = 88;
 2660         mtus[2] = 256;
 2661         mtus[3] = 512;
 2662         mtus[4] = 576;
 2663         mtus[5] = 1024;
 2664         mtus[6] = 1280;
 2665         mtus[7] = 1492;
 2666         mtus[8] = 1500;
 2667         mtus[9] = 2002;
 2668         mtus[10] = 2048;
 2669         mtus[11] = 4096;
 2670         mtus[12] = 4352;
 2671         mtus[13] = 8192;
 2672         mtus[14] = 9000;
 2673         mtus[15] = 9600;
 2674 }
 2675 
 2676 /**
 2677  *      init_cong_ctrl - initialize congestion control parameters
 2678  *      @a: the alpha values for congestion control
 2679  *      @b: the beta values for congestion control
 2680  *
 2681  *      Initialize the congestion control parameters.
 2682  */
 2683 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
 2684 {
 2685         a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
 2686         a[9] = 2;
 2687         a[10] = 3;
 2688         a[11] = 4;
 2689         a[12] = 5;
 2690         a[13] = 6;
 2691         a[14] = 7;
 2692         a[15] = 8;
 2693         a[16] = 9;
 2694         a[17] = 10;
 2695         a[18] = 14;
 2696         a[19] = 17;
 2697         a[20] = 21;
 2698         a[21] = 25;
 2699         a[22] = 30;
 2700         a[23] = 35;
 2701         a[24] = 45;
 2702         a[25] = 60;
 2703         a[26] = 80;
 2704         a[27] = 100;
 2705         a[28] = 200;
 2706         a[29] = 300;
 2707         a[30] = 400;
 2708         a[31] = 500;
 2709 
 2710         b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
 2711         b[9] = b[10] = 1;
 2712         b[11] = b[12] = 2;
 2713         b[13] = b[14] = b[15] = b[16] = 3;
 2714         b[17] = b[18] = b[19] = b[20] = b[21] = 4;
 2715         b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
 2716         b[28] = b[29] = 6;
 2717         b[30] = b[31] = 7;
 2718 }
 2719 
/* The minimum additive increment value for the congestion control table.
 * Unsigned so the max() in t3_load_mtus() compares in unsigned arithmetic. */
#define CC_MIN_INCR 2U
 2722 
 2723 /**
 2724  *      t3_load_mtus - write the MTU and congestion control HW tables
 2725  *      @adap: the adapter
 2726  *      @mtus: the unrestricted values for the MTU table
 2727  *      @alpha: the values for the congestion control alpha parameter
 2728  *      @beta: the values for the congestion control beta parameter
 2729  *      @mtu_cap: the maximum permitted effective MTU
 2730  *
 2731  *      Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 2732  *      Update the high-speed congestion control table with the supplied alpha,
 2733  *      beta, and MTUs.
 2734  */
 2735 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
 2736                   unsigned short alpha[NCCTRL_WIN],
 2737                   unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
 2738 {
 2739         static const unsigned int avg_pkts[NCCTRL_WIN] = {
 2740                 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
 2741                 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
 2742                 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
 2743 
 2744         unsigned int i, w;
 2745 
 2746         for (i = 0; i < NMTUS; ++i) {
 2747                 unsigned int mtu = min(mtus[i], mtu_cap);
 2748                 unsigned int log2 = fls(mtu);
 2749 
 2750                 if (!(mtu & ((1 << log2) >> 2)))     /* round */
 2751                         log2--;
 2752                 t3_write_reg(adap, A_TP_MTU_TABLE,
 2753                              (i << 24) | (log2 << 16) | mtu);
 2754 
 2755                 for (w = 0; w < NCCTRL_WIN; ++w) {
 2756                         unsigned int inc;
 2757 
 2758                         inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
 2759                                   CC_MIN_INCR);
 2760 
 2761                         t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
 2762                                      (w << 16) | (beta[w] << 13) | inc);
 2763                 }
 2764         }
 2765 }
 2766 
 2767 /**
 2768  *      t3_read_hw_mtus - returns the values in the HW MTU table
 2769  *      @adap: the adapter
 2770  *      @mtus: where to store the HW MTU values
 2771  *
 2772  *      Reads the HW MTU table.
 2773  */
 2774 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
 2775 {
 2776         int i;
 2777 
 2778         for (i = 0; i < NMTUS; ++i) {
 2779                 unsigned int val;
 2780 
 2781                 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
 2782                 val = t3_read_reg(adap, A_TP_MTU_TABLE);
 2783                 mtus[i] = val & 0x3fff;
 2784         }
 2785 }
 2786 
 2787 /**
 2788  *      t3_get_cong_cntl_tab - reads the congestion control table
 2789  *      @adap: the adapter
 2790  *      @incr: where to store the alpha values
 2791  *
 2792  *      Reads the additive increments programmed into the HW congestion
 2793  *      control table.
 2794  */
 2795 void t3_get_cong_cntl_tab(adapter_t *adap,
 2796                           unsigned short incr[NMTUS][NCCTRL_WIN])
 2797 {
 2798         unsigned int mtu, w;
 2799 
 2800         for (mtu = 0; mtu < NMTUS; ++mtu)
 2801                 for (w = 0; w < NCCTRL_WIN; ++w) {
 2802                         t3_write_reg(adap, A_TP_CCTRL_TABLE,
 2803                                      0xffff0000 | (mtu << 5) | w);
 2804                         incr[mtu][w] = (unsigned short)t3_read_reg(adap,
 2805                                         A_TP_CCTRL_TABLE) & 0x1fff;
 2806                 }
 2807 }
 2808 
 2809 /**
 2810  *      t3_tp_get_mib_stats - read TP's MIB counters
 2811  *      @adap: the adapter
 2812  *      @tps: holds the returned counter values
 2813  *
 2814  *      Returns the values of TP's MIB counters.
 2815  */
 2816 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
 2817 {
 2818         t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
 2819                          sizeof(*tps) / sizeof(u32), 0);
 2820 }
 2821 
 2822 /**
 2823  *      t3_read_pace_tbl - read the pace table
 2824  *      @adap: the adapter
 2825  *      @pace_vals: holds the returned values
 2826  *
 2827  *      Returns the values of TP's pace table in nanoseconds.
 2828  */
 2829 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
 2830 {
 2831         unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
 2832 
 2833         for (i = 0; i < NTX_SCHED; i++) {
 2834                 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
 2835                 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
 2836         }
 2837 }
 2838 
 2839 /**
 2840  *      t3_set_pace_tbl - set the pace table
 2841  *      @adap: the adapter
 2842  *      @pace_vals: the pace values in nanoseconds
 2843  *      @start: index of the first entry in the HW pace table to set
 2844  *      @n: how many entries to set
 2845  *
 2846  *      Sets (a subset of the) HW pace table.
 2847  */
 2848 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
 2849                      unsigned int start, unsigned int n)
 2850 {
 2851         unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
 2852 
 2853         for ( ; n; n--, start++, pace_vals++)
 2854                 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
 2855                              ((*pace_vals + tick_ns / 2) / tick_ns));
 2856 }
 2857 
 2858 #define ulp_region(adap, name, start, len) \
 2859         t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
 2860         t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
 2861                      (start) + (len) - 1); \
 2862         start += len
 2863 
 2864 #define ulptx_region(adap, name, start, len) \
 2865         t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
 2866         t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
 2867                      (start) + (len) - 1)
 2868 
/*
 * ulp_config - partition the ULP memory region among its sub-regions.
 * @adap: the adapter
 * @p: the TP parameters
 *
 * The running offset m starts at chan_rx_size; ulp_region() advances it
 * while ulptx_region() programs the matching ULP TX limits without moving
 * it, so TPT/STAG and PBL pairs share the same addresses.
 */
static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
        unsigned int m = p->chan_rx_size;

        ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
        ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
        ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
        ulp_region(adap, STAG, m, p->chan_rx_size / 4);
        ulp_region(adap, RQ, m, p->chan_rx_size / 4);
        ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
        ulp_region(adap, PBL, m, p->chan_rx_size / 4);
        /* Use all tag bits for TDDP. */
        t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
 2882 
 2883 
 2884 /**
 2885  *      t3_set_proto_sram - set the contents of the protocol sram
 2886  *      @adapter: the adapter
 2887  *      @data: the protocol image
 2888  *
 2889  *      Write the contents of the protocol SRAM.
 2890  */
 2891 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
 2892 {
 2893         int i;
 2894         const u32 *buf = (const u32 *)data;
 2895 
 2896         for (i = 0; i < PROTO_SRAM_LINES; i++) {
 2897                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
 2898                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
 2899                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
 2900                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
 2901                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
 2902                 
 2903                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
 2904                 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
 2905                         return -EIO;
 2906         }
 2907         return 0;
 2908 }
 2909 #endif
 2910 
 2911 /**
 2912  *      t3_config_trace_filter - configure one of the tracing filters
 2913  *      @adapter: the adapter
 2914  *      @tp: the desired trace filter parameters
 2915  *      @filter_index: which filter to configure
 2916  *      @invert: if set non-matching packets are traced instead of matching ones
 2917  *      @enable: whether to enable or disable the filter
 2918  *
 2919  *      Configures one of the tracing filters available in HW.
 2920  */
 2921 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
 2922                             int filter_index, int invert, int enable)
 2923 {
 2924         u32 addr, key[4], mask[4];
 2925 
 2926         key[0] = tp->sport | (tp->sip << 16);
 2927         key[1] = (tp->sip >> 16) | (tp->dport << 16);
 2928         key[2] = tp->dip;
 2929         key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
 2930 
 2931         mask[0] = tp->sport_mask | (tp->sip_mask << 16);
 2932         mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
 2933         mask[2] = tp->dip_mask;
 2934         mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
 2935 
 2936         if (invert)
 2937                 key[3] |= (1 << 29);
 2938         if (enable)
 2939                 key[3] |= (1 << 28);
 2940 
 2941         addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
 2942         tp_wr_indirect(adapter, addr++, key[0]);
 2943         tp_wr_indirect(adapter, addr++, mask[0]);
 2944         tp_wr_indirect(adapter, addr++, key[1]);
 2945         tp_wr_indirect(adapter, addr++, mask[1]);
 2946         tp_wr_indirect(adapter, addr++, key[2]);
 2947         tp_wr_indirect(adapter, addr++, mask[2]);
 2948         tp_wr_indirect(adapter, addr++, key[3]);
 2949         tp_wr_indirect(adapter, addr,   mask[3]);
 2950         (void) t3_read_reg(adapter, A_TP_PIO_DATA);
 2951 }
 2952 
 2953 /**
 2954  *      t3_config_sched - configure a HW traffic scheduler
 2955  *      @adap: the adapter
 2956  *      @kbps: target rate in Kbps
 2957  *      @sched: the scheduler index
 2958  *
 2959  *      Configure a Tx HW scheduler for the target rate.
 2960  */
 2961 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
 2962 {
 2963         unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
 2964         unsigned int clk = adap->params.vpd.cclk * 1000;
 2965         unsigned int selected_cpt = 0, selected_bpt = 0;
 2966 
 2967         if (kbps > 0) {
 2968                 kbps *= 125;     /* -> bytes */
 2969                 for (cpt = 1; cpt <= 255; cpt++) {
 2970                         tps = clk / cpt;
 2971                         bpt = (kbps + tps / 2) / tps;
 2972                         if (bpt > 0 && bpt <= 255) {
 2973                                 v = bpt * tps;
 2974                                 delta = v >= kbps ? v - kbps : kbps - v;
 2975                                 if (delta <= mindelta) {
 2976                                         mindelta = delta;
 2977                                         selected_cpt = cpt;
 2978                                         selected_bpt = bpt;
 2979                                 }
 2980                         } else if (selected_cpt)
 2981                                 break;
 2982                 }
 2983                 if (!selected_cpt)
 2984                         return -EINVAL;
 2985         }
 2986         t3_write_reg(adap, A_TP_TM_PIO_ADDR,
 2987                      A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
 2988         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
 2989         if (sched & 1)
 2990                 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
 2991         else
 2992                 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
 2993         t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
 2994         return 0;
 2995 }
 2996 
 2997 /**
 2998  *      t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 2999  *      @adap: the adapter
 3000  *      @sched: the scheduler index
 3001  *      @ipg: the interpacket delay in tenths of nanoseconds
 3002  *
 3003  *      Set the interpacket delay for a HW packet rate scheduler.
 3004  */
 3005 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
 3006 {
 3007         unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
 3008 
 3009         /* convert ipg to nearest number of core clocks */
 3010         ipg *= core_ticks_per_usec(adap);
 3011         ipg = (ipg + 5000) / 10000;
 3012         if (ipg > 0xffff)
 3013                 return -EINVAL;
 3014 
 3015         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
 3016         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
 3017         if (sched & 1)
 3018                 v = (v & 0xffff) | (ipg << 16);
 3019         else
 3020                 v = (v & 0xffff0000) | ipg;
 3021         t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
 3022         t3_read_reg(adap, A_TP_TM_PIO_DATA);
 3023         return 0;
 3024 }
 3025 
 3026 /**
 3027  *      t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 3028  *      @adap: the adapter
 3029  *      @sched: the scheduler index
 3030  *      @kbps: the byte rate in Kbps
 3031  *      @ipg: the interpacket delay in tenths of nanoseconds
 3032  *
 3033  *      Return the current configuration of a HW Tx scheduler.
 3034  */
 3035 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
 3036                      unsigned int *ipg)
 3037 {
 3038         unsigned int v, addr, bpt, cpt;
 3039 
 3040         if (kbps) {
 3041                 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
 3042                 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
 3043                 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
 3044                 if (sched & 1)
 3045                         v >>= 16;
 3046                 bpt = (v >> 8) & 0xff;
 3047                 cpt = v & 0xff;
 3048                 if (!cpt)
 3049                         *kbps = 0;        /* scheduler disabled */
 3050                 else {
 3051                         v = (adap->params.vpd.cclk * 1000) / cpt;
 3052                         *kbps = (v * bpt) / 125;
 3053                 }
 3054         }
 3055         if (ipg) {
 3056                 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
 3057                 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
 3058                 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
 3059                 if (sched & 1)
 3060                         v >>= 16;
 3061                 v &= 0xffff;
 3062                 *ipg = (10000 * v) / core_ticks_per_usec(adap);
 3063         }
 3064 }
 3065 
 3066 /**
 3067  *      tp_init - configure TP
 3068  *      @adap: the adapter
 3069  *      @p: TP configuration parameters
 3070  *
 3071  *      Initializes the TP HW module.
 3072  */
 3073 static int tp_init(adapter_t *adap, const struct tp_params *p)
 3074 {
 3075         int busy = 0;
 3076 
 3077         tp_config(adap, p);
 3078         t3_set_vlan_accel(adap, 3, 0);
 3079 
 3080         if (is_offload(adap)) {
 3081                 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
 3082                 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
 3083                 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
 3084                                        0, 1000, 5);
 3085                 if (busy)
 3086                         CH_ERR(adap, "TP initialization timed out\n");
 3087         }
 3088 
 3089         if (!busy)
 3090                 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
 3091         return busy;
 3092 }
 3093 
 3094 /**
 3095  *      t3_mps_set_active_ports - configure port failover
 3096  *      @adap: the adapter
 3097  *      @port_mask: bitmap of active ports
 3098  *
 3099  *      Sets the active ports according to the supplied bitmap.
 3100  */
 3101 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
 3102 {
 3103         if (port_mask & ~((1 << adap->params.nports) - 1))
 3104                 return -EINVAL;
 3105         t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
 3106                          port_mask << S_PORT0ACTIVE);
 3107         return 0;
 3108 }
 3109 
 3110 /**
 3111  *      chan_init_hw - channel-dependent HW initialization
 3112  *      @adap: the adapter
 3113  *      @chan_map: bitmap of Tx channels being used
 3114  *
 3115  *      Perform the bits of HW initialization that are dependent on the Tx
 3116  *      channels being used.
 3117  */
 3118 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
 3119 {
 3120         int i;
 3121 
 3122         if (chan_map != 3) {                                 /* one channel */
 3123                 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
 3124                 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
 3125                 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
 3126                              (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
 3127                                               F_TPTXPORT1EN | F_PORT1ACTIVE));
 3128                 t3_write_reg(adap, A_PM1_TX_CFG,
 3129                              chan_map == 1 ? 0xffffffff : 0);
 3130                 if (chan_map == 2)
 3131                         t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3132                                      V_TX_MOD_QUEUE_REQ_MAP(0xff));
 3133                 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
 3134                 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
 3135         } else {                                             /* two channels */
 3136                 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
 3137                 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
 3138                 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
 3139                              V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
 3140                 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
 3141                              F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
 3142                              F_ENFORCEPKT);
 3143                 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
 3144                 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
 3145                 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3146                              V_TX_MOD_QUEUE_REQ_MAP(0xaa));
 3147                 for (i = 0; i < 16; i++)
 3148                         t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
 3149                                      (i << 16) | 0x1010);
 3150                 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
 3151                 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
 3152         }
 3153 }
 3154 
/*
 * Calibrate the XGMAC impedance.  On XAUI adapters auto-calibration is
 * retried up to 5 times, transferring the measured value into the XAUI
 * impedance field on success; on RGMII adapters fixed pull-up/pull-down
 * values are programmed instead.  Returns 0 on success, -1 if XAUI
 * calibration keeps failing.
 */
static int calibrate_xgm(adapter_t *adapter)
{
        if (uses_xaui(adapter)) {
                unsigned int v, i;

                for (i = 0; i < 5; ++i) {
                        /* restart calibration and give it 1 ms to settle */
                        t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
                        (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
                        msleep(1);
                        v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
                        if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
                                /* latch the measured impedance value */
                                t3_write_reg(adapter, A_XGM_XAUI_IMP,
                                             V_XAUIIMP(G_CALIMP(v) >> 2));
                                return 0;
                        }
                }
                CH_ERR(adapter, "MAC calibration failed\n");
                return -1;
        } else {
                /* RGMII: program fixed impedance values */
                t3_write_reg(adapter, A_XGM_RGMII_IMP,
                             V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
                                 F_XGM_IMPSETUPDATE);
        }
        return 0;
}
 3181 
/*
 * T3B-specific XGMAC impedance calibration for RGMII adapters: program the
 * fixed impedance values, then pulse CALRESET, IMPSETUPDATE and CALUPDATE
 * in the prescribed order.  XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(adapter_t *adapter)
{
        if (!uses_xaui(adapter)) {
                t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
                             F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
                                 F_XGM_IMPSETUPDATE);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
                                 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
        }
}
 3196 
/*
 * MC7 memory-controller timing parameters.  The values are written into
 * the MC7 PARM register by mc7_init(); field names suggest DRAM command
 * spacing delays (in controller clocks) -- confirm against the T3 MC7
 * register documentation.  RefCyc is indexed by the density field of the
 * MC7 configuration register.
 */
struct mc7_timing_params {
        unsigned char ActToPreDly;      /* -> V_ACTTOPREDLY */
        unsigned char ActToRdWrDly;     /* -> V_ACTTORDWRDLY */
        unsigned char PreCyc;           /* -> V_PRECYC */
        unsigned char RefCyc[5];        /* -> V_REFCYC, per density */
        unsigned char BkCyc;            /* -> V_BKCYC */
        unsigned char WrToRdDly;        /* -> V_WRTORDDLY */
        unsigned char RdToWrDly;        /* -> V_RDTOWRDLY */
};
 3206 
 3207 /*
 3208  * Write a value to a register and check that the write completed.  These
 3209  * writes normally complete in a cycle or two, so one read should suffice.
 3210  * The very first read exists to flush the posted write to the device.
 3211  */
 3212 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
 3213 {
 3214         t3_write_reg(adapter,   addr, val);
 3215         (void) t3_read_reg(adapter, addr);                   /* flush */
 3216         if (!(t3_read_reg(adapter, addr) & F_BUSY))
 3217                 return 0;
 3218         CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
 3219         return -EIO;
 3220 }
 3221 
/*
 * Bring up one MC7 memory controller and its attached memory: calibrate,
 * program timing parameters, run the JEDEC-style mode-register init
 * sequence, enable periodic refresh and ECC, and finally run BIST over
 * the whole part before declaring it ready.  @mc7_clock is the memory
 * clock in KHz, @mem_type indexes the timing/mode tables below.  Returns
 * 0 on success (or if the controller has no memory), -1 on any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
        static const unsigned int mc7_mode[] = {
                0x632, 0x642, 0x652, 0x432, 0x442
        };
        static const struct mc7_timing_params mc7_timings[] = {
                { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
                { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
                { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
                { 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
                { 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
        };

        u32 val;
        unsigned int width, density, slow, attempts;
        adapter_t *adapter = mc7->adapter;
        const struct mc7_timing_params *p = &mc7_timings[mem_type];

        /* nothing to do if this controller has no memory attached */
        if (!mc7->size)
                return 0;

        val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
        slow = val & F_SLOW;
        width = G_WIDTH(val);
        density = G_DEN(val);

        /* enable the memory interface and let it settle */
        t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
        val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
        msleep(1);

        if (!slow) {
                /* run a single calibration cycle and check the outcome */
                t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
                (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
                msleep(1);
                if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
                    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
                        CH_ERR(adapter, "%s MC7 calibration timed out\n",
                               mc7->name);
                        goto out_fail;
                }
        }

        /* program the command timing parameters for this memory type */
        t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
                     V_ACTTOPREDLY(p->ActToPreDly) |
                     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
                     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
                     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

        t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
                     val | F_CLKEN | F_TERM150);
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

        if (!slow)
                t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
                                 F_DLLENB);
        udelay(1);

        /* mode-register initialization sequence; each wrreg_wait issues
         * one command and verifies the controller accepted it */
        val = slow ? 3 : 6;
        if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
                goto out_fail;

        if (!slow) {
                t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
                t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
                                 F_DLLRST, 0);
                udelay(5);
        }

        if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
                       mc7_mode[mem_type]) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
                goto out_fail;

        /* clock value is in KHz */
        mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
        mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */

        /* enable periodic refresh at the derived divider */
        t3_write_reg(adapter, mc7->offset + A_MC7_REF,
                     F_PERREFEN | V_PREREFDIV(mc7_clock));
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

        /* turn on ECC generation/checking, then BIST the whole memory */
        t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
                     F_ECCGENEN | F_ECCCHKEN);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
                     (mc7->size << width) - 1);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

        /* poll for BIST completion, up to 50 * 250 ms */
        attempts = 50;
        do {
                msleep(250);
                val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
        } while ((val & F_BUSY) && --attempts);
        if (val & F_BUSY) {
                CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
                goto out_fail;
        }

        /* Enable normal memory accesses. */
        t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
        return 0;

 out_fail:
        return -1;
}
 3336 
/*
 * Configure PCIe-specific parameters: derive the ACK latency and replay
 * timer limits from the negotiated link width and max payload size
 * (tables indexed by [log2(width)][payload code]), adjusting for the
 * fast-training sequence counts when L0s is enabled, then clear any stale
 * PEX errors and enable error-completion decoding.
 */
static void config_pcie(adapter_t *adap)
{
        /* ACK latency values, indexed [log2 link width][payload size] */
        static const u16 ack_lat[4][6] = {
                { 237, 416, 559, 1071, 2095, 4143 },
                { 128, 217, 289, 545, 1057, 2081 },
                { 73, 118, 154, 282, 538, 1050 },
                { 67, 107, 86, 150, 278, 534 }
        };
        /* replay timer values, same indexing as ack_lat */
        static const u16 rpl_tmr[4][6] = {
                { 711, 1248, 1677, 3213, 6285, 12429 },
                { 384, 651, 867, 1635, 3171, 6243 },
                { 219, 354, 462, 846, 1614, 3150 },
                { 201, 321, 258, 450, 834, 1602 }
        };

        u16 val;
        unsigned int log2_width, pldsize;
        unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

        /* max payload size code from PCIe Device Control */
        t3_os_pci_read_config_2(adap,
                                adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
                                &val);
        pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

        t3_os_pci_read_config_2(adap,
                                adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
                                &val);

        /* fast-training sequence counts; rev 0 lacks a separate Rx value */
        fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
        fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
                        G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
        log2_width = fls(adap->params.pci.width) - 1;
        acklat = ack_lat[log2_width][pldsize];
        if (val & 1)                            /* check LOsEnable */
                acklat += fst_trn_tx * 4;
        rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

        /* rev 0 uses a different ACK latency field layout */
        if (adap->params.rev == 0)
                t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
                                 V_T3A_ACKLAT(M_T3A_ACKLAT),
                                 V_T3A_ACKLAT(acklat));
        else
                t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
                                 V_ACKLAT(acklat));

        t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
                         V_REPLAYLMT(rpllmt));

        /* clear any latched PEX errors and enable completion decoding */
        t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
        t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
 3388 
 3389 /**
 3390  *      t3_init_hw - initialize and configure T3 HW modules
 3391  *      @adapter: the adapter
 3392  *      @fw_params: initial parameters to pass to firmware (optional)
 3393  *
 3394  *      Initialize and configure T3 HW modules.  This performs the
 3395  *      initialization steps that need to be done once after a card is reset.
 3396  *      MAC and PHY initialization is handled separarely whenever a port is
 3397  *      enabled.
 3398  *
 3399  *      @fw_params are passed to FW and their value is platform dependent.
 3400  *      Only the top 8 bits are available for use, the rest must be 0.
 3401  */
 3402 int t3_init_hw(adapter_t *adapter, u32 fw_params)
 3403 {
 3404         int err = -EIO, attempts = 100;
 3405         const struct vpd_params *vpd = &adapter->params.vpd;
 3406 
 3407         if (adapter->params.rev > 0)
 3408                 calibrate_xgm_t3b(adapter);
 3409         else if (calibrate_xgm(adapter))
 3410                 goto out_err;
 3411 
 3412         if (adapter->params.nports > 2)
 3413                 t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
 3414 
 3415         if (vpd->mclk) {
 3416                 partition_mem(adapter, &adapter->params.tp);
 3417 
 3418                 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
 3419                     mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
 3420                     mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
 3421                     t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
 3422                                 adapter->params.mc5.nfilters,
 3423                                 adapter->params.mc5.nroutes))
 3424                         goto out_err;
 3425         }
 3426 
 3427         if (tp_init(adapter, &adapter->params.tp))
 3428                 goto out_err;
 3429 
 3430 #ifdef CONFIG_CHELSIO_T3_CORE
 3431         t3_tp_set_coalescing_size(adapter,
 3432                                   min(adapter->params.sge.max_pkt_size,
 3433                                       MAX_RX_COALESCING_LEN), 1);
 3434         t3_tp_set_max_rxsize(adapter,
 3435                              min(adapter->params.sge.max_pkt_size, 16384U));
 3436         ulp_config(adapter, &adapter->params.tp);
 3437 #endif
 3438         if (is_pcie(adapter))
 3439                 config_pcie(adapter);
 3440         else
 3441                 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
 3442 
 3443         t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
 3444         t3_write_reg(adapter, A_PM1_RX_MODE, 0);
 3445         t3_write_reg(adapter, A_PM1_TX_MODE, 0);
 3446         chan_init_hw(adapter, adapter->params.chan_map);
 3447         t3_sge_init(adapter, &adapter->params.sge);
 3448 
 3449         t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
 3450         t3_write_reg(adapter, A_CIM_BOOT_CFG,
 3451                      V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
 3452         (void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
 3453 
 3454         do {                          /* wait for uP to initialize */
 3455                 msleep(20);
 3456         } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
 3457         if (!attempts) {
 3458                 CH_ERR(adapter, "uP initialization timed out\n");
 3459                 goto out_err;
 3460         }
 3461 
 3462         err = 0;
 3463  out_err:
 3464         return err;
 3465 }
 3466 
 3467 /**
 3468  *      get_pci_mode - determine a card's PCI mode
 3469  *      @adapter: the adapter
 3470  *      @p: where to store the PCI settings
 3471  *
 3472  *      Determines a card's PCI mode and associated parameters, such as speed
 3473  *      and width.
 3474  */
 3475 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
 3476 {
 3477         static unsigned short speed_map[] = { 33, 66, 100, 133 };
 3478         u32 pci_mode, pcie_cap;
 3479 
 3480         pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
 3481         if (pcie_cap) {
 3482                 u16 val;
 3483 
 3484                 p->variant = PCI_VARIANT_PCIE;
 3485                 p->pcie_cap_addr = pcie_cap;
 3486                 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
 3487                                         &val);
 3488                 p->width = (val >> 4) & 0x3f;
 3489                 return;
 3490         }
 3491 
 3492         pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
 3493         p->speed = speed_map[G_PCLKRANGE(pci_mode)];
 3494         p->width = (pci_mode & F_64BIT) ? 64 : 32;
 3495         pci_mode = G_PCIXINITPAT(pci_mode);
 3496         if (pci_mode == 0)
 3497                 p->variant = PCI_VARIANT_PCI;
 3498         else if (pci_mode < 4)
 3499                 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
 3500         else if (pci_mode < 8)
 3501                 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
 3502         else
 3503                 p->variant = PCI_VARIANT_PCIX_266_MODE2;
 3504 }
 3505 
 3506 /**
 3507  *      init_link_config - initialize a link's SW state
 3508  *      @lc: structure holding the link state
 3509  *      @caps: link capabilities
 3510  *
 3511  *      Initializes the SW state maintained for each link, including the link's
 3512  *      capabilities and default speed/duplex/flow-control/autonegotiation
 3513  *      settings.
 3514  */
 3515 static void __devinit init_link_config(struct link_config *lc,
 3516                                        unsigned int caps)
 3517 {
 3518         lc->supported = caps;
 3519         lc->requested_speed = lc->speed = SPEED_INVALID;
 3520         lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
 3521         lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
 3522         if (lc->supported & SUPPORTED_Autoneg) {
 3523                 lc->advertising = lc->supported;
 3524                 lc->autoneg = AUTONEG_ENABLE;
 3525                 lc->requested_fc |= PAUSE_AUTONEG;
 3526         } else {
 3527                 lc->advertising = 0;
 3528                 lc->autoneg = AUTONEG_DISABLE;
 3529         }
 3530 }
 3531 
 3532 /**
 3533  *      mc7_calc_size - calculate MC7 memory size
 3534  *      @cfg: the MC7 configuration
 3535  *
 3536  *      Calculates the size of an MC7 memory in bytes from the value of its
 3537  *      configuration register.
 3538  */
 3539 static unsigned int __devinit mc7_calc_size(u32 cfg)
 3540 {
 3541         unsigned int width = G_WIDTH(cfg);
 3542         unsigned int banks = !!(cfg & F_BKS) + 1;
 3543         unsigned int org = !!(cfg & F_ORG) + 1;
 3544         unsigned int density = G_DEN(cfg);
 3545         unsigned int MBs = ((256 << density) * banks) / (org << width);
 3546 
 3547         return MBs << 20;
 3548 }
 3549 
/*
 * Initialize the SW state for one MC7 memory controller: record its name
 * and register offset (relative to the PMRX instance) and derive its size
 * and width from the hardware configuration register.  A density field of
 * all-ones means no memory is attached and size is set to 0.
 */
static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
                               unsigned int base_addr, const char *name)
{
        u32 cfg;

        mc7->adapter = adapter;
        mc7->name = name;
        mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
        cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
        mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
        mc7->width = G_WIDTH(cfg);
}
 3562 
/*
 * Initialize the SW state for one MAC instance.  On adapters with more
 * than two ports all ports share MAC 0 (the external port number is kept
 * in ext_port and 8 unicast addresses are available); otherwise each port
 * has its own MAC register block at the given index.  Rev 0 XAUI parts
 * additionally need their SERDES programmed and RGMII disabled here.
 */
void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
{
        mac->adapter = adapter;
        mac->multiport = adapter->params.nports > 2;
        if (mac->multiport) {
                mac->ext_port = (unsigned char)index;
                mac->nucast = 8;
                index = 0;      /* all ports funnel through MAC 0 */
        } else
                mac->nucast = 1;

        mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;

        if (adapter->params.rev == 0 && uses_xaui(adapter)) {
                t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
                             is_10G(adapter) ? 0x2901c04 : 0x2301c04);
                t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
                                 F_ENRGMII, 0);
        }
}
 3583 
 3584 /**
 3585  *      early_hw_init - HW initialization done at card detection time
 3586  *      @adapter: the adapter
 3587  *      @ai: contains information about the adapter type and properties
 3588  *
 3589  *      Perfoms the part of HW initialization that is done early on when the
 3590  *      driver first detecs the card.  Most of the HW state is initialized
 3591  *      lazily later on when a port or an offload function are first used.
 3592  */
 3593 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
 3594 {
 3595         u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
 3596                               3 : 2);
 3597 
 3598         mi1_init(adapter, ai);
 3599         t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
 3600                      V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
 3601         t3_write_reg(adapter, A_T3DBG_GPIO_EN,
 3602                      ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
 3603         t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
 3604 
 3605         if (adapter->params.rev == 0 || !uses_xaui(adapter))
 3606                 val |= F_ENRGMII;
 3607 
 3608         /* Enable MAC clocks so we can access the registers */
 3609         t3_write_reg(adapter, A_XGM_PORT_CFG, val);
 3610         (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
 3611 
 3612         val |= F_CLKDIVRESET_;
 3613         t3_write_reg(adapter, A_XGM_PORT_CFG, val);
 3614         (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
 3615         t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
 3616         (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
 3617 }
 3618 
 3619 /**
 3620  *      t3_reset_adapter - reset the adapter
 3621  *      @adapter: the adapter
 3622  *
 3623  *      Reset the adapter.
 3624  */
 3625 static int t3_reset_adapter(adapter_t *adapter)
 3626 {
 3627         int i, save_and_restore_pcie = 
 3628             adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
 3629         uint16_t devid = 0;
 3630 
 3631         if (save_and_restore_pcie)
 3632                 t3_os_pci_save_state(adapter);
 3633         t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
 3634 
 3635         /*
 3636          * Delay. Give Some time to device to reset fully.
 3637          * XXX The delay time should be modified.
 3638          */
 3639         for (i = 0; i < 10; i++) {
 3640                 msleep(50);
 3641                 t3_os_pci_read_config_2(adapter, 0x00, &devid);
 3642                 if (devid == 0x1425)
 3643                         break;
 3644         }
 3645 
 3646         if (devid != 0x1425)
 3647                 return -1;
 3648 
 3649         if (save_and_restore_pcie)
 3650                 t3_os_pci_restore_state(adapter);
 3651         return 0;
 3652 }
 3653 
 3654 /**
 3655  *      t3_prep_adapter - prepare SW and HW for operation
 3656  *      @adapter: the adapter
 3657  *      @ai: contains information about the adapter type and properties
 3658  *
 3659  *      Initialize adapter SW state for the various HW modules, set initial
 3660  *      values for some adapter tunables, take PHYs out of reset, and
 3661  *      initialize the MDIO interface.
 3662  */
 3663 int __devinit t3_prep_adapter(adapter_t *adapter,
 3664                               const struct adapter_info *ai, int reset)
 3665 {
 3666         int ret;
 3667         unsigned int i, j = 0;
 3668 
 3669         get_pci_mode(adapter, &adapter->params.pci);
 3670 
 3671         adapter->params.info = ai;
 3672         adapter->params.nports = ai->nports0 + ai->nports1;
 3673         adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
 3674         adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
 3675         adapter->params.linkpoll_period = 0;
 3676         if (adapter->params.nports > 2)
 3677                 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
 3678         else
 3679                 adapter->params.stats_update_period = is_10G(adapter) ?
 3680                         MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
 3681         adapter->params.pci.vpd_cap_addr =
 3682                 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
 3683 
 3684         ret = get_vpd_params(adapter, &adapter->params.vpd);
 3685         if (ret < 0)
 3686                 return ret;
 3687 
 3688         if (reset && t3_reset_adapter(adapter))
 3689                 return -1;
 3690 
 3691         t3_sge_prep(adapter, &adapter->params.sge);
 3692 
 3693         if (adapter->params.vpd.mclk) {
 3694                 struct tp_params *p = &adapter->params.tp;
 3695 
 3696                 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
 3697                 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
 3698                 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
 3699 
 3700                 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
 3701                 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
 3702                 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
 3703                 p->cm_size = t3_mc7_size(&adapter->cm);
 3704                 p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
 3705                 p->chan_tx_size = p->pmtx_size / p->nchan;
 3706                 p->rx_pg_size = 64 * 1024;
 3707                 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
 3708                 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
 3709                 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
 3710                 p->ntimer_qs = p->cm_size >= (128 << 20) ||
 3711                                adapter->params.rev > 0 ? 12 : 6;
 3712                 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
 3713                          1;
 3714                 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
 3715         }
 3716 
 3717         adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
 3718                                   t3_mc7_size(&adapter->pmtx) &&
 3719                                   t3_mc7_size(&adapter->cm);
 3720 
 3721         if (is_offload(adapter)) {
 3722                 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
 3723                 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
 3724                                                DEFAULT_NFILTERS : 0;
 3725                 adapter->params.mc5.nroutes = 0;
 3726                 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
 3727 
 3728 #ifdef CONFIG_CHELSIO_T3_CORE
 3729                 init_mtus(adapter->params.mtus);
 3730                 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 3731 #endif
 3732         }
 3733 
 3734         early_hw_init(adapter, ai);
 3735 
 3736         if (adapter->params.nports > 2 &&
 3737             (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
 3738                 return ret;
 3739 
 3740         for_each_port(adapter, i) {
 3741                 u8 hw_addr[6];
 3742                 struct port_info *p = adap2pinfo(adapter, i);
 3743 
 3744                 while (!adapter->params.vpd.port_type[j])
 3745                         ++j;
 3746 
 3747                 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
 3748                 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
 3749                                        ai->mdio_ops);
 3750                 mac_prep(&p->mac, adapter, j);
 3751                 ++j;
 3752 
 3753                 /*
 3754                  * The VPD EEPROM stores the base Ethernet address for the
 3755                  * card.  A port's address is derived from the base by adding
 3756                  * the port's index to the base's low octet.
 3757                  */
 3758                 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
 3759                 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
 3760 
 3761                 t3_os_set_hw_addr(adapter, i, hw_addr);
 3762                 init_link_config(&p->link_config, p->port_type->caps);
 3763                 p->phy.ops->power_down(&p->phy, 1);
 3764                 if (!(p->port_type->caps & SUPPORTED_IRQ))
 3765                         adapter->params.linkpoll_period = 10;
 3766         }
 3767 
 3768         return 0;
 3769 }
 3770 
 3771 void t3_led_ready(adapter_t *adapter)
 3772 {
 3773         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
 3774                          F_GPIO0_OUT_VAL);
 3775 }
 3776 
 3777 void t3_port_failover(adapter_t *adapter, int port)
 3778 {
 3779         u32 val;
 3780 
 3781         val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
 3782         t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
 3783                          val);
 3784 }
 3785 
 3786 void t3_failover_done(adapter_t *adapter, int port)
 3787 {
 3788         t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
 3789                          F_PORT0ACTIVE | F_PORT1ACTIVE);
 3790 }
 3791 
 3792 void t3_failover_clear(adapter_t *adapter)
 3793 {
 3794         t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
 3795                          F_PORT0ACTIVE | F_PORT1ACTIVE);
 3796 }

Cache object: 47a8caa489e011bee412180ca1328d66


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.