The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/t4_main.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2011 Chelsio Communications, Inc.
    3  * All rights reserved.
    4  * Written by: Navdeep Parhar <np@FreeBSD.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/8.4/sys/dev/cxgbe/t4_main.c 247670 2013-03-02 21:59:07Z np $");
   30 
   31 #include "opt_inet.h"
   32 #include "opt_inet6.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/conf.h>
   36 #include <sys/priv.h>
   37 #include <sys/kernel.h>
   38 #include <sys/bus.h>
   39 #include <sys/module.h>
   40 #include <sys/malloc.h>
   41 #include <sys/queue.h>
   42 #include <sys/taskqueue.h>
   43 #include <sys/pciio.h>
   44 #include <dev/pci/pcireg.h>
   45 #include <dev/pci/pcivar.h>
   46 #include <dev/pci/pci_private.h>
   47 #include <sys/firmware.h>
   48 #include <sys/sbuf.h>
   49 #include <sys/smp.h>
   50 #include <sys/socket.h>
   51 #include <sys/sockio.h>
   52 #include <sys/sysctl.h>
   53 #include <net/ethernet.h>
   54 #include <net/if.h>
   55 #include <net/if_types.h>
   56 #include <net/if_dl.h>
   57 #include <net/if_vlan_var.h>
   58 
   59 #include "common/common.h"
   60 #include "common/t4_msg.h"
   61 #include "common/t4_regs.h"
   62 #include "common/t4_regs_values.h"
   63 #include "t4_ioctl.h"
   64 #include "t4_l2t.h"
   65 
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus method table for the t4nex nexus device (the adapter itself) */
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)          /* softc is the whole adapter */
};
   82 
   83 
   84 /* T4 port (cxgbe) interface */
   85 static int cxgbe_probe(device_t);
   86 static int cxgbe_attach(device_t);
   87 static int cxgbe_detach(device_t);
   88 static device_method_t cxgbe_methods[] = {
   89         DEVMETHOD(device_probe,         cxgbe_probe),
   90         DEVMETHOD(device_attach,        cxgbe_attach),
   91         DEVMETHOD(device_detach,        cxgbe_detach),
   92         { 0, 0 }
   93 };
   94 static driver_t cxgbe_driver = {
   95         "cxgbe",
   96         cxgbe_methods,
   97         sizeof(struct port_info)
   98 };
   99 
/* Control device (/dev/t4nexN); used by the userland cxgbetool via ioctl. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
  112 
/* ifnet + media interface (per-port network interface callbacks) */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

/* malloc(9) type for all allocations made by this driver */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
  122 
/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;         /* protects t4_list */
static SLIST_HEAD(, adapter) t4_list;   /* all attached adapters */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;     /* protects t4_uld_list */
static SLIST_HEAD(, uld_info) t4_uld_list;      /* registered upper-layer drivers */
#endif
  133 
/*
 * Tunables.  See tweak_tunables() too.
 *
 * For most of the queue-count tunables below a value of -1 means "not set by
 * the administrator"; the built-in default (the #define next to each one) is
 * applied later (presumably in tweak_tunables() -- see that function).
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 * All three are allowed by default.
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
static char t4_cfg_file[32] = "default";
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;     /* -1: decided later (TOE support) */
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
  243 
/*
 * Result of cfg_itype_and_nqueues(): the interrupt configuration and the
 * number of rx/tx queues to use for each port class.
 */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_flags;
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};
  259 
/* Software state for one hardware filter (see the filter ioctls below). */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};
  269 
/*
 * Bitmask of MAC settings to (re)apply; presumably consumed by
 * update_mac_settings() -- confirm against that function's body.
 */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff
};
  280 
/* Attach-time setup helpers */
static int map_bars(struct adapter *);
static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int upload_config_file(struct adapter *, const struct firmware *,
    uint32_t *, uint32_t *);
static int partition_resources(struct adapter *, const struct firmware *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
/* Port/adapter bring-up and tear-down */
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
/* Default (unregistered) message handlers */
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
/* sysctl handlers */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
#endif
/* Tx kick and hardware filter management */
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
/* ioctl backends */
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int t4_mod_event(module_t, int, void *);
  366 
/*
 * PCI device IDs recognized by t4_probe().  Note that the struct tag and
 * the array deliberately share the name t4_pciids (legal in C; tags live
 * in a separate namespace).
 */
struct t4_pciids {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
};
  385 
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
  398 
  399 static int
  400 t4_probe(device_t dev)
  401 {
  402         int i;
  403         uint16_t v = pci_get_vendor(dev);
  404         uint16_t d = pci_get_device(dev);
  405         uint8_t f = pci_get_function(dev);
  406 
  407         if (v != PCI_VENDOR_ID_CHELSIO)
  408                 return (ENXIO);
  409 
  410         /* Attach only to PF0 of the FPGA */
  411         if (d == 0xa000 && f != 0)
  412                 return (ENXIO);
  413 
  414         for (i = 0; i < nitems(t4_pciids); i++) {
  415                 if (d == t4_pciids[i].device) {
  416                         device_set_desc(dev, t4_pciids[i].desc);
  417                         return (BUS_PROBE_DEFAULT);
  418                 }
  419         }
  420 
  421         return (ENXIO);
  422 }
  423 
/*
 * Attach the t4nex nexus device: configure PCIe, map BARs, prepare the chip
 * and firmware, size interrupts and queues, create one cxgbe child device
 * per port, and attach the children.  If anything fails after the control
 * device exists the adapter is left in "recovery mode" (cxgbetool still
 * works over /dev/t4nexN) and attach reports success.
 */
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, n10g, n1g, rqidx, tqidx;
        struct intrs_and_queues iaq;
        struct sge *s;
#ifdef TCP_OFFLOAD
        int ofld_rqidx, ofld_tqidx;
#endif

        sc = device_get_softc(dev);
        sc->dev = dev;

        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                /* Bigger reads + relaxed ordering on the PCIe link */
                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
        }

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        /* Register this adapter on the global list (lock order: t4_list_lock
         * before ADAPTER_LOCK, see comment at t4_list_lock). */
        mtx_lock(&t4_list_lock);
        SLIST_INSERT_HEAD(&t4_list, sc, link);
        mtx_unlock(&t4_list_lock);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

        rc = map_bars(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * This is the real PF# to which we're attaching.  Works from within PCI
         * passthrough environments too, where pci_get_function() could return a
         * different PF# depending on the passthrough configuration.  We need to
         * use the real PF# in all our communication with the firmware.
         */
        sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
        sc->mbox = sc->pf;

        /* Install catch-all handlers; real handlers are registered later. */
        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
        sc->an_handler = an_not_handled;
        for (i = 0; i < nitems(sc->cpl_handler); i++)
                sc->cpl_handler[i] = cpl_not_handled;
        for (i = 0; i < nitems(sc->fw_msg_handler); i++)
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);

        /* Prepare the adapter for operation */
        rc = -t4_prep_adapter(sc);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * Do this really early, with the memory windows set up even before the
         * character device.  The userland tool's register i/o and mem read
         * will work even in "recovery mode".
         */
        setup_memwin(sc);
        sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
            GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
        sc->cdev->si_drv1 = sc;

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_sge_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        if (sc->flags & MASTER_PF) {
                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        device_printf(dev, "early init failed: %d.\n", rc);
                        goto done;
                }
        }

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        if (sc->flags & MASTER_PF) {
                uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);

                /* final tweaks to some settings */

                t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
                    sc->params.b_wnd);
                /* 4K, 16K, 64K, 256K DDP "page sizes" */
                t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(0) | V_HPZ1(2) |
                    V_HPZ2(4) | V_HPZ3(6));
                t4_set_reg_field(sc, A_ULP_RX_CTL, F_TDDPTAGTCB, F_TDDPTAGTCB);
                t4_set_reg_field(sc, A_TP_PARA_REG5,
                    V_INDICATESIZE(M_INDICATESIZE) |
                    F_REARMDDPOFFSET | F_RESETDDPOFFSET,
                    V_INDICATESIZE(indsz) |
                    F_REARMDDPOFFSET | F_RESETDDPOFFSET);
        } else {
                /*
                 * XXX: Verify that we can live with whatever the master driver
                 * has done so far, and hope that it doesn't change any global
                 * setting from underneath us in the future.
                 */
        }

        /* Cache the current hardware filter mode (TP_VLAN_PRI_MAP). */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
            A_TP_VLAN_PRI_MAP);

        for (i = 0; i < NCHAN; i++)
                sc->params.tp.tx_modq[i] = i;

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;

                /* Allocate the vi and initialize parameters like mac addr */
                rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

                /* Per-class defaults from the tunables above */
                if (is_10G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = t4_tmr_idx_10g;
                        pi->pktc_idx = t4_pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = t4_tmr_idx_1g;
                        pi->pktc_idx = t4_pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;        /* no exact-match filter yet */

                pi->qsize_rxq = t4_qsize_rxq;
                pi->qsize_txq = t4_qsize_txq;

                pi->dev = device_add_child(dev, "cxgbe", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;
        sc->flags |= iaq.intr_flags;

        /* Total up queue counts across all ports and size the SGE arrays. */
        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {

                s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif

        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
        ofld_rqidx = ofld_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                pi->first_rxq = rqidx;
                pi->first_txq = tqidx;
                if (is_10G_port(pi)) {
                        pi->nrxq = iaq.nrxq10g;
                        pi->ntxq = iaq.ntxq10g;
                } else {
                        pi->nrxq = iaq.nrxq1g;
                        pi->ntxq = iaq.ntxq1g;
                }

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
                if (is_offload(sc)) {
                        pi->first_ofld_rxq = ofld_rqidx;
                        pi->first_ofld_txq = ofld_tqidx;
                        if (is_10G_port(pi)) {
                                pi->nofldrxq = iaq.nofldrxq10g;
                                pi->nofldtxq = iaq.nofldtxq10g;
                        } else {
                                pi->nofldrxq = iaq.nofldrxq1g;
                                pi->nofldtxq = iaq.nofldtxq1g;
                        }
                        ofld_rqidx += pi->nofldrxq;
                        ofld_tqidx += pi->nofldtxq;
                }
#endif
        }

        rc = setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.width, sc->params.nports, sc->intr_count,
            sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }

        /* t4_detach is idempotent and cleans up partial state on failure. */
        if (rc != 0)
                t4_detach(dev);
        else
                t4_sysctls(sc);

        return (rc);
}
  754 
/*
 * Idempotent.  Also used by t4_attach's error path for cleanup, so it must
 * tolerate a partially initialized adapter.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce interrupts before tearing anything down. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the nexus cdev so cxgbetool can no longer reach us. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /*
         * Detach the child ports first; this is the only failure point, and
         * everything after it must succeed for idempotence.
         */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        /* Per-port teardown: firmware VI, newbus child, lock, softc. */
        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Say goodbye to the firmware if we ever said hello. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        /* Unlink from the global adapter list. */
        if (mtx_initialized(&sc->sc_lock)) {
                mtx_lock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                mtx_unlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);

        /* Zero the softc so a repeat detach is harmless (idempotence). */
        bzero(sc, sizeof(*sc));

        return (0);
}
  845 
  846 
  847 static int
  848 cxgbe_probe(device_t dev)
  849 {
  850         char buf[128];
  851         struct port_info *pi = device_get_softc(dev);
  852 
  853         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
  854         device_set_desc_copy(dev, buf);
  855 
  856         return (BUS_PROBE_DEFAULT);
  857 }
  858 
  859 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
  860     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
  861     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
  862 #define T4_CAP_ENABLE (T4_CAP)
  863 
/*
 * Attach routine for a cxgbe port: allocate and configure the ifnet,
 * advertise hardware capabilities, set up ifmedia, and attach to the
 * ethernet layer.  Called by newbus for each port child added by t4_attach.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* ifnet methods. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        /* Everything in T4_CAP is enabled by default; TOE is opt-in. */
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        ether_ifattach(ifp, pi->hw_addr);

        /* Report the queue configuration chosen during adapter attach. */
#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
  918 
/*
 * Detach routine for a cxgbe port.  Marks the port as doomed, waits for any
 * in-flight synchronized operation to finish, then tears down the ifnet,
 * media, and queues.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait our turn: only one synchronized op at a time on the adapter. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        /* Stop the tick callout under the port lock; drain outside it. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release the busy marker taken above and wake any waiters. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
  960 
  961 static void
  962 cxgbe_init(void *arg)
  963 {
  964         struct port_info *pi = arg;
  965         struct adapter *sc = pi->adapter;
  966 
  967         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
  968                 return;
  969         cxgbe_init_synchronized(pi);
  970         end_synchronized_op(sc, 0);
  971 }
  972 
  973 static int
  974 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
  975 {
  976         int rc = 0, mtu, flags;
  977         struct port_info *pi = ifp->if_softc;
  978         struct adapter *sc = pi->adapter;
  979         struct ifreq *ifr = (struct ifreq *)data;
  980         uint32_t mask;
  981 
  982         switch (cmd) {
  983         case SIOCSIFMTU:
  984                 mtu = ifr->ifr_mtu;
  985                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
  986                         return (EINVAL);
  987 
  988                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
  989                 if (rc)
  990                         return (rc);
  991                 ifp->if_mtu = mtu;
  992                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  993                         t4_update_fl_bufsize(ifp);
  994                         rc = update_mac_settings(pi, XGMAC_MTU);
  995                 }
  996                 end_synchronized_op(sc, 0);
  997                 break;
  998 
  999         case SIOCSIFFLAGS:
 1000                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
 1001                 if (rc)
 1002                         return (rc);
 1003 
 1004                 if (ifp->if_flags & IFF_UP) {
 1005                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1006                                 flags = pi->if_flags;
 1007                                 if ((ifp->if_flags ^ flags) &
 1008                                     (IFF_PROMISC | IFF_ALLMULTI)) {
 1009                                         rc = update_mac_settings(pi,
 1010                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
 1011                                 }
 1012                         } else
 1013                                 rc = cxgbe_init_synchronized(pi);
 1014                         pi->if_flags = ifp->if_flags;
 1015                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1016                         rc = cxgbe_uninit_synchronized(pi);
 1017                 end_synchronized_op(sc, 0);
 1018                 break;
 1019 
 1020         case SIOCADDMULTI:      
 1021         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
 1022                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
 1023                 if (rc)
 1024                         return (rc);
 1025                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1026                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
 1027                 end_synchronized_op(sc, LOCK_HELD);
 1028                 break;
 1029 
 1030         case SIOCSIFCAP:
 1031                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
 1032                 if (rc)
 1033                         return (rc);
 1034 
 1035                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1036                 if (mask & IFCAP_TXCSUM) {
 1037                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1038                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
 1039 
 1040                         if (IFCAP_TSO4 & ifp->if_capenable &&
 1041                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
 1042                                 ifp->if_capenable &= ~IFCAP_TSO4;
 1043                                 if_printf(ifp,
 1044                                     "tso4 disabled due to -txcsum.\n");
 1045                         }
 1046                 }
 1047                 if (mask & IFCAP_TXCSUM_IPV6) {
 1048                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
 1049                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
 1050 
 1051                         if (IFCAP_TSO6 & ifp->if_capenable &&
 1052                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
 1053                                 ifp->if_capenable &= ~IFCAP_TSO6;
 1054                                 if_printf(ifp,
 1055                                     "tso6 disabled due to -txcsum6.\n");
 1056                         }
 1057                 }
 1058                 if (mask & IFCAP_RXCSUM)
 1059                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1060                 if (mask & IFCAP_RXCSUM_IPV6)
 1061                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
 1062 
 1063                 /*
 1064                  * Note that we leave CSUM_TSO alone (it is always set).  The
 1065                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
 1066                  * sending a TSO request our way, so it's sufficient to toggle
 1067                  * IFCAP_TSOx only.
 1068                  */
 1069                 if (mask & IFCAP_TSO4) {
 1070                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
 1071                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
 1072                                 if_printf(ifp, "enable txcsum first.\n");
 1073                                 rc = EAGAIN;
 1074                                 goto fail;
 1075                         }
 1076                         ifp->if_capenable ^= IFCAP_TSO4;
 1077                 }
 1078                 if (mask & IFCAP_TSO6) {
 1079                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
 1080                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
 1081                                 if_printf(ifp, "enable txcsum6 first.\n");
 1082                                 rc = EAGAIN;
 1083                                 goto fail;
 1084                         }
 1085                         ifp->if_capenable ^= IFCAP_TSO6;
 1086                 }
 1087                 if (mask & IFCAP_LRO) {
 1088 #if defined(INET) || defined(INET6)
 1089                         int i;
 1090                         struct sge_rxq *rxq;
 1091 
 1092                         ifp->if_capenable ^= IFCAP_LRO;
 1093                         for_each_rxq(pi, i, rxq) {
 1094                                 if (ifp->if_capenable & IFCAP_LRO)
 1095                                         rxq->iq.flags |= IQ_LRO_ENABLED;
 1096                                 else
 1097                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
 1098                         }
 1099 #endif
 1100                 }
 1101 #ifdef TCP_OFFLOAD
 1102                 if (mask & IFCAP_TOE) {
 1103                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
 1104 
 1105                         rc = toe_capability(pi, enable);
 1106                         if (rc != 0)
 1107                                 goto fail;
 1108 
 1109                         ifp->if_capenable ^= mask;
 1110                 }
 1111 #endif
 1112                 if (mask & IFCAP_VLAN_HWTAGGING) {
 1113                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1114                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1115                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
 1116                 }
 1117                 if (mask & IFCAP_VLAN_MTU) {
 1118                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 1119 
 1120                         /* Need to find out how to disable auto-mtu-inflation */
 1121                 }
 1122                 if (mask & IFCAP_VLAN_HWTSO)
 1123                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 1124                 if (mask & IFCAP_VLAN_HWCSUM)
 1125                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1126 
 1127 #ifdef VLAN_CAPABILITIES
 1128                 VLAN_CAPABILITIES(ifp);
 1129 #endif
 1130 fail:
 1131                 end_synchronized_op(sc, 0);
 1132                 break;
 1133 
 1134         case SIOCSIFMEDIA:
 1135         case SIOCGIFMEDIA:
 1136                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
 1137                 break;
 1138 
 1139         default:
 1140                 rc = ether_ioctl(ifp, cmd, data);
 1141         }
 1142 
 1143         return (rc);
 1144 }
 1145 
/*
 * if_transmit method.  Selects a tx queue (by flowid when present), then
 * either transmits directly while holding the queue lock, or enqueues the
 * mbuf in the queue's buf_ring for the current lock holder to drain.
 *
 * Returns 0 if the mbuf was transmitted or queued; the mbuf is consumed in
 * every case except a failed drbr_enqueue.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Spread flows across this port's tx queues. */
        if (m->m_flags & M_FLOWID)
                txq += (m->m_pkthdr.flowid % pi->ntxq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                /* Schedule the tx callout to pick the mbuf up, unless the
                 * queue is being torn down (EQ_DOOMED). */
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
 1212 
/*
 * if_qflush method: discard the held-back mbuf and everything buffered in
 * each tx queue's buf_ring, then flush the generic ifnet queue too.
 */
static void
cxgbe_qflush(struct ifnet *ifp)
{
        struct port_info *pi = ifp->if_softc;
        struct sge_txq *txq;
        int i;
        struct mbuf *m;

        /* queues do not exist if !PORT_INIT_DONE. */
        if (pi->flags & PORT_INIT_DONE) {
                for_each_txq(pi, i, txq) {
                        TXQ_LOCK(txq);
                        m_freem(txq->m);        /* m_freem(NULL) is a no-op */
                        txq->m = NULL;
                        while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
                                m_freem(m);
                        TXQ_UNLOCK(txq);
                }
        }
        if_qflush(ifp);
}
 1234 
 1235 static int
 1236 cxgbe_media_change(struct ifnet *ifp)
 1237 {
 1238         struct port_info *pi = ifp->if_softc;
 1239 
 1240         device_printf(pi->dev, "%s unimplemented.\n", __func__);
 1241 
 1242         return (EOPNOTSUPP);
 1243 }
 1244 
/*
 * ifmedia status callback: reports link validity/activity and, for
 * autoselect media, the active speed (full duplex is always reported).
 */
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct port_info *pi = ifp->if_softc;
        struct ifmedia_entry *cur = pi->media.ifm_cur;
        int speed = pi->link_cfg.speed;
        /* ifm_data encodes port and module type; used to spot module swaps. */
        int data = (pi->port_type << 8) | pi->mod_type;

        if (cur->ifm_data != data) {
                /* Transceiver module changed; rebuild the media list. */
                build_medialist(pi);
                cur = pi->media.ifm_cur;
        }

        ifmr->ifm_status = IFM_AVALID;
        if (!pi->link_cfg.link_ok)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;

        /* active and current will differ iff current media is autoselect. */
        if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
                return;

        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        if (speed == SPEED_10000)
                ifmr->ifm_active |= IFM_10G_T;
        else if (speed == SPEED_1000)
                ifmr->ifm_active |= IFM_1000_T;
        else if (speed == SPEED_100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (speed == SPEED_10)
                ifmr->ifm_active |= IFM_10_T;
        else
                KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
                            speed));
}
 1281 
/*
 * Stop the adapter after a fatal hardware error: halt the SGE, disable all
 * interrupts, and log an emergency message.
 */
void
t4_fatal_err(struct adapter *sc)
{
        /* Clear the SGE global-enable bit to stop all DMA. */
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
 1290 
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X tables).  On failure, resources
 * already acquired are released by t4_detach (which the attach error path
 * invokes), so no cleanup is done here.
 */
static int
map_bars(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        /* Cache the tag/handle/size for register access helpers. */
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
 1315 
/*
 * Program the chip's three PCIe memory-access window decoders (windows 0-2
 * at MEMWIN{0,1,2}_BASE with their respective apertures).
 */
static void
setup_memwin(struct adapter *sc)
{
        uint32_t bar0;

        /*
         * Read low 32b of bar0 indirectly via the hardware backdoor mechanism.
         * Works from within PCI passthrough environments too, where
         * rman_get_start() can return a different value.  We need to program
         * the memory window decoders with the actual addresses that will be
         * coming across the PCIe link.
         */
        bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
        bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;   /* strip the BAR flag bits */

        /* Each window: base address | BAR index 0 | log2(aperture) - 10. */
        t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
                     (bar0 + MEMWIN0_BASE) | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

        t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
                     (bar0 + MEMWIN1_BASE) | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

        t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
                     (bar0 + MEMWIN2_BASE) | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
 1346 
 1347 static int
 1348 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
 1349     struct intrs_and_queues *iaq)
 1350 {
 1351         int rc, itype, navail, nrxq10g, nrxq1g, n;
 1352         int nofldrxq10g = 0, nofldrxq1g = 0;
 1353 
 1354         bzero(iaq, sizeof(*iaq));
 1355 
 1356         iaq->ntxq10g = t4_ntxq10g;
 1357         iaq->ntxq1g = t4_ntxq1g;
 1358         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
 1359         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
 1360 #ifdef TCP_OFFLOAD
 1361         if (is_offload(sc)) {
 1362                 iaq->nofldtxq10g = t4_nofldtxq10g;
 1363                 iaq->nofldtxq1g = t4_nofldtxq1g;
 1364                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
 1365                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
 1366         }
 1367 #endif
 1368 
 1369         for (itype = INTR_MSIX; itype; itype >>= 1) {
 1370 
 1371                 if ((itype & t4_intr_types) == 0)
 1372                         continue;       /* not allowed */
 1373 
 1374                 if (itype == INTR_MSIX)
 1375                         navail = pci_msix_count(sc->dev);
 1376                 else if (itype == INTR_MSI)
 1377                         navail = pci_msi_count(sc->dev);
 1378                 else
 1379                         navail = 1;
 1380 restart:
 1381                 if (navail == 0)
 1382                         continue;
 1383 
 1384                 iaq->intr_type = itype;
 1385                 iaq->intr_flags = 0;
 1386 
 1387                 /*
 1388                  * Best option: an interrupt vector for errors, one for the
 1389                  * firmware event queue, and one each for each rxq (NIC as well
 1390                  * as offload).
 1391                  */
 1392                 iaq->nirq = T4_EXTRA_INTR;
 1393                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
 1394                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
 1395                 if (iaq->nirq <= navail &&
 1396                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
 1397                         iaq->intr_flags |= INTR_DIRECT;
 1398                         goto allocate;
 1399                 }
 1400 
 1401                 /*
 1402                  * Second best option: an interrupt vector for errors, one for
 1403                  * the firmware event queue, and one each for either NIC or
 1404                  * offload rxq's.
 1405                  */
 1406                 iaq->nirq = T4_EXTRA_INTR;
 1407                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
 1408                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
 1409                 if (iaq->nirq <= navail &&
 1410                     (itype != INTR_MSI || powerof2(iaq->nirq)))
 1411                         goto allocate;
 1412 
 1413                 /*
 1414                  * Next best option: an interrupt vector for errors, one for the
 1415                  * firmware event queue, and at least one per port.  At this
 1416                  * point we know we'll have to downsize nrxq or nofldrxq to fit
 1417                  * what's available to us.
 1418                  */
 1419                 iaq->nirq = T4_EXTRA_INTR;
 1420                 iaq->nirq += n10g + n1g;
 1421                 if (iaq->nirq <= navail) {
 1422                         int leftover = navail - iaq->nirq;
 1423 
 1424                         if (n10g > 0) {
 1425                                 int target = max(nrxq10g, nofldrxq10g);
 1426 
 1427                                 n = 1;
 1428                                 while (n < target && leftover >= n10g) {
 1429                                         leftover -= n10g;
 1430                                         iaq->nirq += n10g;
 1431                                         n++;
 1432                                 }
 1433                                 iaq->nrxq10g = min(n, nrxq10g);
 1434 #ifdef TCP_OFFLOAD
 1435                                 if (is_offload(sc))
 1436                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
 1437 #endif
 1438                         }
 1439 
 1440                         if (n1g > 0) {
 1441                                 int target = max(nrxq1g, nofldrxq1g);
 1442 
 1443                                 n = 1;
 1444                                 while (n < target && leftover >= n1g) {
 1445                                         leftover -= n1g;
 1446                                         iaq->nirq += n1g;
 1447                                         n++;
 1448                                 }
 1449                                 iaq->nrxq1g = min(n, nrxq1g);
 1450 #ifdef TCP_OFFLOAD
 1451                                 if (is_offload(sc))
 1452                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
 1453 #endif
 1454                         }
 1455 
 1456                         if (itype != INTR_MSI || powerof2(iaq->nirq))
 1457                                 goto allocate;
 1458                 }
 1459 
 1460                 /*
 1461                  * Least desirable option: one interrupt vector for everything.
 1462                  */
 1463                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
 1464 #ifdef TCP_OFFLOAD
 1465                 if (is_offload(sc))
 1466                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
 1467 #endif
 1468 
 1469 allocate:
 1470                 navail = iaq->nirq;
 1471                 rc = 0;
 1472                 if (itype == INTR_MSIX)
 1473                         rc = pci_alloc_msix(sc->dev, &navail);
 1474                 else if (itype == INTR_MSI)
 1475                         rc = pci_alloc_msi(sc->dev, &navail);
 1476 
 1477                 if (rc == 0) {
 1478                         if (navail == iaq->nirq)
 1479                                 return (0);
 1480 
 1481                         /*
 1482                          * Didn't get the number requested.  Use whatever number
 1483                          * the kernel is willing to allocate (it's in navail).
 1484                          */
 1485                         device_printf(sc->dev, "fewer vectors than requested, "
 1486                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
 1487                             itype, iaq->nirq, navail);
 1488                         pci_release_msi(sc->dev);
 1489                         goto restart;
 1490                 }
 1491 
 1492                 device_printf(sc->dev,
 1493                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
 1494                     itype, rc, iaq->nirq, navail);
 1495         }
 1496 
 1497         device_printf(sc->dev,
 1498             "failed to find a usable interrupt type.  "
 1499             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
 1500             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
 1501 
 1502         return (ENXIO);
 1503 }
 1504 
 1505 /*
 1506  * Is the given firmware compatible with the one the driver was compiled with?
 1507  */
 1508 static int
 1509 fw_compatible(const struct fw_hdr *hdr)
 1510 {
 1511 
 1512         if (hdr->fw_ver == htonl(FW_VERSION))
 1513                 return (1);
 1514 
 1515         /*
 1516          * XXX: Is this too conservative?  Perhaps I should limit this to the
 1517          * features that are supported in the driver.
 1518          */
 1519         if (hdr->intfver_nic == FW_HDR_INTFVER_NIC &&
 1520             hdr->intfver_vnic == FW_HDR_INTFVER_VNIC &&
 1521             hdr->intfver_ofld == FW_HDR_INTFVER_OFLD &&
 1522             hdr->intfver_ri == FW_HDR_INTFVER_RI &&
 1523             hdr->intfver_iscsipdu == FW_HDR_INTFVER_ISCSIPDU &&
 1524             hdr->intfver_iscsi == FW_HDR_INTFVER_ISCSI &&
 1525             hdr->intfver_fcoepdu == FW_HDR_INTFVER_FCOEPDU &&
 1526             hdr->intfver_fcoe == FW_HDR_INTFVER_FCOEPDU)
 1527                 return (1);
 1528 
 1529         return (0);
 1530 }
 1531 
 1532 /*
 1533  * Install a compatible firmware (if required), establish contact with it (by
 1534  * saying hello), and reset the device.  If we end up as the master driver,
 1535  * partition adapter resources by providing a configuration file to the
 1536  * firmware.
 1537  */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
	int rc, card_fw_usable, kld_fw_usable;
	enum dev_state state;
	struct fw_hdr *card_fw;		/* copy of the fw header read from flash */
	const struct fw_hdr *kld_fw;	/* firmware image bundled with the KLD */

	default_cfg = firmware_get(T4_CFGNAME);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible((const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(T4_FWNAME);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	/*
	 * Short circuit for the common case: the firmware on the card is an
	 * exact match and the KLD is an exact match too, or it's
	 * absent/incompatible, or we're prohibited from using it.  Note that
	 * t4_fw_install = 2 is ignored here -- use cxgbetool loadfw if you want
	 * to reinstall the same firmware as the one on the card.
	 */
	if (card_fw_usable && card_fw->fw_ver == htonl(FW_VERSION) &&
	    (!kld_fw_usable || kld_fw->fw_ver == htonl(FW_VERSION) ||
	    t4_fw_install == 0))
		goto hello;

	/*
	 * Flash the KLD's firmware when the card's is missing/incompatible,
	 * the KLD's is newer, or a reinstall was requested (t4_fw_install 2).
	 */
	if (kld_fw_usable && (!card_fw_usable ||
	    ntohl(kld_fw->fw_ver) > ntohl(card_fw->fw_ver) ||
	    (t4_fw_install == 2 && kld_fw->fw_ver != card_fw->fw_ver))) {
		uint32_t v = ntohl(kld_fw->fw_ver);

		device_printf(sc->dev,
		    "installing firmware %d.%d.%d.%d on card.\n",
		    G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
		    G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));

		rc = -t4_load_fw(sc, fw->data, fw->datasize);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
	}

	if (!card_fw_usable) {
		uint32_t c, k;

		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install,
		    G_FW_HDR_FW_VER_MAJOR(FW_VERSION),
		    G_FW_HDR_FW_VER_MINOR(FW_VERSION),
		    G_FW_HDR_FW_VER_MICRO(FW_VERSION),
		    G_FW_HDR_FW_VER_BUILD(FW_VERSION),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		goto done;
	}

hello:
	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));

	/* Contact firmware.  */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d.\n", rc);
		goto done;
	}
	/* A non-negative return equal to our own mailbox: we're the master. */
	if (rc == sc->mbox)
		sc->flags |= MASTER_PF;

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* Skip the goodbye if the firmware looks hung/unreachable. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}

	/* Partition adapter resources as specified in the config file. */
	if (sc->flags & MASTER_PF) {
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s",
		    pci_get_device(sc->dev) == 0x440a ? "uwire" : t4_cfg_file);
		if (strncmp(sc->cfg_file, "default", sizeof(sc->cfg_file))) {
			char s[32];

			/* Try the named config module; fall back to default. */
			snprintf(s, sizeof(s), "t4fw_cfg_%s", sc->cfg_file);
			cfg = firmware_get(s);
			if (cfg == NULL) {
				device_printf(sc->dev,
				    "unable to locate %s module, "
				    "will use default config file.\n", s);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", "default");
			}
		}

		rc = partition_resources(sc, cfg ? cfg : default_cfg);
		if (rc != 0)
			goto done;	/* error message displayed already */
	} else {
		/* Not the master: partitioning is another PF's job. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "notme");
		sc->cfcsum = (u_int)-1;
	}

	sc->flags |= FW_OK;

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
 1694 
/*
 * Build the 32-bit mnemonic|index words passed to t4_query_params /
 * t4_set_params for device-wide (DEV) and per-PF/VF (PFVF) firmware
 * parameters.  #undef'd again after the get/set_params functions below.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
 1701 
 1702 /*
 1703  * Upload configuration file to card's memory.
 1704  */
static int
upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
    uint32_t *ma)
{
	int rc, i;
	uint32_t param, val, mtype, maddr, bar, off, win, remaining;
	const uint32_t *b;

	/* Figure out where the firmware wants us to upload it. */
	param = FW_PARAM_DEV(CF);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc != 0) {
		/* Firmwares without config file support will fail this way */
		device_printf(sc->dev,
		    "failed to query config file location: %d.\n", rc);
		return (rc);
	}
	/* Hand the memory type and address back to the caller via mt/ma. */
	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

	/* The upload address must be 32-bit aligned. */
	if (maddr & 3) {
		device_printf(sc->dev,
		    "cannot upload config file (type %u, addr %x).\n",
		    mtype, maddr);
		return (EFAULT);
	}

	/* Translate mtype/maddr to an address suitable for the PCIe window */
	val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
	switch (mtype) {
	case FW_MEMTYPE_CF_EDC0:
		if (!(val & F_EDRAM0_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr += G_EDRAM0_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EDC1:
		if (!(val & F_EDRAM1_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr += G_EDRAM1_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EXTMEM:
		if (!(val & F_EXT_MEM_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr += G_EXT_MEM_BASE(bar) << 20;
		break;

	default:
err:
		/* Firmware asked for a memory type that isn't enabled. */
		device_printf(sc->dev,
		    "cannot upload config file (type %u, enabled %u).\n",
		    mtype, val);
		return (EFAULT);
	}

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the upload location.
	 */
	win = maddr & ~0xf;
	off = maddr - win;  /* offset from the start of the window. */
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
	/* Read back to flush the window update before using it. */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

	/* The whole file must fit inside the window in a single pass. */
	remaining = fw->datasize;
	if (remaining > FLASH_CFG_MAX_SIZE ||
	    remaining > MEMWIN2_APERTURE - off) {
		device_printf(sc->dev, "cannot upload config file all at once "
		    "(size %u, max %u, room %u).\n",
		    remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
		return (EFBIG);
	}

	/*
	 * XXX: sheer laziness.  We deliberately added 4 bytes of useless
	 * stuffing/comments at the end of the config file so it's ok to simply
	 * throw away the last remaining bytes when the config file is not an
	 * exact multiple of 4.
	 */
	b = fw->data;
	for (i = 0; remaining >= 4; i += 4, remaining -= 4)
		t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);

	/* rc is 0 here: the last failure path above returned directly. */
	return (rc);
}
 1795 
 1796 /*
 1797  * Partition chip resources for use between various PFs, VFs, etc.  This is done
 1798  * by uploading the firmware configuration file to the adapter and instructing
 1799  * the firmware to process it.
 1800  */
static int
partition_resources(struct adapter *sc, const struct firmware *cfg)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

	/* On any upload failure, fall back to the config file in flash. */
	rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
	if (rc != 0) {
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	/* Ask the firmware to pre-process (validate) the config file. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	/* A checksum mismatch is reported but is not treated as fatal. */
	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/*
 * Mask each capability field with the matching t4_*_allowed tunable and
 * cache the result in the softc.  NOTE(review): caps.x is big-endian on the
 * wire, so the second line reads like it should be be16toh(); for 16-bit
 * values the two are the same byte swap, so behavior is unaffected --
 * confirm intent.
 */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
	sc->x = htobe16(caps.x); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}
 1865 
 1866 /*
 1867  * Retrieve parameters that are needed (or nice to have) prior to calling
 1868  * t4_sge_init and t4_fw_initialize.
 1869  */
 1870 static int
 1871 get_params__pre_init(struct adapter *sc)
 1872 {
 1873         int rc;
 1874         uint32_t param[2], val[2];
 1875         struct fw_devlog_cmd cmd;
 1876         struct devlog_params *dlog = &sc->params.devlog;
 1877 
 1878         param[0] = FW_PARAM_DEV(PORTVEC);
 1879         param[1] = FW_PARAM_DEV(CCLK);
 1880         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
 1881         if (rc != 0) {
 1882                 device_printf(sc->dev,
 1883                     "failed to query parameters (pre_init): %d.\n", rc);
 1884                 return (rc);
 1885         }
 1886 
 1887         sc->params.portvec = val[0];
 1888         sc->params.nports = bitcount32(val[0]);
 1889         sc->params.vpd.cclk = val[1];
 1890 
 1891         /* Read device log parameters. */
 1892         bzero(&cmd, sizeof(cmd));
 1893         cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
 1894             F_FW_CMD_REQUEST | F_FW_CMD_READ);
 1895         cmd.retval_len16 = htobe32(FW_LEN16(cmd));
 1896         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
 1897         if (rc != 0) {
 1898                 device_printf(sc->dev,
 1899                     "failed to get devlog parameters: %d.\n", rc);
 1900                 bzero(dlog, sizeof (*dlog));
 1901                 rc = 0; /* devlog isn't critical for device operation */
 1902         } else {
 1903                 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
 1904                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
 1905                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
 1906                 dlog->size = be32toh(cmd.memsize_devlog);
 1907         }
 1908 
 1909         return (rc);
 1910 }
 1911 
 1912 /*
 1913  * Retrieve various parameters that are of interest to the driver.  The device
 1914  * has been initialized by the firmware at this point.
 1915  */
 1916 static int
 1917 get_params__post_init(struct adapter *sc)
 1918 {
 1919         int rc;
 1920         uint32_t param[7], val[7];
 1921         struct fw_caps_config_cmd caps;
 1922 
 1923         param[0] = FW_PARAM_PFVF(IQFLINT_START);
 1924         param[1] = FW_PARAM_PFVF(EQ_START);
 1925         param[2] = FW_PARAM_PFVF(FILTER_START);
 1926         param[3] = FW_PARAM_PFVF(FILTER_END);
 1927         param[4] = FW_PARAM_PFVF(L2T_START);
 1928         param[5] = FW_PARAM_PFVF(L2T_END);
 1929         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
 1930         if (rc != 0) {
 1931                 device_printf(sc->dev,
 1932                     "failed to query parameters (post_init): %d.\n", rc);
 1933                 return (rc);
 1934         }
 1935 
 1936         sc->sge.iq_start = val[0];
 1937         sc->sge.eq_start = val[1];
 1938         sc->tids.ftid_base = val[2];
 1939         sc->tids.nftids = val[3] - val[2] + 1;
 1940         sc->vres.l2t.start = val[4];
 1941         sc->vres.l2t.size = val[5] - val[4] + 1;
 1942         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
 1943             ("%s: L2 table size (%u) larger than expected (%u)",
 1944             __func__, sc->vres.l2t.size, L2T_SIZE));
 1945 
 1946         /* get capabilites */
 1947         bzero(&caps, sizeof(caps));
 1948         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 1949             F_FW_CMD_REQUEST | F_FW_CMD_READ);
 1950         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
 1951         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
 1952         if (rc != 0) {
 1953                 device_printf(sc->dev,
 1954                     "failed to get card capabilities: %d.\n", rc);
 1955                 return (rc);
 1956         }
 1957 
 1958         if (caps.toecaps) {
 1959                 /* query offload-related parameters */
 1960                 param[0] = FW_PARAM_DEV(NTID);
 1961                 param[1] = FW_PARAM_PFVF(SERVER_START);
 1962                 param[2] = FW_PARAM_PFVF(SERVER_END);
 1963                 param[3] = FW_PARAM_PFVF(TDDP_START);
 1964                 param[4] = FW_PARAM_PFVF(TDDP_END);
 1965                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
 1966                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
 1967                 if (rc != 0) {
 1968                         device_printf(sc->dev,
 1969                             "failed to query TOE parameters: %d.\n", rc);
 1970                         return (rc);
 1971                 }
 1972                 sc->tids.ntids = val[0];
 1973                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
 1974                 sc->tids.stid_base = val[1];
 1975                 sc->tids.nstids = val[2] - val[1] + 1;
 1976                 sc->vres.ddp.start = val[3];
 1977                 sc->vres.ddp.size = val[4] - val[3] + 1;
 1978                 sc->params.ofldq_wr_cred = val[5];
 1979                 sc->params.offload = 1;
 1980         }
 1981         if (caps.rdmacaps) {
 1982                 param[0] = FW_PARAM_PFVF(STAG_START);
 1983                 param[1] = FW_PARAM_PFVF(STAG_END);
 1984                 param[2] = FW_PARAM_PFVF(RQ_START);
 1985                 param[3] = FW_PARAM_PFVF(RQ_END);
 1986                 param[4] = FW_PARAM_PFVF(PBL_START);
 1987                 param[5] = FW_PARAM_PFVF(PBL_END);
 1988                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
 1989                 if (rc != 0) {
 1990                         device_printf(sc->dev,
 1991                             "failed to query RDMA parameters(1): %d.\n", rc);
 1992                         return (rc);
 1993                 }
 1994                 sc->vres.stag.start = val[0];
 1995                 sc->vres.stag.size = val[1] - val[0] + 1;
 1996                 sc->vres.rq.start = val[2];
 1997                 sc->vres.rq.size = val[3] - val[2] + 1;
 1998                 sc->vres.pbl.start = val[4];
 1999                 sc->vres.pbl.size = val[5] - val[4] + 1;
 2000 
 2001                 param[0] = FW_PARAM_PFVF(SQRQ_START);
 2002                 param[1] = FW_PARAM_PFVF(SQRQ_END);
 2003                 param[2] = FW_PARAM_PFVF(CQ_START);
 2004                 param[3] = FW_PARAM_PFVF(CQ_END);
 2005                 param[4] = FW_PARAM_PFVF(OCQ_START);
 2006                 param[5] = FW_PARAM_PFVF(OCQ_END);
 2007                 rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
 2008                 if (rc != 0) {
 2009                         device_printf(sc->dev,
 2010                             "failed to query RDMA parameters(2): %d.\n", rc);
 2011                         return (rc);
 2012                 }
 2013                 sc->vres.qp.start = val[0];
 2014                 sc->vres.qp.size = val[1] - val[0] + 1;
 2015                 sc->vres.cq.start = val[2];
 2016                 sc->vres.cq.size = val[3] - val[2] + 1;
 2017                 sc->vres.ocq.start = val[4];
 2018                 sc->vres.ocq.size = val[5] - val[4] + 1;
 2019         }
 2020         if (caps.iscsicaps) {
 2021                 param[0] = FW_PARAM_PFVF(ISCSI_START);
 2022                 param[1] = FW_PARAM_PFVF(ISCSI_END);
 2023                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
 2024                 if (rc != 0) {
 2025                         device_printf(sc->dev,
 2026                             "failed to query iSCSI parameters: %d.\n", rc);
 2027                         return (rc);
 2028                 }
 2029                 sc->vres.iscsi.start = val[0];
 2030                 sc->vres.iscsi.size = val[1] - val[0] + 1;
 2031         }
 2032 
 2033         /* These are finalized by FW initialization, load their values now */
 2034         val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
 2035         sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
 2036         sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
 2037         t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
 2038 
 2039         return (rc);
 2040 }
 2041 
 2042 static int
 2043 set_params__post_init(struct adapter *sc)
 2044 {
 2045         uint32_t param, val;
 2046         int rc;
 2047 
 2048         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
 2049         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 2050         if (rc == 0) {
 2051                 /* ask for encapsulated CPLs */
 2052                 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
 2053                 val = 1;
 2054                 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 2055                 if (rc != 0) {
 2056                         device_printf(sc->dev,
 2057                             "failed to set parameter (post_init): %d.\n", rc);
 2058                         return (rc);
 2059                 }
 2060         } else if (rc != FW_EINVAL) {
 2061                 device_printf(sc->dev,
 2062                     "failed to check for encapsulated CPLs: %d.\n", rc);
 2063         } else
 2064                 rc = 0; /* the firmware doesn't support the param, no worries */
 2065 
 2066         return (rc);
 2067 }
 2068 
 2069 #undef FW_PARAM_PFVF
 2070 #undef FW_PARAM_DEV
 2071 
 2072 static void
 2073 t4_set_desc(struct adapter *sc)
 2074 {
 2075         char buf[128];
 2076         struct adapter_params *p = &sc->params;
 2077 
 2078         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
 2079             p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
 2080 
 2081         device_set_desc_copy(sc->dev, buf);
 2082 }
 2083 
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Rebuild the media list from scratch. */
	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	/* Stash the port and module type in the entry's data word. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* Media depends on the transceiver module plugged in. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module present: report IFM_NONE, drop FDX. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_KR:
	default:
		/* Backplane and unrecognized port types. */
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
 2170 
 2171 #define FW_MAC_EXACT_CHUNK      7
 2172 
 2173 /*
 2174  * Program the port's XGMAC based on parameters in ifnet.  The caller also
 2175  * indicates which parameters should be programmed (the rest are left alone).
 2176  */
 2177 static int
 2178 update_mac_settings(struct port_info *pi, int flags)
 2179 {
 2180         int rc;
 2181         struct ifnet *ifp = pi->ifp;
 2182         struct adapter *sc = pi->adapter;
 2183         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
 2184 
 2185         ASSERT_SYNCHRONIZED_OP(sc);
 2186         KASSERT(flags, ("%s: not told what to update.", __func__));
 2187 
 2188         if (flags & XGMAC_MTU)
 2189                 mtu = ifp->if_mtu;
 2190 
 2191         if (flags & XGMAC_PROMISC)
 2192                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
 2193 
 2194         if (flags & XGMAC_ALLMULTI)
 2195                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
 2196 
 2197         if (flags & XGMAC_VLANEX)
 2198                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
 2199 
 2200         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
 2201             vlanex, false);
 2202         if (rc) {
 2203                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
 2204                 return (rc);
 2205         }
 2206 
 2207         if (flags & XGMAC_UCADDR) {
 2208                 uint8_t ucaddr[ETHER_ADDR_LEN];
 2209 
 2210                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
 2211                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
 2212                     ucaddr, true, true);
 2213                 if (rc < 0) {
 2214                         rc = -rc;
 2215                         if_printf(ifp, "change_mac failed: %d\n", rc);
 2216                         return (rc);
 2217                 } else {
 2218                         pi->xact_addr_filt = rc;
 2219                         rc = 0;
 2220                 }
 2221         }
 2222 
 2223         if (flags & XGMAC_MCADDRS) {
 2224                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
 2225                 int del = 1;
 2226                 uint64_t hash = 0;
 2227                 struct ifmultiaddr *ifma;
 2228                 int i = 0, j;
 2229 
 2230                 if_maddr_rlock(ifp);
 2231                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2232                         if (ifma->ifma_addr->sa_family != AF_LINK)
 2233                                 continue;
 2234                         mcaddr[i++] =
 2235                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
 2236 
 2237                         if (i == FW_MAC_EXACT_CHUNK) {
 2238                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
 2239                                     del, i, mcaddr, NULL, &hash, 0);
 2240                                 if (rc < 0) {
 2241                                         rc = -rc;
 2242                                         for (j = 0; j < i; j++) {
 2243                                                 if_printf(ifp,
 2244                                                     "failed to add mc address"
 2245                                                     " %02x:%02x:%02x:"
 2246                                                     "%02x:%02x:%02x rc=%d\n",
 2247                                                     mcaddr[j][0], mcaddr[j][1],
 2248                                                     mcaddr[j][2], mcaddr[j][3],
 2249                                                     mcaddr[j][4], mcaddr[j][5],
 2250                                                     rc);
 2251                                         }
 2252                                         goto mcfail;
 2253                                 }
 2254                                 del = 0;
 2255                                 i = 0;
 2256                         }
 2257                 }
 2258                 if (i > 0) {
 2259                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
 2260                             del, i, mcaddr, NULL, &hash, 0);
 2261                         if (rc < 0) {
 2262                                 rc = -rc;
 2263                                 for (j = 0; j < i; j++) {
 2264                                         if_printf(ifp,
 2265                                             "failed to add mc address"
 2266                                             " %02x:%02x:%02x:"
 2267                                             "%02x:%02x:%02x rc=%d\n",
 2268                                             mcaddr[j][0], mcaddr[j][1],
 2269                                             mcaddr[j][2], mcaddr[j][3],
 2270                                             mcaddr[j][4], mcaddr[j][5],
 2271                                             rc);
 2272                                 }
 2273                                 goto mcfail;
 2274                         }
 2275                 }
 2276 
 2277                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
 2278                 if (rc != 0)
 2279                         if_printf(ifp, "failed to set mc address hash: %d", rc);
 2280 mcfail:
 2281                 if_maddr_runlock(ifp);
 2282         }
 2283 
 2284         return (rc);
 2285 }
 2286 
 2287 int
 2288 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
 2289     char *wmesg)
 2290 {
 2291         int rc, pri;
 2292 
 2293 #ifdef WITNESS
 2294         /* the caller thinks it's ok to sleep, but is it really? */
 2295         if (flags & SLEEP_OK)
 2296                 pause("t4slptst", 1);
 2297 #endif
 2298 
 2299         if (INTR_OK)
 2300                 pri = PCATCH;
 2301         else
 2302                 pri = 0;
 2303 
 2304         ADAPTER_LOCK(sc);
 2305         for (;;) {
 2306 
 2307                 if (pi && IS_DOOMED(pi)) {
 2308                         rc = ENXIO;
 2309                         goto done;
 2310                 }
 2311 
 2312                 if (!IS_BUSY(sc)) {
 2313                         rc = 0;
 2314                         break;
 2315                 }
 2316 
 2317                 if (!(flags & SLEEP_OK)) {
 2318                         rc = EBUSY;
 2319                         goto done;
 2320                 }
 2321 
 2322                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
 2323                         rc = EINTR;
 2324                         goto done;
 2325                 }
 2326         }
 2327 
 2328         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 2329         SET_BUSY(sc);
 2330 #ifdef INVARIANTS
 2331         sc->last_op = wmesg;
 2332         sc->last_op_thr = curthread;
 2333 #endif
 2334 
 2335 done:
 2336         if (!(flags & HOLD_LOCK) || rc)
 2337                 ADAPTER_UNLOCK(sc);
 2338 
 2339         return (rc);
 2340 }
 2341 
 2342 void
 2343 end_synchronized_op(struct adapter *sc, int flags)
 2344 {
 2345 
 2346         if (flags & LOCK_HELD)
 2347                 ADAPTER_LOCK_ASSERT_OWNED(sc);
 2348         else
 2349                 ADAPTER_LOCK(sc);
 2350 
 2351         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 2352         CLR_BUSY(sc);
 2353         wakeup(&sc->flags);
 2354         ADAPTER_UNLOCK(sc);
 2355 }
 2356 
/*
 * Bring the port up: make sure the adapter and port are fully initialized,
 * program MAC settings, start the link, and enable the virtual interface.
 * Must be called with the synchronized-op token held.  Returns 0 or errno;
 * on failure everything done here is undone via cxgbe_uninit_synchronized().
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        /* open_device_map bit set means this port is already up. */
        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* One-time adapter-wide init (queues, taskqueues, interrupts). */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        /* One-time per-port init (tx/rx/fl queues, RSS). */
        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        rc = update_mac_settings(pi, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        /* t4_* firmware calls return negative errno; flip the sign. */
        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Start the 1Hz statistics/housekeeping tick for this port. */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);

        return (rc);
}
 2409 
/*
 * Take the port down by disabling its virtual interface.  Idempotent.
 * Must be called with the synchronized-op token held.
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc;

        ASSERT_SYNCHRONIZED_OP(sc);

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        /* Mark the port closed and clear IFF_DRV_RUNNING under the port lock. */
        clrbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Report link down to the stack. */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        t4_os_link_changed(sc, pi->port_id, 0);

        return (0);
}
 2446 
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q;
        char s[8];
        struct irq *irq;
        struct port_info *pi;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        /* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1) {
                /* Single vector: one handler services everything. */
                KASSERT(!(sc->flags & INTR_DIRECT),
                    ("%s: single interrupt && INTR_DIRECT?", __func__));

                rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
                if (rc != 0)
                        return (rc);
        } else {
                /* Multiple interrupts. */
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

                /* The first one is always error intr */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /* The second one is always the firmware event queue */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                    "evt");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /*
                 * Note that if INTR_DIRECT is not set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queueus will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, p) {
                        pi = sc->port[p];

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        /* NIC rx queues; described as "<port>.<queue>". */
                        rxq = &sc->sge.rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                snprintf(s, sizeof(s), "%d.%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
                                    s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT))
                                continue;
ofld_queues:
                        /* TOE rx queues; note ',' (not '.') in the name. */
                        ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                snprintf(s, sizeof(s), "%d,%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr,
                                    ofld_rxq, s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }
#endif
                }
        }

        return (0);
}
 2550 
/*
 * One-time adapter-wide initialization: set up the adapter's own queues,
 * create the per-channel taskqueues, and enable interrupts.  On any failure
 * adapter_full_uninit() rolls back whatever was done.  Returns 0 or errno.
 */
static int
adapter_full_init(struct adapter *sc)
{
        int rc, i;

        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
        KASSERT((sc->flags & FULL_INIT_DONE) == 0,
            ("%s: FULL_INIT_DONE already", __func__));

        /*
         * queues that belong to the adapter (not any particular port).
         */
        rc = t4_setup_adapter_queues(sc);
        if (rc != 0)
                goto done;

        /* One taskqueue per slot in sc->tq, each with a single thread. */
        for (i = 0; i < nitems(sc->tq); i++) {
                sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
                    taskqueue_thread_enqueue, &sc->tq[i]);
                if (sc->tq[i] == NULL) {
                        device_printf(sc->dev,
                            "failed to allocate task queue %d\n", i);
                        rc = ENOMEM;
                        goto done;
                }
                taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
                    device_get_nameunit(sc->dev), i);
        }

        t4_intr_enable(sc);
        sc->flags |= FULL_INIT_DONE;
done:
        if (rc != 0)
                adapter_full_uninit(sc);

        return (rc);
}
 2588 
 2589 static int
 2590 adapter_full_uninit(struct adapter *sc)
 2591 {
 2592         int i;
 2593 
 2594         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 2595 
 2596         t4_teardown_adapter_queues(sc);
 2597 
 2598         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
 2599                 taskqueue_free(sc->tq[i]);
 2600                 sc->tq[i] = NULL;
 2601         }
 2602 
 2603         sc->flags &= ~FULL_INIT_DONE;
 2604 
 2605         return (0);
 2606 }
 2607 
/*
 * One-time per-port initialization: sysctl context, the port's tx/rx/fl
 * queues, and the RSS table for its virtual interface.  Must hold the
 * synchronized-op token.  On failure port_full_uninit() rolls back.
 */
static int
port_full_init(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        uint16_t *rss;
        struct sge_rxq *rxq;
        int rc, i;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT((pi->flags & PORT_INIT_DONE) == 0,
            ("%s: PORT_INIT_DONE already", __func__));

        sysctl_ctx_init(&pi->ctx);
        pi->flags |= PORT_SYSCTL_CTX;

        /*
         * Allocate tx/rx/fl queues for this port.
         */
        rc = t4_setup_port_queues(pi);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Setup RSS for this port.
         */
        /*
         * NOTE(review): the table passed to the firmware holds pi->nrxq
         * entries (one abs_id per rx queue) while the viid's RSS slice is
         * pi->rss_size entries — presumably the firmware replicates the
         * shorter list across the slice; confirm against t4_config_rss_range.
         */
        rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
            M_ZERO | M_WAITOK);
        for_each_rxq(pi, i, rxq) {
                rss[i] = rxq->iq.abs_id;
        }
        rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
            pi->rss_size, rss, pi->nrxq);
        free(rss, M_CXGBE);
        if (rc != 0) {
                if_printf(ifp, "rss_config failed: %d\n", rc);
                goto done;
        }

        pi->flags |= PORT_INIT_DONE;
done:
        if (rc != 0)
                port_full_uninit(pi);

        return (rc);
}
 2654 
/*
 * Idempotent.  Quiesce all of the port's queues (tx first, then rx, so no
 * new work is generated while draining) and then tear them down.
 */
static int
port_full_uninit(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        int i;
        struct sge_rxq *rxq;
        struct sge_txq *txq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ofld_txq;
#endif

        if (pi->flags & PORT_INIT_DONE) {

                /* Need to quiesce queues.  XXX: ctrl queues? */

                for_each_txq(pi, i, txq) {
                        quiesce_eq(sc, &txq->eq);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_txq(pi, i, ofld_txq) {
                        quiesce_eq(sc, &ofld_txq->eq);
                }
#endif

                /* Each rx queue has an ingress queue and a free list. */
                for_each_rxq(pi, i, rxq) {
                        quiesce_iq(sc, &rxq->iq);
                        quiesce_fl(sc, &rxq->fl);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_rxq(pi, i, ofld_rxq) {
                        quiesce_iq(sc, &ofld_rxq->iq);
                        quiesce_fl(sc, &ofld_rxq->fl);
                }
#endif
        }

        t4_teardown_port_queues(pi);
        pi->flags &= ~PORT_INIT_DONE;

        return (0);
}
 2702 
/*
 * Stop all activity on an egress queue: mark it doomed so no new work is
 * queued, wait out any pending credit flush, then drain the tx callout and
 * task.  The eq lock protects the flags and is the sleep channel's mutex.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
        EQ_LOCK(eq);
        eq->flags |= EQ_DOOMED;

        /*
         * Wait for the response to a credit flush if one's
         * pending.
         */
        while (eq->flags & EQ_CRFLUSHED)
                mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
        EQ_UNLOCK(eq);

        callout_drain(&eq->tx_callout); /* XXX: iffy */
        pause("callout", 10);           /* Still iffy */

        taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
 2722 
 2723 static void
 2724 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
 2725 {
 2726         (void) sc;      /* unused */
 2727 
 2728         /* Synchronize with the interrupt handler */
 2729         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
 2730                 pause("iqfree", 1);
 2731 }
 2732 
/*
 * Stop all activity on a free list: mark it doomed (under both the adapter's
 * starving-fl lock and the fl lock, so the refill path can't pick it up) and
 * then drain the starving-fl callout.  Afterward the fl must not be on the
 * starving list — the KASSERT checks that.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
 2746 
/*
 * Allocate the bus resource for interrupt `rid`, hook up `handler` with
 * `arg`, and (if `name` is non-NULL) attach a human-readable description.
 * Returns 0 or errno.  irq->res/tag are left set for t4_free_irq to undo;
 * a partially set up irq is cleaned up by the caller's error path.
 */
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    driver_intr_t *handler, void *arg, char *name)
{
        int rc;

        irq->rid = rid;
        irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (irq->res == NULL) {
                device_printf(sc->dev,
                    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
                return (ENOMEM);
        }

        /* Filter is NULL: handler runs as an ithread, not in filter context. */
        rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
            NULL, handler, arg, &irq->tag);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to setup interrupt for rid %d, name %s: %d\n",
                    rid, name, rc);
        } else if (name)
                bus_describe_intr(sc->dev, irq->res, irq->tag, name);

        return (rc);
}
 2773 
 2774 static int
 2775 t4_free_irq(struct adapter *sc, struct irq *irq)
 2776 {
 2777         if (irq->tag)
 2778                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
 2779         if (irq->res)
 2780                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
 2781 
 2782         bzero(irq, sizeof(*irq));
 2783 
 2784         return (0);
 2785 }
 2786 
 2787 static void
 2788 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
 2789     unsigned int end)
 2790 {
 2791         uint32_t *p = (uint32_t *)(buf + start);
 2792 
 2793         for ( ; start <= end; start += sizeof(uint32_t))
 2794                 *p++ = t4_read_reg(sc, start);
 2795 }
 2796 
/*
 * Fill `buf` with a register dump of the adapter for the T4_GET_REGS ioctl.
 * Only the documented/readable ranges below are copied (start/end pairs,
 * inclusive, byte offsets); holes between ranges are left untouched in buf.
 * regs->version encodes the dump format (4) and the chip revision.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i;
        /* Pairs of {first, last} register offsets, both inclusive. */
        static const unsigned int reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        /* Format version 4; chip revision in bits 10+. */
        regs->version = 4 | (sc->params.rev << 10);
        for (i = 0; i < nitems(reg_ranges); i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
 3025 
/*
 * 1Hz per-port callout: pull the hardware port statistics and publish them
 * through the ifnet counters.  Reschedules itself for as long as the
 * interface stays IFF_DRV_RUNNING; stops quietly once it's taken down.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        t4_get_port_stats(pi->adapter, pi->tx_chan, s);

        /*
         * Pause frames are counted by the MAC as frames/octets (64 bytes
         * each); subtract them so the ifnet counters reflect data traffic.
         */
        ifp->if_opackets = s->tx_frames - s->tx_pause;
        ifp->if_ipackets = s->rx_frames - s->rx_pause;
        ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
        ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
        ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
        ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
        /* Input drops: all per-channel overflow and truncation counters. */
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;

        /* Output drops: hardware drops plus each tx queue's buf_ring drops. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        callout_schedule(&pi->tick, hz);
        PORT_UNLOCK(pi);
}
 3065 
/*
 * Default CPL handler: installed for every opcode that has no real handler.
 * Panics under INVARIANTS (an unexpected CPL is a driver bug); otherwise
 * logs the opcode and frees the mbuf so it isn't leaked.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
        panic("%s: opcode 0x%02x on iq %p with payload %p",
            __func__, rss->opcode, iq, m);
#else
        log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
            __func__, rss->opcode, iq, m);
        m_freem(m);
#endif
        return (EDOOFUS);
}
 3080 
 3081 int
 3082 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
 3083 {
 3084         uintptr_t *loc, new;
 3085 
 3086         if (opcode >= nitems(sc->cpl_handler))
 3087                 return (EINVAL);
 3088 
 3089         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
 3090         loc = (uintptr_t *) &sc->cpl_handler[opcode];
 3091         atomic_store_rel_ptr(loc, new);
 3092 
 3093         return (0);
 3094 }
 3095 
/*
 * Default async-notification handler: panics under INVARIANTS (an
 * unexpected notification is a driver bug), otherwise just logs it.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
        panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
        log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
            __func__, iq, ctrl);
#endif
        return (EDOOFUS);
}
 3108 
 3109 int
 3110 t4_register_an_handler(struct adapter *sc, an_handler_t h)
 3111 {
 3112         uintptr_t *loc, new;
 3113 
 3114         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
 3115         loc = (uintptr_t *) &sc->an_handler;
 3116         atomic_store_rel_ptr(loc, new);
 3117 
 3118         return (0);
 3119 }
 3120 
/*
 * Default firmware-message handler: recovers the enclosing cpl_fw6_msg from
 * the data pointer to report the message type, then panics (INVARIANTS) or
 * logs.  Installed for every fw_msg type with no real handler.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
        __be64 *r = __DECONST(__be64 *, rpl);
        struct cpl_fw6_msg *cpl = member2struct(cpl_fw6_msg, data, r);

#ifdef INVARIANTS
        panic("%s: fw_msg type %d", __func__, cpl->type);
#else
        log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
        return (EDOOFUS);
}
 3134 
 3135 int
 3136 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
 3137 {
 3138         uintptr_t *loc, new;
 3139 
 3140         if (type >= nitems(sc->fw_msg_handler))
 3141                 return (EINVAL);
 3142 
 3143         /*
 3144          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
 3145          * handler dispatch table.  Reject any attempt to install a handler for
 3146          * this subtype.
 3147          */
 3148         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
 3149                 return (EINVAL);
 3150 
 3151         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
 3152         loc = (uintptr_t *) &sc->fw_msg_handler[type];
 3153         atomic_store_rel_ptr(loc, new);
 3154 
 3155         return (0);
 3156 }
 3157 
/*
 * Create the dev.t4nex.X sysctl tree for the adapter: static parameters,
 * capability bitfields, holdoff tuning values, a (hidden) "misc" node of
 * debug/statistics handlers, and the TOE tunables when offload is enabled.
 * All nodes are attached to the device's sysctl context and are torn down
 * with it.  Always returns 0.
 */
static int
t4_sysctls(struct adapter *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children, *c0;
        /*
         * %b bit-description strings for sysctl_bitfield: "\2" selects base 2,
         * then \<bit#><name> pairs name each capability bit.
         */
        static char *caps[] = {
                "\2\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
                "\2\1NIC\2VM\3IDS\4UM\5UM_ISGL",       /* caps[1] niccaps */
                "\2\1TOE",                             /* caps[2] toecaps */
                "\2\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
                "\2\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
                    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
                    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
                "\2\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
        };

        ctx = device_get_sysctl_ctx(sc->dev);

        /*
         * dev.t4nex.X.
         */
        oid = device_get_sysctl_tree(sc->dev);
        c0 = children = SYSCTL_CHILDREN(oid);   /* c0 remembered for subtrees */

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
            &sc->params.nports, 0, "# of ports");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
            &sc->params.rev, 0, "chip hardware revision");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
            CTLFLAG_RD, &sc->fw_version, 0, "firmware version");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
            CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
            &sc->cfcsum, 0, "config file checksum");

        /* Capability words rendered via the %b strings in caps[] above. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
            sysctl_bitfield, "A", "available link capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
            sysctl_bitfield, "A", "available NIC capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
            sysctl_bitfield, "A", "available TCP offload capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
            sysctl_bitfield, "A", "available RDMA capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
            sysctl_bitfield, "A", "available iSCSI capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
            sysctl_bitfield, "A", "available FCoE capabilities");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
            &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");

        /* Holdoff value arrays are rendered as int lists by sysctl_int_array. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
            CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
            sizeof(sc->sge.timer_val), sysctl_int_array, "A",
            "interrupt holdoff timer values (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
            CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
            sizeof(sc->sge.counter_val), sysctl_int_array, "A",
            "interrupt holdoff packet counter values");

#ifdef SBUF_DRAIN
        /*
         * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
         */
        oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
            CTLFLAG_RD | CTLFLAG_SKIP, NULL,
            "logs and miscellaneous information");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cctrl, "A", "congestion control");

        /*
         * The arg2 passed to sysctl_cim_ibq_obq selects the queue: IBQs are
         * 0..CIM_NUM_IBQ-1, OBQs follow at CIM_NUM_IBQ + n.
         */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_la, "A", "CIM logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_qcfg, "A", "CIM queue configuration");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cpl_stats, "A", "CPL statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_ddp_stats, "A", "DDP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_devlog, "A", "firmware's device log");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_fcoe_stats, "A", "FCoE statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_hw_sched, "A", "hardware scheduler ");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_l2t, "A", "hardware L2 table");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_lb_stats, "A", "loopback statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_meminfo, "A", "memory regions");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_path_mtus, "A", "path MTUs");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_pm_stats, "A", "PM statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_rdma_stats, "A", "RDMA statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tcp_stats, "A", "TCP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tids, "A", "TID information");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tp_err_stats, "A", "TP error statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tx_rate, "A", "Tx rate");
#endif

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                /*
                 * dev.t4nex.X.toe.  The sc->tt fields double as the sysctl
                 * backing store, so their defaults are written here too.
                 */
                oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
                    NULL, "TOE parameters");
                children = SYSCTL_CHILDREN(oid);

                sc->tt.sndbuf = 256 * 1024;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
                    &sc->tt.sndbuf, 0, "max hardware send buffer size");

                sc->tt.ddp = 0;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
                    &sc->tt.ddp, 0, "DDP allowed");

                /* Defaults below come from the current hardware settings. */
                sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
                    &sc->tt.indsz, 0, "DDP max indicate size allowed");

                sc->tt.ddp_thres =
                    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
                    &sc->tt.ddp_thres, 0, "DDP threshold");
        }
#endif


        return (0);
}
 3396 
/*
 * Create the dev.cxgbe.X sysctl tree for a port: queue counts/indices,
 * RW holdoff and queue-size knobs (handled by the sysctl_* procs below),
 * and the dev.cxgbe.X.stats subtree of hardware MPS port counters.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        /* RW knobs; validation and locking live in the handler functions. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Each of these reads a 64-bit MPS counter register on demand via
 * sysctl_handle_t4_reg64 (arg1 = adapter, arg2 = register offset).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        /* The RX counters are also per tx_chan (channel, not direction). */
        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Software-maintained counters read from pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by upto 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
 3610 
 3611 static int
 3612 sysctl_int_array(SYSCTL_HANDLER_ARGS)
 3613 {
 3614         int rc, *i;
 3615         struct sbuf sb;
 3616 
 3617         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
 3618         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
 3619                 sbuf_printf(&sb, "%d ", *i);
 3620         sbuf_trim(&sb);
 3621         sbuf_finish(&sb);
 3622         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
 3623         sbuf_delete(&sb);
 3624         return (rc);
 3625 }
 3626 
 3627 static int
 3628 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
 3629 {
 3630         int rc;
 3631         struct sbuf *sb;
 3632 
 3633         rc = sysctl_wire_old_buffer(req, 0);
 3634         if (rc != 0)
 3635                 return(rc);
 3636 
 3637         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
 3638         if (sb == NULL)
 3639                 return (ENOMEM);
 3640 
 3641         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
 3642         rc = sbuf_finish(sb);
 3643         sbuf_delete(sb);
 3644 
 3645         return (rc);
 3646 }
 3647 
/*
 * Sysctl handler for dev.cxgbe.X.holdoff_tmr_idx.  Reports the current
 * holdoff timer index or, on a write, validates the new index and applies
 * it to every rx queue of the port while the port/adapter are locked.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
        uint8_t v;

        idx = pi->tmr_idx;

        /* Read-only request (or copyin error): nothing more to do. */
        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);

        /* Valid timer indices are 0 .. SGE_NTIMERS - 1. */
        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* New timer index, preserving the packet-counter enable bit. */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                /* No 8-bit release store on this arch; use a plain store. */
                rxq->iq.intr_params = v;
#endif
        }
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
 3684 
 3685 static int
 3686 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
 3687 {
 3688         struct port_info *pi = arg1;
 3689         struct adapter *sc = pi->adapter;
 3690         int idx, rc;
 3691 
 3692         idx = pi->pktc_idx;
 3693 
 3694         rc = sysctl_handle_int(oidp, &idx, 0, req);
 3695         if (rc != 0 || req->newptr == NULL)
 3696                 return (rc);
 3697 
 3698         if (idx < -1 || idx >= SGE_NCOUNTERS)
 3699                 return (EINVAL);
 3700 
 3701         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 3702             "t4pktc");
 3703         if (rc)
 3704                 return (rc);
 3705 
 3706         if (pi->flags & PORT_INIT_DONE)
 3707                 rc = EBUSY; /* cannot be changed once the queues are created */
 3708         else
 3709                 pi->pktc_idx = idx;
 3710 
 3711         end_synchronized_op(sc, LOCK_HELD);
 3712         return (rc);
 3713 }
 3714 
 3715 static int
 3716 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
 3717 {
 3718         struct port_info *pi = arg1;
 3719         struct adapter *sc = pi->adapter;
 3720         int qsize, rc;
 3721 
 3722         qsize = pi->qsize_rxq;
 3723 
 3724         rc = sysctl_handle_int(oidp, &qsize, 0, req);
 3725         if (rc != 0 || req->newptr == NULL)
 3726                 return (rc);
 3727 
 3728         if (qsize < 128 || (qsize & 7))
 3729                 return (EINVAL);
 3730 
 3731         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 3732             "t4rxqs");
 3733         if (rc)
 3734                 return (rc);
 3735 
 3736         if (pi->flags & PORT_INIT_DONE)
 3737                 rc = EBUSY; /* cannot be changed once the queues are created */
 3738         else
 3739                 pi->qsize_rxq = qsize;
 3740 
 3741         end_synchronized_op(sc, LOCK_HELD);
 3742         return (rc);
 3743 }
 3744 
 3745 static int
 3746 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
 3747 {
 3748         struct port_info *pi = arg1;
 3749         struct adapter *sc = pi->adapter;
 3750         int qsize, rc;
 3751 
 3752         qsize = pi->qsize_txq;
 3753 
 3754         rc = sysctl_handle_int(oidp, &qsize, 0, req);
 3755         if (rc != 0 || req->newptr == NULL)
 3756                 return (rc);
 3757 
 3758         /* bufring size must be powerof2 */
 3759         if (qsize < 128 || !powerof2(qsize))
 3760                 return (EINVAL);
 3761 
 3762         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 3763             "t4txqs");
 3764         if (rc)
 3765                 return (rc);
 3766 
 3767         if (pi->flags & PORT_INIT_DONE)
 3768                 rc = EBUSY; /* cannot be changed once the queues are created */
 3769         else
 3770                 pi->qsize_txq = qsize;
 3771 
 3772         end_synchronized_op(sc, LOCK_HELD);
 3773         return (rc);
 3774 }
 3775 
 3776 static int
 3777 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
 3778 {
 3779         struct adapter *sc = arg1;
 3780         int reg = arg2;
 3781         uint64_t val;
 3782 
 3783         val = t4_read_reg64(sc, reg);
 3784 
 3785         return (sysctl_handle_64(oidp, &val, 0, req));
 3786 }
 3787 
 3788 #ifdef SBUF_DRAIN
/*
 * Dump the congestion control table (dev.t4nex.X.misc.cctrl): per-window
 * additive increments for each of the NMTUS MTUs, followed by the window's
 * a_wnd parameter and decrement-factor string.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t incr[NMTUS][NCCTRL_WIN];
        /* Decrement factors, indexed by the hardware's b_wnd encoding. */
        static const char *dec_fac[] = {
                "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
                "0.9375"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_cong_tbl(sc, incr);

        /* Two lines per window: increments for MTUs 0-7, then MTUs 8-15. */
        for (i = 0; i < NCCTRL_WIN; ++i) {
                sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
                    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
                    incr[5][i], incr[6][i], incr[7][i]);
                sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
                    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
                    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
                    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
 3826 
/*
 * CIM queue names, indexed by the arg2 value passed to sysctl_cim_ibq_obq
 * (IBQs first, OBQs offset by CIM_NUM_IBQ).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ] = {
        "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",   /* ibq's */
        "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI"  /* obq's */
};
 3831 
 3832 static int
 3833 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
 3834 {
 3835         struct adapter *sc = arg1;
 3836         struct sbuf *sb;
 3837         int rc, i, n, qid = arg2;
 3838         uint32_t *buf, *p;
 3839         char *qtype;
 3840 
 3841         KASSERT(qid >= 0 && qid < nitems(qname),
 3842             ("%s: bad qid %d\n", __func__, qid));
 3843 
 3844         if (qid < CIM_NUM_IBQ) {
 3845                 /* inbound queue */
 3846                 qtype = "IBQ";
 3847                 n = 4 * CIM_IBQ_SIZE;
 3848                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
 3849                 rc = t4_read_cim_ibq(sc, qid, buf, n);
 3850         } else {
 3851                 /* outbound queue */
 3852                 qtype = "OBQ";
 3853                 qid -= CIM_NUM_IBQ;
 3854                 n = 4 * 6 * CIM_OBQ_SIZE;
 3855                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
 3856                 rc = t4_read_cim_obq(sc, qid, buf, n);
 3857         }
 3858 
 3859         if (rc < 0) {
 3860                 rc = -rc;
 3861                 goto done;
 3862         }
 3863         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
 3864 
 3865         rc = sysctl_wire_old_buffer(req, 0);
 3866         if (rc != 0)
 3867                 goto done;
 3868 
 3869         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
 3870         if (sb == NULL) {
 3871                 rc = ENOMEM;
 3872                 goto done;
 3873         }
 3874 
 3875         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
 3876         for (i = 0, p = buf; i < n; i += 16, p += 4)
 3877                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
 3878                     p[2], p[3]);
 3879 
 3880         rc = sbuf_finish(sb);
 3881         sbuf_delete(sb);
 3882 done:
 3883         free(buf, M_CXGBE);
 3884         return (rc);
 3885 }
 3886 
 3887 static int
 3888 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
 3889 {
 3890         struct adapter *sc = arg1;
 3891         u_int cfg;
 3892         struct sbuf *sb;
 3893         uint32_t *buf, *p;
 3894         int rc;
 3895 
 3896         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
 3897         if (rc != 0)
 3898                 return (rc);
 3899 
 3900         rc = sysctl_wire_old_buffer(req, 0);
 3901         if (rc != 0)
 3902                 return (rc);
 3903 
 3904         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
 3905         if (sb == NULL)
 3906                 return (ENOMEM);
 3907 
 3908         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
 3909             M_ZERO | M_WAITOK);
 3910 
 3911         rc = -t4_cim_read_la(sc, buf, NULL);
 3912         if (rc != 0)
 3913                 goto done;
 3914 
 3915         sbuf_printf(sb, "Status   Data      PC%s",
 3916             cfg & F_UPDBGLACAPTPCONLY ? "" :
 3917             "     LS0Stat  LS0Addr             LS0Data");
 3918 
 3919         KASSERT((sc->params.cim_la_size & 7) == 0,
 3920             ("%s: p will walk off the end of buf", __func__));
 3921 
 3922         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
 3923                 if (cfg & F_UPDBGLACAPTPCONLY) {
 3924                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
 3925                             p[6], p[7]);
 3926                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
 3927                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
 3928                             p[4] & 0xff, p[5] >> 8);
 3929                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
 3930                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
 3931                             p[1] & 0xf, p[2] >> 4);
 3932                 } else {
 3933                         sbuf_printf(sb,
 3934                             "\n  %02x   %x%07x %x%07x %08x %08x "
 3935                             "%08x%08x%08x%08x",
 3936                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
 3937                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
 3938                             p[6], p[7]);
 3939                 }
 3940         }
 3941 
 3942         rc = sbuf_finish(sb);
 3943         sbuf_delete(sb);
 3944 done:
 3945         free(buf, M_CXGBE);
 3946         return (rc);
 3947 }
 3948 
 3949 static int
 3950 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
 3951 {
 3952         struct adapter *sc = arg1;
 3953         struct sbuf *sb;
 3954         int rc, i;
 3955         uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ];
 3956         uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ];
 3957         uint16_t thres[CIM_NUM_IBQ];
 3958         uint32_t obq_wr[2 * CIM_NUM_OBQ], *wr = obq_wr;
 3959         uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ)], *p = stat;
 3960 
 3961         rc = -t4_cim_read(sc, A_UP_IBQ_0_RDADDR, nitems(stat), stat);
 3962         if (rc == 0)
 3963                 rc = -t4_cim_read(sc, A_UP_OBQ_0_REALADDR, nitems(obq_wr),
 3964                     obq_wr);
 3965         if (rc != 0)
 3966                 return (rc);
 3967 
 3968         t4_read_cimq_cfg(sc, base, size, thres);
 3969 
 3970         rc = sysctl_wire_old_buffer(req, 0);
 3971         if (rc != 0)
 3972                 return (rc);
 3973 
 3974         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
 3975         if (sb == NULL)
 3976                 return (ENOMEM);
 3977 
 3978         sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
 3979 
 3980         for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
 3981                 sbuf_printf(sb, "\n%5s %5x %5u %4u %6x  %4x %4u %4u %5u",
 3982                     qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
 3983                     G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
 3984                     G_QUEREMFLITS(p[2]) * 16);
 3985         for ( ; i < CIM_NUM_IBQ + CIM_NUM_OBQ; i++, p += 4, wr += 2)
 3986                 sbuf_printf(sb, "\n%5s %5x %5u %11x  %4x %4u %4u %5u", qname[i],
 3987                     base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
 3988                     wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
 3989                     G_QUEREMFLITS(p[2]) * 16);
 3990 
 3991         rc = sbuf_finish(sb);
 3992         sbuf_delete(sb);
 3993 
 3994         return (rc);
 3995 }
 3996 
 3997 static int
 3998 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
 3999 {
 4000         struct adapter *sc = arg1;
 4001         struct sbuf *sb;
 4002         int rc;
 4003         struct tp_cpl_stats stats;
 4004 
 4005         rc = sysctl_wire_old_buffer(req, 0);
 4006         if (rc != 0)
 4007                 return (rc);
 4008 
 4009         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4010         if (sb == NULL)
 4011                 return (ENOMEM);
 4012 
 4013         t4_tp_get_cpl_stats(sc, &stats);
 4014 
 4015         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
 4016             "channel 3\n");
 4017         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
 4018                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
 4019         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
 4020                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
 4021 
 4022         rc = sbuf_finish(sb);
 4023         sbuf_delete(sb);
 4024 
 4025         return (rc);
 4026 }
 4027 
 4028 static int
 4029 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
 4030 {
 4031         struct adapter *sc = arg1;
 4032         struct sbuf *sb;
 4033         int rc;
 4034         struct tp_usm_stats stats;
 4035 
 4036         rc = sysctl_wire_old_buffer(req, 0);
 4037         if (rc != 0)
 4038                 return(rc);
 4039 
 4040         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4041         if (sb == NULL)
 4042                 return (ENOMEM);
 4043 
 4044         t4_get_usm_stats(sc, &stats);
 4045 
 4046         sbuf_printf(sb, "Frames: %u\n", stats.frames);
 4047         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
 4048         sbuf_printf(sb, "Drops:  %u", stats.drops);
 4049 
 4050         rc = sbuf_finish(sb);
 4051         sbuf_delete(sb);
 4052 
 4053         return (rc);
 4054 }
 4055 
/* Printable names for firmware devlog severity levels (sysctl_devlog). */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
 4064 
/* Printable names for firmware devlog facilities (sysctl_devlog). */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
 4090 
/*
 * Sysctl handler that dumps the firmware's device log: a circular
 * buffer of fw_devlog_e entries in adapter memory.  The log is copied
 * out in one read, byte-swapped in place, and printed starting at the
 * oldest entry (smallest timestamp).
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;  /* smallest timestamp seen so far */

        if (dparams->start == 0)
                return (ENXIO); /* firmware didn't report a devlog region */

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        /* Snapshot the whole log region out of adapter memory. */
        rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
            (void *)buf);
        if (rc != 0)
                goto done;

        /*
         * Convert entries to host byte order and find the oldest one;
         * the log wraps, so the oldest entry can be anywhere.
         */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }
        sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Walk the ring exactly once, starting at the oldest entry. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < nitems(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < nitems(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                /* NOTE: the format string comes from the firmware log. */
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
 4173 
 4174 static int
 4175 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
 4176 {
 4177         struct adapter *sc = arg1;
 4178         struct sbuf *sb;
 4179         int rc;
 4180         struct tp_fcoe_stats stats[4];
 4181 
 4182         rc = sysctl_wire_old_buffer(req, 0);
 4183         if (rc != 0)
 4184                 return (rc);
 4185 
 4186         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4187         if (sb == NULL)
 4188                 return (ENOMEM);
 4189 
 4190         t4_get_fcoe_stats(sc, 0, &stats[0]);
 4191         t4_get_fcoe_stats(sc, 1, &stats[1]);
 4192         t4_get_fcoe_stats(sc, 2, &stats[2]);
 4193         t4_get_fcoe_stats(sc, 3, &stats[3]);
 4194 
 4195         sbuf_printf(sb, "                   channel 0        channel 1        "
 4196             "channel 2        channel 3\n");
 4197         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
 4198             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
 4199             stats[3].octetsDDP);
 4200         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
 4201             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
 4202         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
 4203             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
 4204             stats[3].framesDrop);
 4205 
 4206         rc = sbuf_finish(sb);
 4207         sbuf_delete(sb);
 4208 
 4209         return (rc);
 4210 }
 4211 
 4212 static int
 4213 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
 4214 {
 4215         struct adapter *sc = arg1;
 4216         struct sbuf *sb;
 4217         int rc, i;
 4218         unsigned int map, kbps, ipg, mode;
 4219         unsigned int pace_tab[NTX_SCHED];
 4220 
 4221         rc = sysctl_wire_old_buffer(req, 0);
 4222         if (rc != 0)
 4223                 return (rc);
 4224 
 4225         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4226         if (sb == NULL)
 4227                 return (ENOMEM);
 4228 
 4229         map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
 4230         mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
 4231         t4_read_pace_tbl(sc, pace_tab);
 4232 
 4233         sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
 4234             "Class IPG (0.1 ns)   Flow IPG (us)");
 4235 
 4236         for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
 4237                 t4_get_tx_sched(sc, i, &kbps, &ipg);
 4238                 sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
 4239                     (mode & (1 << i)) ? "flow" : "class", map & 3);
 4240                 if (kbps)
 4241                         sbuf_printf(sb, "%9u     ", kbps);
 4242                 else
 4243                         sbuf_printf(sb, " disabled     ");
 4244 
 4245                 if (ipg)
 4246                         sbuf_printf(sb, "%13u        ", ipg);
 4247                 else
 4248                         sbuf_printf(sb, "     disabled        ");
 4249 
 4250                 if (pace_tab[i])
 4251                         sbuf_printf(sb, "%10u", pace_tab[i]);
 4252                 else
 4253                         sbuf_printf(sb, "  disabled");
 4254         }
 4255 
 4256         rc = sbuf_finish(sb);
 4257         sbuf_delete(sb);
 4258 
 4259         return (rc);
 4260 }
 4261 
 4262 static int
 4263 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
 4264 {
 4265         struct adapter *sc = arg1;
 4266         struct sbuf *sb;
 4267         int rc, i, j;
 4268         uint64_t *p0, *p1;
 4269         struct lb_port_stats s[2];
 4270         static const char *stat_name[] = {
 4271                 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
 4272                 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
 4273                 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
 4274                 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
 4275                 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
 4276                 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
 4277                 "BG2FramesTrunc:", "BG3FramesTrunc:"
 4278         };
 4279 
 4280         rc = sysctl_wire_old_buffer(req, 0);
 4281         if (rc != 0)
 4282                 return (rc);
 4283 
 4284         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
 4285         if (sb == NULL)
 4286                 return (ENOMEM);
 4287 
 4288         memset(s, 0, sizeof(s));
 4289 
 4290         for (i = 0; i < 4; i += 2) {
 4291                 t4_get_lb_stats(sc, i, &s[0]);
 4292                 t4_get_lb_stats(sc, i + 1, &s[1]);
 4293 
 4294                 p0 = &s[0].octets;
 4295                 p1 = &s[1].octets;
 4296                 sbuf_printf(sb, "%s                       Loopback %u"
 4297                     "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
 4298 
 4299                 for (j = 0; j < nitems(stat_name); j++)
 4300                         sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
 4301                                    *p0++, *p1++);
 4302         }
 4303 
 4304         rc = sbuf_finish(sb);
 4305         sbuf_delete(sb);
 4306 
 4307         return (rc);
 4308 }
 4309 
/*
 * A memory range in the adapter's address space, used by sysctl_meminfo.
 * idx indexes the region[]/memory[] name tables there; an idx >=
 * nitems(region) hides the entry.  A limit of 0 means the upper bound is
 * implied by the next region's base.
 */
struct mem_desc {
        unsigned int base;      /* start address */
        unsigned int limit;     /* last address (inclusive), or 0 */
        unsigned int idx;       /* index into a name table */
};
 4315 
 4316 static int
 4317 mem_desc_cmp(const void *a, const void *b)
 4318 {
 4319         return ((const struct mem_desc *)a)->base -
 4320                ((const struct mem_desc *)b)->base;
 4321 }
 4322 
 4323 static void
 4324 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
 4325     unsigned int to)
 4326 {
 4327         unsigned int size;
 4328 
 4329         size = to - from + 1;
 4330         if (size == 0)
 4331                 return;
 4332 
 4333         /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
 4334         sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
 4335 }
 4336 
 4337 static int
 4338 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
 4339 {
 4340         struct adapter *sc = arg1;
 4341         struct sbuf *sb;
 4342         int rc, i, n;
 4343         uint32_t lo, hi;
 4344         static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
 4345         static const char *region[] = {
 4346                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
 4347                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
 4348                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
 4349                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
 4350                 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
 4351                 "ULPTX state:", "On-chip queues:"
 4352         };
 4353         struct mem_desc avail[3];
 4354         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
 4355         struct mem_desc *md = mem;
 4356 
 4357         rc = sysctl_wire_old_buffer(req, 0);
 4358         if (rc != 0)
 4359                 return (rc);
 4360 
 4361         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
 4362         if (sb == NULL)
 4363                 return (ENOMEM);
 4364 
 4365         for (i = 0; i < nitems(mem); i++) {
 4366                 mem[i].limit = 0;
 4367                 mem[i].idx = i;
 4368         }
 4369 
 4370         /* Find and sort the populated memory ranges */
 4371         i = 0;
 4372         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
 4373         if (lo & F_EDRAM0_ENABLE) {
 4374                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
 4375                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
 4376                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
 4377                 avail[i].idx = 0;
 4378                 i++;
 4379         }
 4380         if (lo & F_EDRAM1_ENABLE) {
 4381                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
 4382                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
 4383                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
 4384                 avail[i].idx = 1;
 4385                 i++;
 4386         }
 4387         if (lo & F_EXT_MEM_ENABLE) {
 4388                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
 4389                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
 4390                 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
 4391                 avail[i].idx = 2;
 4392                 i++;
 4393         }
 4394         if (!i)                                    /* no memory available */
 4395                 return 0;
 4396         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
 4397 
 4398         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
 4399         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
 4400         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
 4401         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
 4402         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
 4403         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
 4404         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
 4405         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
 4406         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
 4407 
 4408         /* the next few have explicit upper bounds */
 4409         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
 4410         md->limit = md->base - 1 +
 4411                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
 4412                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
 4413         md++;
 4414 
 4415         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
 4416         md->limit = md->base - 1 +
 4417                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
 4418                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
 4419         md++;
 4420 
 4421         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
 4422                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
 4423                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
 4424                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
 4425         } else {
 4426                 md->base = 0;
 4427                 md->idx = nitems(region);  /* hide it */
 4428         }
 4429         md++;
 4430 
 4431 #define ulp_region(reg) \
 4432         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
 4433         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
 4434 
 4435         ulp_region(RX_ISCSI);
 4436         ulp_region(RX_TDDP);
 4437         ulp_region(TX_TPT);
 4438         ulp_region(RX_STAG);
 4439         ulp_region(RX_RQ);
 4440         ulp_region(RX_RQUDP);
 4441         ulp_region(RX_PBL);
 4442         ulp_region(TX_PBL);
 4443 #undef ulp_region
 4444 
 4445         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
 4446         md->limit = md->base + sc->tids.ntids - 1;
 4447         md++;
 4448         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
 4449         md->limit = md->base + sc->tids.ntids - 1;
 4450         md++;
 4451 
 4452         md->base = sc->vres.ocq.start;
 4453         if (sc->vres.ocq.size)
 4454                 md->limit = md->base + sc->vres.ocq.size - 1;
 4455         else
 4456                 md->idx = nitems(region);  /* hide it */
 4457         md++;
 4458 
 4459         /* add any address-space holes, there can be up to 3 */
 4460         for (n = 0; n < i - 1; n++)
 4461                 if (avail[n].limit < avail[n + 1].base)
 4462                         (md++)->base = avail[n].limit;
 4463         if (avail[n].limit)
 4464                 (md++)->base = avail[n].limit;
 4465 
 4466         n = md - mem;
 4467         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
 4468 
 4469         for (lo = 0; lo < i; lo++)
 4470                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
 4471                                 avail[lo].limit - 1);
 4472 
 4473         sbuf_printf(sb, "\n");
 4474         for (i = 0; i < n; i++) {
 4475                 if (mem[i].idx >= nitems(region))
 4476                         continue;                        /* skip holes */
 4477                 if (!mem[i].limit)
 4478                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
 4479                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
 4480                                 mem[i].limit);
 4481         }
 4482 
 4483         sbuf_printf(sb, "\n");
 4484         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
 4485         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
 4486         mem_region_show(sb, "uP RAM:", lo, hi);
 4487 
 4488         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
 4489         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
 4490         mem_region_show(sb, "uP Extmem2:", lo, hi);
 4491 
 4492         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
 4493         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
 4494                    G_PMRXMAXPAGE(lo),
 4495                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
 4496                    (lo & F_PMRXNUMCHN) ? 2 : 1);
 4497 
 4498         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
 4499         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
 4500         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
 4501                    G_PMTXMAXPAGE(lo),
 4502                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
 4503                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
 4504         sbuf_printf(sb, "%u p-structs\n",
 4505                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
 4506 
 4507         for (i = 0; i < 4; i++) {
 4508                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
 4509                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
 4510                            i, G_USED(lo), G_ALLOC(lo));
 4511         }
 4512         for (i = 0; i < 4; i++) {
 4513                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
 4514                 sbuf_printf(sb,
 4515                            "\nLoopback %d using %u pages out of %u allocated",
 4516                            i, G_USED(lo), G_ALLOC(lo));
 4517         }
 4518 
 4519         rc = sbuf_finish(sb);
 4520         sbuf_delete(sb);
 4521 
 4522         return (rc);
 4523 }
 4524 
 4525 static int
 4526 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
 4527 {
 4528         struct adapter *sc = arg1;
 4529         struct sbuf *sb;
 4530         int rc;
 4531         uint16_t mtus[NMTUS];
 4532 
 4533         rc = sysctl_wire_old_buffer(req, 0);
 4534         if (rc != 0)
 4535                 return (rc);
 4536 
 4537         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4538         if (sb == NULL)
 4539                 return (ENOMEM);
 4540 
 4541         t4_read_mtu_tbl(sc, mtus, NULL);
 4542 
 4543         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
 4544             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
 4545             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
 4546             mtus[14], mtus[15]);
 4547 
 4548         rc = sbuf_finish(sb);
 4549         sbuf_delete(sb);
 4550 
 4551         return (rc);
 4552 }
 4553 
 4554 static int
 4555 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
 4556 {
 4557         struct adapter *sc = arg1;
 4558         struct sbuf *sb;
 4559         int rc, i;
 4560         uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
 4561         uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
 4562         static const char *pm_stats[] = {
 4563                 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
 4564         };
 4565 
 4566         rc = sysctl_wire_old_buffer(req, 0);
 4567         if (rc != 0)
 4568                 return (rc);
 4569 
 4570         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4571         if (sb == NULL)
 4572                 return (ENOMEM);
 4573 
 4574         t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
 4575         t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
 4576 
 4577         sbuf_printf(sb, "                Tx count            Tx cycles    "
 4578             "Rx count            Rx cycles");
 4579         for (i = 0; i < PM_NSTATS; i++)
 4580                 sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
 4581                     pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
 4582 
 4583         rc = sbuf_finish(sb);
 4584         sbuf_delete(sb);
 4585 
 4586         return (rc);
 4587 }
 4588 
 4589 static int
 4590 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
 4591 {
 4592         struct adapter *sc = arg1;
 4593         struct sbuf *sb;
 4594         int rc;
 4595         struct tp_rdma_stats stats;
 4596 
 4597         rc = sysctl_wire_old_buffer(req, 0);
 4598         if (rc != 0)
 4599                 return (rc);
 4600 
 4601         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4602         if (sb == NULL)
 4603                 return (ENOMEM);
 4604 
 4605         t4_tp_get_rdma_stats(sc, &stats);
 4606         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
 4607         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
 4608 
 4609         rc = sbuf_finish(sb);
 4610         sbuf_delete(sb);
 4611 
 4612         return (rc);
 4613 }
 4614 
 4615 static int
 4616 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
 4617 {
 4618         struct adapter *sc = arg1;
 4619         struct sbuf *sb;
 4620         int rc;
 4621         struct tp_tcp_stats v4, v6;
 4622 
 4623         rc = sysctl_wire_old_buffer(req, 0);
 4624         if (rc != 0)
 4625                 return (rc);
 4626 
 4627         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4628         if (sb == NULL)
 4629                 return (ENOMEM);
 4630 
 4631         t4_tp_get_tcp_stats(sc, &v4, &v6);
 4632         sbuf_printf(sb,
 4633             "                                IP                 IPv6\n");
 4634         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
 4635             v4.tcpOutRsts, v6.tcpOutRsts);
 4636         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
 4637             v4.tcpInSegs, v6.tcpInSegs);
 4638         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
 4639             v4.tcpOutSegs, v6.tcpOutSegs);
 4640         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
 4641             v4.tcpRetransSegs, v6.tcpRetransSegs);
 4642 
 4643         rc = sbuf_finish(sb);
 4644         sbuf_delete(sb);
 4645 
 4646         return (rc);
 4647 }
 4648 
/*
 * Sysctl handler that reports the adapter's TID ranges and usage:
 * active TIDs (atids), the main TID table (with the hash-mode region
 * taken into account), server TIDs (stids), filter TIDs, and the LE
 * active-connection counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        /*
                         * Hash mode: the TID space is split around the
                         * server-index boundary, with the hash region
                         * starting at LE_DB_TID_HASHBASE.
                         */
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
 4708 
/*
 * sysctl handler that dumps the TP block's error statistics as a table
 * with one column per channel, followed by the two adapter-wide offload
 * counters (no-neighbor drops and congestion deferrals).
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Snapshot all TP error counters in one shot. */
	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
 4761 
 4762 static int
 4763 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
 4764 {
 4765         struct adapter *sc = arg1;
 4766         struct sbuf *sb;
 4767         int rc;
 4768         u64 nrate[NCHAN], orate[NCHAN];
 4769 
 4770         rc = sysctl_wire_old_buffer(req, 0);
 4771         if (rc != 0)
 4772                 return (rc);
 4773 
 4774         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
 4775         if (sb == NULL)
 4776                 return (ENOMEM);
 4777 
 4778         t4_get_chan_txrate(sc, nrate, orate);
 4779         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
 4780                  "channel 3\n");
 4781         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
 4782             nrate[0], nrate[1], nrate[2], nrate[3]);
 4783         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
 4784             orate[0], orate[1], orate[2], orate[3]);
 4785 
 4786         rc = sbuf_finish(sb);
 4787         sbuf_delete(sb);
 4788 
 4789         return (rc);
 4790 }
 4791 #endif
 4792 
 4793 static inline void
 4794 txq_start(struct ifnet *ifp, struct sge_txq *txq)
 4795 {
 4796         struct buf_ring *br;
 4797         struct mbuf *m;
 4798 
 4799         TXQ_LOCK_ASSERT_OWNED(txq);
 4800 
 4801         br = txq->br;
 4802         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
 4803         if (m)
 4804                 t4_eth_tx(ifp, txq, m);
 4805 }
 4806 
 4807 void
 4808 t4_tx_callout(void *arg)
 4809 {
 4810         struct sge_eq *eq = arg;
 4811         struct adapter *sc;
 4812 
 4813         if (EQ_TRYLOCK(eq) == 0)
 4814                 goto reschedule;
 4815 
 4816         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
 4817                 EQ_UNLOCK(eq);
 4818 reschedule:
 4819                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
 4820                         callout_schedule(&eq->tx_callout, 1);
 4821                 return;
 4822         }
 4823 
 4824         EQ_LOCK_ASSERT_OWNED(eq);
 4825 
 4826         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
 4827 
 4828                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
 4829                         struct sge_txq *txq = arg;
 4830                         struct port_info *pi = txq->ifp->if_softc;
 4831 
 4832                         sc = pi->adapter;
 4833                 } else {
 4834                         struct sge_wrq *wrq = arg;
 4835 
 4836                         sc = wrq->adapter;
 4837                 }
 4838 
 4839                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
 4840         }
 4841 
 4842         EQ_UNLOCK(eq);
 4843 }
 4844 
 4845 void
 4846 t4_tx_task(void *arg, int count)
 4847 {
 4848         struct sge_eq *eq = arg;
 4849 
 4850         EQ_LOCK(eq);
 4851         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
 4852                 struct sge_txq *txq = arg;
 4853                 txq_start(txq->ifp, txq);
 4854         } else {
 4855                 struct sge_wrq *wrq = arg;
 4856                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
 4857         }
 4858         EQ_UNLOCK(eq);
 4859 }
 4860 
 4861 static uint32_t
 4862 fconf_to_mode(uint32_t fconf)
 4863 {
 4864         uint32_t mode;
 4865 
 4866         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
 4867             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
 4868 
 4869         if (fconf & F_FRAGMENTATION)
 4870                 mode |= T4_FILTER_IP_FRAGMENT;
 4871 
 4872         if (fconf & F_MPSHITTYPE)
 4873                 mode |= T4_FILTER_MPS_HIT_TYPE;
 4874 
 4875         if (fconf & F_MACMATCH)
 4876                 mode |= T4_FILTER_MAC_IDX;
 4877 
 4878         if (fconf & F_ETHERTYPE)
 4879                 mode |= T4_FILTER_ETH_TYPE;
 4880 
 4881         if (fconf & F_PROTOCOL)
 4882                 mode |= T4_FILTER_IP_PROTO;
 4883 
 4884         if (fconf & F_TOS)
 4885                 mode |= T4_FILTER_IP_TOS;
 4886 
 4887         if (fconf & F_VLAN)
 4888                 mode |= T4_FILTER_VLAN;
 4889 
 4890         if (fconf & F_VNIC_ID)
 4891                 mode |= T4_FILTER_VNIC;
 4892 
 4893         if (fconf & F_PORT)
 4894                 mode |= T4_FILTER_PORT;
 4895 
 4896         if (fconf & F_FCOE)
 4897                 mode |= T4_FILTER_FCoE;
 4898 
 4899         return (mode);
 4900 }
 4901 
 4902 static uint32_t
 4903 mode_to_fconf(uint32_t mode)
 4904 {
 4905         uint32_t fconf = 0;
 4906 
 4907         if (mode & T4_FILTER_IP_FRAGMENT)
 4908                 fconf |= F_FRAGMENTATION;
 4909 
 4910         if (mode & T4_FILTER_MPS_HIT_TYPE)
 4911                 fconf |= F_MPSHITTYPE;
 4912 
 4913         if (mode & T4_FILTER_MAC_IDX)
 4914                 fconf |= F_MACMATCH;
 4915 
 4916         if (mode & T4_FILTER_ETH_TYPE)
 4917                 fconf |= F_ETHERTYPE;
 4918 
 4919         if (mode & T4_FILTER_IP_PROTO)
 4920                 fconf |= F_PROTOCOL;
 4921 
 4922         if (mode & T4_FILTER_IP_TOS)
 4923                 fconf |= F_TOS;
 4924 
 4925         if (mode & T4_FILTER_VLAN)
 4926                 fconf |= F_VLAN;
 4927 
 4928         if (mode & T4_FILTER_VNIC)
 4929                 fconf |= F_VNIC_ID;
 4930 
 4931         if (mode & T4_FILTER_PORT)
 4932                 fconf |= F_PORT;
 4933 
 4934         if (mode & T4_FILTER_FCoE)
 4935                 fconf |= F_FCOE;
 4936 
 4937         return (fconf);
 4938 }
 4939 
 4940 static uint32_t
 4941 fspec_to_fconf(struct t4_filter_specification *fs)
 4942 {
 4943         uint32_t fconf = 0;
 4944 
 4945         if (fs->val.frag || fs->mask.frag)
 4946                 fconf |= F_FRAGMENTATION;
 4947 
 4948         if (fs->val.matchtype || fs->mask.matchtype)
 4949                 fconf |= F_MPSHITTYPE;
 4950 
 4951         if (fs->val.macidx || fs->mask.macidx)
 4952                 fconf |= F_MACMATCH;
 4953 
 4954         if (fs->val.ethtype || fs->mask.ethtype)
 4955                 fconf |= F_ETHERTYPE;
 4956 
 4957         if (fs->val.proto || fs->mask.proto)
 4958                 fconf |= F_PROTOCOL;
 4959 
 4960         if (fs->val.tos || fs->mask.tos)
 4961                 fconf |= F_TOS;
 4962 
 4963         if (fs->val.vlan_vld || fs->mask.vlan_vld)
 4964                 fconf |= F_VLAN;
 4965 
 4966         if (fs->val.vnic_vld || fs->mask.vnic_vld)
 4967                 fconf |= F_VNIC_ID;
 4968 
 4969         if (fs->val.iport || fs->mask.iport)
 4970                 fconf |= F_PORT;
 4971 
 4972         if (fs->val.fcoe || fs->mask.fcoe)
 4973                 fconf |= F_FCOE;
 4974 
 4975         return (fconf);
 4976 }
 4977 
/*
 * Return the current global filter mode (T4_FILTER_* bits) to the caller.
 * Reads the authoritative value from the TP_VLAN_PRI_MAP register and
 * re-syncs the driver's cached copy if they have diverged.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	/* Indirect register read; hence the synchronized op with the lock held. */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	if (sc->filter_mode != fconf) {
		/* Hardware is authoritative; warn and adopt its value. */
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
		sc->filter_mode = fconf;
	}

	*mode = fconf_to_mode(sc->filter_mode);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
 5003 
/*
 * Set the global filter mode.  Refused (EBUSY) while any filters are in
 * use or, with TCP_OFFLOAD, while any port has offload active, because
 * changing the mode would invalidate existing compressed filter tuples.
 * The actual hardware update is not wired up yet, so this currently
 * always ends in ENOTSUP.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
 5041 
/*
 * Read the hit count for filter `fid' out of the TCB that the hardware
 * keeps for it, via PCIe memory window 0.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	/* Point memory window 0 at this filter's TCB. */
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	/* Read back to flush the window update before using it. */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
	hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);

	return (be64toh(hits));
}
 5055 
/*
 * Return the first valid filter at or after t->idx, filling in *t with
 * its specification, L2T/SMT indices, and (if enabled) hit count.
 * t->idx is set to 0xffffffff when there is no such filter.  Always
 * returns 0 unless the synchronized op itself fails.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	/* Linear scan from the requested index to the first valid entry. */
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;	/* hit counting disabled */
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
 5094 
/*
 * Validate and install a hardware filter at t->idx.  Sends a filter work
 * request to the firmware and then sleeps (interruptibly) until the
 * asynchronous reply arrives via t4_filter_rpl().  Returns 0 on success,
 * EINPROGRESS if interrupted while waiting, or an errno for any
 * validation/allocation failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	/*
	 * NOTE(review): "t->idx + 4 >= nfilters" also rejects the last
	 * properly aligned slot (idx == nfilters - 4, which would occupy
	 * entries idx..idx+3, all in range) -- confirm whether that extra
	 * entry is intentionally reserved.
	 */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Lazily allocate the filter table on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter occupies 4 consecutive entries; all of them must
	 * be free and unlocked.  IPv4 (type == 0) needs only the first.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/*
	 * The work request was sent; wait for t4_filter_rpl() to clear
	 * f->pending and report the outcome via f->valid.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;	/* interrupted; setup continues */
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
 5208 
/*
 * Delete the hardware filter at t->idx.  Sends a delete work request and
 * sleeps (interruptibly) until t4_filter_rpl() confirms the removal.
 * Returns 0 on success, EINPROGRESS if interrupted while waiting, or an
 * errno for a validation failure.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;	/* an add/delete is already in flight */
		goto done;
	}
	if (f->locked) {
		rc = EPERM;	/* driver-internal filter; not deletable */
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;  /* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/*
	 * Wait for the async reply.  Note the polarity is inverted vs.
	 * set_filter(): after a successful delete the entry is no longer
	 * valid, so f->valid == 1 here means the delete failed.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;	/* interrupted; delete continues */
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
 5276 
 5277 static void
 5278 clear_filter(struct filter_entry *f)
 5279 {
 5280         if (f->l2t)
 5281                 t4_l2t_release(f->l2t);
 5282 
 5283         bzero(f, sizeof (*f));
 5284 }
 5285 
/*
 * Build and transmit the FW_FILTER_WR work request that programs filter
 * `fidx' into the hardware.  Allocates an L2T switching entry first if
 * the filter rewrites the destination MAC or VLAN.  On success the entry
 * is marked pending and ftids_in_use is bumped; completion is reported
 * asynchronously through t4_filter_rpl().
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* The firmware sees absolute filter TIDs, not table indices. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Header: opcode, length, TID, and reply/steering iq. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	/* Action bits: drop/switch/steer, MAC/VLAN rewrite, hit counting. */
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	/* Match fields: each value is paired with its mask. */
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies for this WR are delivered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	/* WR naming is from the hardware's point of view: lip/lp = local
	 * (our dip/dport), fip/fp = foreign (our sip/sport). */
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
 5388 
/*
 * Build and transmit the work request that deletes filter `fidx' from
 * the hardware.  The entry is marked pending; the firmware's reply
 * arrives via t4_filter_rpl().
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* The firmware sees absolute filter TIDs, not table indices. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Replies are steered to the firmware event queue. */
	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	t4_wrq_tx(sc, wr);
	return (0);
}
 5411 
/*
 * CPL handler for filter work-request replies.  Completes the pending
 * add/delete on the matching filter entry and wakes any thread sleeping
 * in set_filter()/del_filter().  TIDs outside the filter range are
 * silently ignored.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Convert the absolute TID to a filter-table index, in place. */
	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted or failed: either way the entry is free. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
 5451 
/*
 * Read an SGE context (egress, ingress, freelist manager, or congestion
 * manager) for the ioctl caller.  Prefers the firmware mailbox; falls
 * back to a direct backdoor register read if that fails or the firmware
 * isn't available.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
 5484 
/*
 * Copy a firmware image in from userland and flash it onto the card.
 * Refused (EBUSY) once the adapter is fully initialized.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	/*
	 * NOTE(review): fw->len comes from userland and is not bounded
	 * before this allocation -- confirm an upper limit is enforced
	 * somewhere in the ioctl path.  Also, malloc(9) with M_WAITOK
	 * does not return NULL, so the check below is redundant (but
	 * harmless).
	 */
	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
 5515 
/*
 * Read a range of card memory (EDRAM0, EDRAM1, or external memory) into
 * a kernel buffer via PCIe memory window 2, then copy it out to the
 * ioctl caller.  The requested range must be 32-bit aligned and must lie
 * entirely within a single enabled memory.
 */
static int
read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
{
	uint32_t base, size, lo, hi, win, off, remaining, i, n;
	uint32_t *buf, *b;
	int rc;

	/* reads are in multiples of 32 bits */
	if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
		return (EINVAL);

	/*
	 * We don't want to deal with potential holes so we mandate that the
	 * requested region must lie entirely within one of the 3 memories.
	 */
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		base = G_EDRAM0_BASE(hi) << 20;		/* BAR units are MB */
		size = G_EDRAM0_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		base = G_EDRAM1_BASE(hi) << 20;
		size = G_EDRAM1_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		base = G_EXT_MEM_BASE(hi) << 20;
		size = G_EXT_MEM_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	return (ENXIO);

proceed:
	/*
	 * NOTE(review): mr->len comes from userland and is not bounded
	 * before this M_WAITOK allocation -- confirm a limit is imposed
	 * by the caller.
	 */
	buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the requested region.
	 */
	win = mr->addr & ~0xf;
	off = mr->addr - win;  /* offset of the requested region in the win */
	remaining = mr->len;

	/* Slide the window across the region, one aperture at a time. */
	while (remaining) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
		/* Read back to flush the window update before using it. */
		t4_read_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, MEMWIN2_APERTURE - off);

		for (i = 0; i < n; i += 4, remaining -= 4)
			*b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);

		win += MEMWIN2_APERTURE;
		off = 0;	/* only the first aperture can start mid-window */
	}

	rc = copyout(buf, mr->data, mr->len);
	free(buf, M_CXGBE);

	return (rc);
}
 5593 
 5594 static int
 5595 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
 5596 {
 5597         int rc;
 5598 
 5599         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
 5600                 return (EINVAL);
 5601 
 5602         if (i2cd->len > 1) {
 5603                 /* XXX: need fw support for longer reads in one go */
 5604                 return (ENOTSUP);
 5605         }
 5606 
 5607         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
 5608         if (rc)
 5609                 return (rc);
 5610         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
 5611             i2cd->offset, &i2cd->data[0]);
 5612         end_synchronized_op(sc, 0);
 5613 
 5614         return (rc);
 5615 }
 5616 
 5617 int
 5618 t4_os_find_pci_capability(struct adapter *sc, int cap)
 5619 {
 5620         int i;
 5621 
 5622         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
 5623 }
 5624 
 5625 int
 5626 t4_os_pci_save_state(struct adapter *sc)
 5627 {
 5628         device_t dev;
 5629         struct pci_devinfo *dinfo;
 5630 
 5631         dev = sc->dev;
 5632         dinfo = device_get_ivars(dev);
 5633 
 5634         pci_cfg_save(dev, dinfo, 0);
 5635         return (0);
 5636 }
 5637 
 5638 int
 5639 t4_os_pci_restore_state(struct adapter *sc)
 5640 {
 5641         device_t dev;
 5642         struct pci_devinfo *dinfo;
 5643 
 5644         dev = sc->dev;
 5645         dinfo = device_get_ivars(dev);
 5646 
 5647         pci_cfg_restore(dev, dinfo);
 5648         return (0);
 5649 }
 5650 
 5651 void
 5652 t4_os_portmod_changed(const struct adapter *sc, int idx)
 5653 {
 5654         struct port_info *pi = sc->port[idx];
 5655         static const char *mod_str[] = {
 5656                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
 5657         };
 5658 
 5659         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
 5660                 if_printf(pi->ifp, "transceiver unplugged.\n");
 5661         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
 5662                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
 5663         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
 5664                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
 5665         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
 5666                 if_printf(pi->ifp, "%s transceiver inserted.\n",
 5667                     mod_str[pi->mod_type]);
 5668         } else {
 5669                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
 5670                     pi->mod_type);
 5671         }
 5672 }
 5673 
 5674 void
 5675 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
 5676 {
 5677         struct port_info *pi = sc->port[idx];
 5678         struct ifnet *ifp = pi->ifp;
 5679 
 5680         if (link_stat) {
 5681                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
 5682                 if_link_state_change(ifp, LINK_STATE_UP);
 5683         } else
 5684                 if_link_state_change(ifp, LINK_STATE_DOWN);
 5685 }
 5686 
 5687 void
 5688 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
 5689 {
 5690         struct adapter *sc;
 5691 
 5692         mtx_lock(&t4_list_lock);
 5693         SLIST_FOREACH(sc, &t4_list, link) {
 5694                 /*
 5695                  * func should not make any assumptions about what state sc is
 5696                  * in - the only guarantee is that sc->sc_lock is a valid lock.
 5697                  */
 5698                 func(sc, arg);
 5699         }
 5700         mtx_unlock(&t4_list_lock);
 5701 }
 5702 
 5703 static int
 5704 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
 5705 {
 5706        return (0);
 5707 }
 5708 
 5709 static int
 5710 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
 5711 {
 5712        return (0);
 5713 }
 5714 
/*
 * ioctl handler for the t4nex control device.  All commands are privileged
 * (PRIV_DRIVER).  'data' has already been copied in/out by the cdev layer;
 * commands that move bulk data (REGDUMP, GET_MEM) do their own copyout.
 * rc is 0 after a successful priv_check, so cases that just 'break' without
 * setting it report success.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		/* Read a 4 or 8 byte register; address must be 32-bit aligned
		 * and within the BAR0 mmio region. */
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		/* Write a 4 or 8 byte register, with the same alignment and
		 * range checks as GETREG. */
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry bits above 32. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		/* Dump the full register set into a caller-supplied buffer.
		 * If the buffer is too small, return the needed size in
		 * regs->len along with ENOBUFS. */
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = T4_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	/* Filter management: get/set the global filter mode and individual
	 * filter entries. */
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		/* Flash a new firmware image onto the card. */
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		/* Read a range of card memory (EDC0/EDC1/MC). */
		rc = read_card_mem(sc, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		/* Reset the MAC and software queue statistics for one port.
		 * 'data' holds the port index. */
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			/* Per-rxq software counters. */
			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			/* Per-txq software counters. */
			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* Control queue counters for this port. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	default:
		/* Unrecognized ioctl. */
		rc = EINVAL;
	}

	return (rc);
}
 5862 
 5863 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload (TOE) on a port.  Must be called inside a
 * synchronized operation on the adapter.  Tracks per-port TOE state in
 * sc->offload_map; activates the TOM upper-layer driver on first use.
 * Returns 0 on success or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* The adapter itself must be offload-capable. */
	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* Full adapter initialization is a prerequisite for TOE. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port - nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* Activate the TOM ULD the first time any port enables TOE.
		 * EAGAIN from t4_activate_uld means the ULD isn't registered,
		 * i.e. t4_tom.ko isn't loaded. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port - nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		/* NOTE(review): only the port's bit is cleared here; the TOM
		 * ULD itself is not deactivated when the last port disables
		 * TOE - presumably deliberate, confirm against t4_tom. */
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
 5912 
 5913 /*
 5914  * Add an upper layer driver to the global list.
 5915  */
 5916 int
 5917 t4_register_uld(struct uld_info *ui)
 5918 {
 5919         int rc = 0;
 5920         struct uld_info *u;
 5921 
 5922         mtx_lock(&t4_uld_list_lock);
 5923         SLIST_FOREACH(u, &t4_uld_list, link) {
 5924             if (u->uld_id == ui->uld_id) {
 5925                     rc = EEXIST;
 5926                     goto done;
 5927             }
 5928         }
 5929 
 5930         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
 5931         ui->refcount = 0;
 5932 done:
 5933         mtx_unlock(&t4_uld_list_lock);
 5934         return (rc);
 5935 }
 5936 
 5937 int
 5938 t4_unregister_uld(struct uld_info *ui)
 5939 {
 5940         int rc = EINVAL;
 5941         struct uld_info *u;
 5942 
 5943         mtx_lock(&t4_uld_list_lock);
 5944 
 5945         SLIST_FOREACH(u, &t4_uld_list, link) {
 5946             if (u == ui) {
 5947                     if (ui->refcount > 0) {
 5948                             rc = EBUSY;
 5949                             goto done;
 5950                     }
 5951 
 5952                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
 5953                     rc = 0;
 5954                     goto done;
 5955             }
 5956         }
 5957 done:
 5958         mtx_unlock(&t4_uld_list_lock);
 5959         return (rc);
 5960 }
 5961 
 5962 int
 5963 t4_activate_uld(struct adapter *sc, int id)
 5964 {
 5965         int rc = EAGAIN;
 5966         struct uld_info *ui;
 5967 
 5968         ASSERT_SYNCHRONIZED_OP(sc);
 5969 
 5970         mtx_lock(&t4_uld_list_lock);
 5971 
 5972         SLIST_FOREACH(ui, &t4_uld_list, link) {
 5973                 if (ui->uld_id == id) {
 5974                         rc = ui->activate(sc);
 5975                         if (rc == 0)
 5976                                 ui->refcount++;
 5977                         goto done;
 5978                 }
 5979         }
 5980 done:
 5981         mtx_unlock(&t4_uld_list_lock);
 5982 
 5983         return (rc);
 5984 }
 5985 
 5986 int
 5987 t4_deactivate_uld(struct adapter *sc, int id)
 5988 {
 5989         int rc = EINVAL;
 5990         struct uld_info *ui;
 5991 
 5992         ASSERT_SYNCHRONIZED_OP(sc);
 5993 
 5994         mtx_lock(&t4_uld_list_lock);
 5995 
 5996         SLIST_FOREACH(ui, &t4_uld_list, link) {
 5997                 if (ui->uld_id == id) {
 5998                         rc = ui->deactivate(sc);
 5999                         if (rc == 0)
 6000                                 ui->refcount--;
 6001                         goto done;
 6002                 }
 6003         }
 6004 done:
 6005         mtx_unlock(&t4_uld_list_lock);
 6006 
 6007         return (rc);
 6008 }
 6009 #endif
 6010 
 6011 /*
 6012  * Come up with reasonable defaults for some of the tunables, provided they're
 6013  * not set by the user (in which case we'll use the values as is).
 6014  */
 6015 static void
 6016 tweak_tunables(void)
 6017 {
 6018         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
 6019 
 6020         if (t4_ntxq10g < 1)
 6021                 t4_ntxq10g = min(nc, NTXQ_10G);
 6022 
 6023         if (t4_ntxq1g < 1)
 6024                 t4_ntxq1g = min(nc, NTXQ_1G);
 6025 
 6026         if (t4_nrxq10g < 1)
 6027                 t4_nrxq10g = min(nc, NRXQ_10G);
 6028 
 6029         if (t4_nrxq1g < 1)
 6030                 t4_nrxq1g = min(nc, NRXQ_1G);
 6031 
 6032 #ifdef TCP_OFFLOAD
 6033         if (t4_nofldtxq10g < 1)
 6034                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
 6035 
 6036         if (t4_nofldtxq1g < 1)
 6037                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
 6038 
 6039         if (t4_nofldrxq10g < 1)
 6040                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
 6041 
 6042         if (t4_nofldrxq1g < 1)
 6043                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
 6044 
 6045         if (t4_toecaps_allowed == -1)
 6046                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
 6047 #else
 6048         if (t4_toecaps_allowed == -1)
 6049                 t4_toecaps_allowed = 0;
 6050 #endif
 6051 
 6052         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
 6053                 t4_tmr_idx_10g = TMR_IDX_10G;
 6054 
 6055         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
 6056                 t4_pktc_idx_10g = PKTC_IDX_10G;
 6057 
 6058         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
 6059                 t4_tmr_idx_1g = TMR_IDX_1G;
 6060 
 6061         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
 6062                 t4_pktc_idx_1g = PKTC_IDX_1G;
 6063 
 6064         if (t4_qsize_txq < 128)
 6065                 t4_qsize_txq = 128;
 6066 
 6067         if (t4_qsize_rxq < 128)
 6068                 t4_qsize_rxq = 128;
 6069         while (t4_qsize_rxq & 7)
 6070                 t4_qsize_rxq++;
 6071 
 6072         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
 6073 }
 6074 
/*
 * Module event handler.  MOD_LOAD initializes the global adapter and ULD
 * lists; MOD_UNLOAD refuses (EBUSY) while either list is non-empty, and
 * destroys the locks otherwise.  Other events fall through and return 0
 * (i.e. they are permitted).
 */
static int
t4_mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;

	switch (cmd) {
	case MOD_LOAD:
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		/* Safe to destroy: nobody can register a ULD once the module
		 * is being unloaded. */
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
 6116 
static devclass_t t4_devclass;
static devclass_t cxgbe_devclass;

/* t4nex: the nexus driver for the PCI device; t4_mod_event handles
 * module load/unload. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
MODULE_VERSION(t4nex, 1);

/* cxgbe: the per-port network interface driver, a child of t4nex. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

Cache object: 5167392af10c7a2e210f66226896a5e1


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.