FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_main.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2008, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/bus.h>
   37 #include <sys/module.h>
   38 #include <sys/pciio.h>
   39 #include <sys/conf.h>
   40 #include <machine/bus.h>
   41 #include <machine/resource.h>
   42 #include <sys/bus_dma.h>
   43 #include <sys/ktr.h>
   44 #include <sys/rman.h>
   45 #include <sys/ioccom.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/linker.h>
   48 #include <sys/firmware.h>
   49 #include <sys/socket.h>
   50 #include <sys/sockio.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/syslog.h>
   54 #include <sys/queue.h>
   55 #include <sys/taskqueue.h>
   56 #include <sys/proc.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_arp.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <netinet/in_systm.h>
   68 #include <netinet/in.h>
   69 #include <netinet/if_ether.h>
   70 #include <netinet/ip.h>
   72 #include <netinet/tcp.h>
   73 #include <netinet/udp.h>
   74 
   75 #include <dev/pci/pcireg.h>
   76 #include <dev/pci/pcivar.h>
   77 #include <dev/pci/pci_private.h>
   78 
   79 #ifdef CONFIG_DEFINED
   80 #include <cxgb_include.h>
   81 #else
   82 #include <dev/cxgb/cxgb_include.h>
   83 #endif
   84 
   85 #ifdef PRIV_SUPPORTED
   86 #include <sys/priv.h>
   87 #endif
   88 
   89 #ifdef IFNET_MULTIQUEUE
   90 #include <machine/intr_machdep.h>
   91 #endif
   92 
   93 static int cxgb_setup_msix(adapter_t *, int);
   94 static void cxgb_teardown_msix(adapter_t *);
   95 static void cxgb_init(void *);
   96 static void cxgb_init_locked(struct port_info *);
   97 static void cxgb_stop_locked(struct port_info *);
   98 static void cxgb_set_rxmode(struct port_info *);
   99 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
  100 static int cxgb_media_change(struct ifnet *);
  101 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
  102 static int setup_sge_qsets(adapter_t *);
  103 static void cxgb_async_intr(void *);
  104 static void cxgb_ext_intr_handler(void *, int);
  105 static void cxgb_tick_handler(void *, int);
  106 static void cxgb_down_locked(struct adapter *sc);
  107 static void cxgb_tick(void *);
  108 static void setup_rss(adapter_t *sc);
  109 
  110 /* Attachment glue for the PCI controller end of the device.  Each port of
  111  * the device is attached separately, as defined later.
  112  */
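/*
 * The resulting newbus hierarchy is two levels deep: the controller
 * ("cxgbc") attaches to the PCI bus and each port ("cxgb") attaches to
 * the controller, per the DRIVER_MODULE() declarations below, e.g.
 *
 *   pci0 -> cxgbc0 -> cxgb0, cxgb1
 */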
  113 static int cxgb_controller_probe(device_t);
  114 static int cxgb_controller_attach(device_t);
  115 static int cxgb_controller_detach(device_t);
  116 static void cxgb_free(struct adapter *);
  117 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
  118     unsigned int end);
  119 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
  120 static int cxgb_get_regs_len(void);
  121 static int offload_open(struct port_info *pi);
  122 static void touch_bars(device_t dev);
  123 static int offload_close(struct t3cdev *tdev);
  124 static void cxgb_link_start(struct port_info *p);
  125 
  126 static device_method_t cxgb_controller_methods[] = {
  127         DEVMETHOD(device_probe,         cxgb_controller_probe),
  128         DEVMETHOD(device_attach,        cxgb_controller_attach),
  129         DEVMETHOD(device_detach,        cxgb_controller_detach),
  130 
  131         /* bus interface */
  132         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  133         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  134 
  135         { 0, 0 }
  136 };
  137 
  138 static driver_t cxgb_controller_driver = {
  139         "cxgbc",
  140         cxgb_controller_methods,
  141         sizeof(struct adapter)
  142 };
  143 
  144 static devclass_t       cxgb_controller_devclass;
  145 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
  146 
  147 /*
  148  * Attachment glue for the ports.  Attachment is done directly to the
  149  * controller device.
  150  */
  151 static int cxgb_port_probe(device_t);
  152 static int cxgb_port_attach(device_t);
  153 static int cxgb_port_detach(device_t);
  154 
  155 static device_method_t cxgb_port_methods[] = {
  156         DEVMETHOD(device_probe,         cxgb_port_probe),
  157         DEVMETHOD(device_attach,        cxgb_port_attach),
  158         DEVMETHOD(device_detach,        cxgb_port_detach),
  159         { 0, 0 }
  160 };
  161 
  162 static driver_t cxgb_port_driver = {
  163         "cxgb",
  164         cxgb_port_methods,
  165         0
  166 };
  167 
  168 static d_ioctl_t cxgb_extension_ioctl;
  169 static d_open_t cxgb_extension_open;
  170 static d_close_t cxgb_extension_close;
  171 
  172 static struct cdevsw cxgb_cdevsw = {
  173        .d_version =    D_VERSION,
  174        .d_flags =      0,
  175        .d_open =       cxgb_extension_open,
  176        .d_close =      cxgb_extension_close,
  177        .d_ioctl =      cxgb_extension_ioctl,
  178        .d_name =       "cxgb",
  179 };
  180 
  181 static devclass_t       cxgb_port_devclass;
  182 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
  183 
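/*
 * One MSI-X vector per queue set plus one for async events (link changes
 * and error conditions); see cxgb_setup_msix() below, where message 1 is
 * bound to the async handler and messages 2..n to the queue sets.
 */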
  184 #define SGE_MSIX_COUNT (SGE_QSETS + 1)
  185 
  186 /*
  187  * The driver uses the best interrupt scheme available on a platform in the
  188  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
  189  * of these schemes the driver may consider as follows:
  190  *
  191  * msi = 2: choose from among all three options
  192  * msi = 1: only consider MSI and pin interrupts
  193  * msi = 0: force pin interrupts
  194  */
  195 static int msi_allowed = 2;
  196 
  197 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
  198 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
  199 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
  200     "MSI-X, MSI, INTx selector");
  201 
  202 /*
  203  * The driver enables offload as a default.
  204  * To disable it, use ofld_disable = 1.
  205  */
  206 static int ofld_disable = 0;
  207 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
  208 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
  209     "disable ULP offload");
  210 
  211 /*
  212  * The driver uses an auto-queue algorithm by default.
  213  * To disable it and force a single queue-set per port, use singleq = 1.
  214  */
  215 static int singleq = 0;
  216 TUNABLE_INT("hw.cxgb.singleq", &singleq);
  217 SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
  218     "use a single queue-set per port");
  219 
  220 
  221 /*
  222  * The firmware is normally updated only when it is out of date.
  223  * To force an update even when it is current, use force_fw_update = 1.
  224  */
  225 static int force_fw_update = 0;
  226 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
  227 SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
  228     "update firmware even if up to date");
  229 
  230 int cxgb_use_16k_clusters = 1;
  231 TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
  232 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
  233     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
  234 
  235 /*
  236  * Tune the size of the output queue.
  237  */
  238 int cxgb_snd_queue_len = IFQ_MAXLEN;
  239 TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
  240 SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
  241     &cxgb_snd_queue_len, 0, "send queue size");
  242 
  243 
  244 enum {
  245         MAX_TXQ_ENTRIES      = 16384,
  246         MAX_CTRL_TXQ_ENTRIES = 1024,
  247         MAX_RSPQ_ENTRIES     = 16384,
  248         MAX_RX_BUFFERS       = 16384,
  249         MAX_RX_JUMBO_BUFFERS = 16384,
  250         MIN_TXQ_ENTRIES      = 4,
  251         MIN_CTRL_TXQ_ENTRIES = 4,
  252         MIN_RSPQ_ENTRIES     = 32,
  253         MIN_FL_ENTRIES       = 32,
  254         MIN_FL_JUMBO_ENTRIES = 32
  255 };
  256 
  257 struct filter_info {
  258         u32 sip;
  259         u32 sip_mask;
  260         u32 dip;
  261         u16 sport;
  262         u16 dport;
  263         u32 vlan:12;
  264         u32 vlan_prio:3;
  265         u32 mac_hit:1;
  266         u32 mac_idx:4;
  267         u32 mac_vld:1;
  268         u32 pkt_type:2;
  269         u32 report_filter_id:1;
  270         u32 pass:1;
  271         u32 rss:1;
  272         u32 qset:3;
  273         u32 locked:1;
  274         u32 valid:1;
  275 };
  276 
  277 enum { FILTER_NO_VLAN_PRI = 7 };
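/*
 * Illustrative sketch only (not part of the driver): how a filter_info
 * entry steering one TCP/UDP destination port to a fixed queue set might
 * be filled in.  The 0xfff VLAN wildcard is an assumption about the
 * hardware encoding; real filters are programmed through the extension
 * ioctl path (cxgb_extension_ioctl).
 */
#ifdef notyet
static void
example_fill_filter(struct filter_info *f, u16 dport, int qset)
{
	memset(f, 0, sizeof(*f));
	f->dport = dport;		/* match this destination port */
	f->vlan = 0xfff;		/* assumed "any VLAN" encoding */
	f->vlan_prio = FILTER_NO_VLAN_PRI;
	f->pass = 1;			/* pass, don't drop */
	f->qset = qset;			/* steer to this queue set */
	f->valid = 1;
}
#endif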
  278 
  279 #define EEPROM_MAGIC 0x38E2F10C
  280 
  281 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  282 
  283 /* Table for probing the cards.  The desc field is used only as the list terminator. */
  284 struct cxgb_ident {
  285         uint16_t        vendor;
  286         uint16_t        device;
  287         int             index;
  288         char            *desc;
  289 } cxgb_identifiers[] = {
  290         {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
  291         {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
  292         {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
  293         {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
  294         {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
  295         {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
  296         {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
  297         {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
  298         {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
  299         {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
  300         {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
  301         {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "N310E"},
  302         {0, 0, 0, NULL}
  303 };
  304 
  305 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
  306 
  307 
  308 void
  309 cxgb_log_tcb(struct adapter *sc, unsigned int tid)
  310 {
  311         char buf[TCB_SIZE];
  312         uint64_t *tcb = (uint64_t *)buf;
  313         int i, error;
  314         struct mc7 *mem = &sc->cm;
  315         
  316         error = t3_mc7_bd_read(mem, tid*TCB_SIZE/8, TCB_SIZE/8, tcb);
  317         if (error) {
  318                 printf("cxgb_log_tcb: TCB read failed (%d)\n", error);
  319                 return; /* don't log an uninitialized buffer */
              }
  320         CTR1(KTR_CXGB, "TCB tid=%u", tid);
  321         for (i = 0; i < TCB_SIZE / 32; i++) {
  322                 CTR5(KTR_CXGB, "%1d: %08x %08x %08x %08x",
  323                     i, (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
  324                     (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
  325                 tcb += 2;
  326                 CTR4(KTR_CXGB, "   %08x %08x %08x %08x",
  327                     (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
  328                     (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
  329                 tcb += 2;
  330         }
  331 }
  332 
  333 static __inline char
  334 t3rev2char(struct adapter *adapter)
  335 {
  336         char rev = 'z';
  337 
  338         switch(adapter->params.rev) {
  339         case T3_REV_A:
  340                 rev = 'a';
  341                 break;
  342         case T3_REV_B:
  343         case T3_REV_B2:
  344                 rev = 'b';
  345                 break;
  346         case T3_REV_C:
  347                 rev = 'c';
  348                 break;
  349         }
  350         return rev;
  351 }
  352 
  353 static struct cxgb_ident *
  354 cxgb_get_ident(device_t dev)
  355 {
  356         struct cxgb_ident *id;
  357 
  358         for (id = cxgb_identifiers; id->desc != NULL; id++) {
  359                 if ((id->vendor == pci_get_vendor(dev)) &&
  360                     (id->device == pci_get_device(dev))) {
  361                         return (id);
  362                 }
  363         }
  364         return (NULL);
  365 }
  366 
  367 static const struct adapter_info *
  368 cxgb_get_adapter_info(device_t dev)
  369 {
  370         struct cxgb_ident *id;
  371         const struct adapter_info *ai;
  372       
  373         id = cxgb_get_ident(dev);
  374         if (id == NULL)
  375                 return (NULL);
  376 
  377         ai = t3_get_adapter_info(id->index);
  378 
  379         return (ai);
  380 }
  381 
  382 static int
  383 cxgb_controller_probe(device_t dev)
  384 {
  385         const struct adapter_info *ai;
  386         char *ports, buf[80];
  387         int nports;
  388         struct adapter *sc = device_get_softc(dev);
  389 
  390         ai = cxgb_get_adapter_info(dev);
  391         if (ai == NULL)
  392                 return (ENXIO);
  393 
  394         nports = ai->nports0 + ai->nports1;
  395         if (nports == 1)
  396                 ports = "port";
  397         else
  398                 ports = "ports";
  399 
  400         snprintf(buf, sizeof(buf), "%s %sNIC, rev: %d nports: %d %s",
  401                  ai->desc, is_offload(sc) ? "R" : "",
  402                  sc->params.rev, nports, ports);
  403         device_set_desc_copy(dev, buf);
  404         return (BUS_PROBE_DEFAULT);
  405 }
  406 
  407 #define FW_FNAME "cxgb_t3fw"
  408 #define TPEEPROM_NAME "t3b_tp_eeprom"
  409 #define TPSRAM_NAME "t3b_protocol_sram"
  410 
  411 static int
  412 upgrade_fw(adapter_t *sc)
  413 {
  414 #ifdef FIRMWARE_LATEST
  415         const struct firmware *fw;
  416 #else
  417         struct firmware *fw;
  418 #endif  
  419         int status;
  420         
  421         if ((fw = firmware_get(FW_FNAME)) == NULL)  {
  422                 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
  423                 return (ENOENT);
  424         } else
  425                 device_printf(sc->dev, "updating firmware on card\n");
  426         status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
  427 
  428         device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
  429         
  430         firmware_put(fw, FIRMWARE_UNLOAD);
  431 
  432         return (status);        
  433 }
  434 
  435 static int
  436 cxgb_controller_attach(device_t dev)
  437 {
  438         device_t child;
  439         const struct adapter_info *ai;
  440         struct adapter *sc;
  441         int i, error = 0;
  442         uint32_t vers;
  443         int port_qsets = 1;
  444 #ifdef MSI_SUPPORTED
  445         int msi_needed, reg;
  446 #endif
  447         int must_load = 0;
  448         char buf[80];
  449 
  450         sc = device_get_softc(dev);
  451         sc->dev = dev;
  452         sc->msi_count = 0;
  453         ai = cxgb_get_adapter_info(dev);
  454 
  455         /*
  456          * XXX not really related but a recent addition
  457          */
  458 #ifdef MSI_SUPPORTED    
  459         /* find the PCIe link width and set max read request to 4KB */
  460         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
  461                 uint16_t lnk, pectl;
  462                 lnk = pci_read_config(dev, reg + 0x12, 2);
  463                 sc->link_width = (lnk >> 4) & 0x3f;
  464                 
  465                 pectl = pci_read_config(dev, reg + 0x8, 2);
  466                 pectl = (pectl & ~0x7000) | (5 << 12);
  467                 pci_write_config(dev, reg + 0x8, pectl, 2);
  468         }
  469 
  470         if (sc->link_width != 0 && sc->link_width <= 4 &&
  471             (ai->nports0 + ai->nports1) <= 2) {
  472                 device_printf(sc->dev,
  473                     "PCIe x%d Link, expect reduced performance\n",
  474                     sc->link_width);
  475         }
  476 #endif
  477         touch_bars(dev);
  478         pci_enable_busmaster(dev);
  479         /*
  480          * Allocate the registers and make them available to the driver.
  481          * The registers that we care about for NIC mode are in BAR 0
  482          */
  483         sc->regs_rid = PCIR_BAR(0);
  484         if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  485             &sc->regs_rid, RF_ACTIVE)) == NULL) {
  486                 device_printf(dev, "Cannot allocate BAR region 0\n");
  487                 return (ENXIO);
  488         }
  489         sc->udbs_rid = PCIR_BAR(2);
  490         sc->udbs_res = NULL;
  491         if (is_offload(sc) &&
  492             ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  493                    &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
  494                 device_printf(dev, "Cannot allocate BAR region 2\n");
  495                 error = ENXIO;
  496                 goto out;
  497         }
  498 
  499         snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
  500             device_get_unit(dev));
  501         ADAPTER_LOCK_INIT(sc, sc->lockbuf);
  502 
  503         snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
  504             device_get_unit(dev));
  505         snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
  506             device_get_unit(dev));
  507         snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
  508             device_get_unit(dev));
  509         
  510         MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
  511         MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
  512         MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
  513         
  514         sc->bt = rman_get_bustag(sc->regs_res);
  515         sc->bh = rman_get_bushandle(sc->regs_res);
  516         sc->mmio_len = rman_get_size(sc->regs_res);
  517 
  518         if (t3_prep_adapter(sc, ai, 1) < 0) {
  519                 printf("prep adapter failed\n");
  520                 error = ENODEV;
  521                 goto out;
  522         }
  523         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
  524          * enough messages for the queue sets.  If that fails, try falling
  525          * back to MSI.  If that fails, then try falling back to the legacy
  526          * interrupt pin model.
  527          */
  528 #ifdef MSI_SUPPORTED
  529 
  530         sc->msix_regs_rid = 0x20;
  531         if ((msi_allowed >= 2) &&
  532             (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  533             &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
  534 
  535                 msi_needed = sc->msi_count = SGE_MSIX_COUNT;
  536 
  537                 if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
  538                     (sc->msi_count != msi_needed)) {
  539                         device_printf(dev, "msix allocation failed - msi_count = %d"
  540                             " msi_needed=%d will try msi err=%d\n", sc->msi_count,
  541                             msi_needed, error);
  542                         sc->msi_count = 0;
  543                         pci_release_msi(dev);
  544                         bus_release_resource(dev, SYS_RES_MEMORY,
  545                             sc->msix_regs_rid, sc->msix_regs_res);
  546                         sc->msix_regs_res = NULL;
  547                 } else {
  548                         sc->flags |= USING_MSIX;
  549                         sc->cxgb_intr = t3_intr_msix;
  550                 }
  551         }
  552 
  553         if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
  554                 sc->msi_count = 1;
  555                 if (pci_alloc_msi(dev, &sc->msi_count)) {
  556                         device_printf(dev, "alloc msi failed - will try INTx\n");
  557                         sc->msi_count = 0;
  558                         pci_release_msi(dev);
  559                 } else {
  560                         sc->flags |= USING_MSI;
  561                         sc->irq_rid = 1;
  562                         sc->cxgb_intr = t3_intr_msi;
  563                 }
  564         }
  565 #endif
  566         if (sc->msi_count == 0) {
  567                 device_printf(dev, "using line interrupts\n");
  568                 sc->irq_rid = 0;
  569                 sc->cxgb_intr = t3b_intr;
  570         }
  571 
  572         if ((sc->flags & USING_MSIX) && !singleq)
  573                 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
  574         
  575         /* Create a private taskqueue thread for handling driver events */
  576 #ifdef TASKQUEUE_CURRENT        
  577         sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
  578             taskqueue_thread_enqueue, &sc->tq);
  579 #else
  580         sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
  581             taskqueue_thread_enqueue, &sc->tq);
  582 #endif  
  583         if (sc->tq == NULL) {
  584                 device_printf(dev, "failed to allocate controller task queue\n");
  585                 error = ENOMEM;
                      goto out;
  586         }
  587 
  588         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
  589             device_get_nameunit(dev));
  590         TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
  591         TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
  592 
  593         
  594         /* Create a periodic callout for checking adapter status */
  595         callout_init(&sc->cxgb_tick_ch, TRUE);
  596         
  597         if ((t3_check_fw_version(sc, &must_load) != 0 && must_load) || force_fw_update) {
  598                 /*
  599                  * Warn user that a firmware update will be attempted in init.
  600                  */
  601                 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
  602                     FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  603                 sc->flags &= ~FW_UPTODATE;
  604         } else {
  605                 sc->flags |= FW_UPTODATE;
  606         }
  607 
  608         if (t3_check_tpsram_version(sc, &must_load) != 0 && must_load) {
  609                 /*
  610                  * Warn user that a firmware update will be attempted in init.
  611                  */
  612                 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
  613                     t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  614                 sc->flags &= ~TPS_UPTODATE;
  615         } else {
  616                 sc->flags |= TPS_UPTODATE;
  617         }
  618         
  619         /*
  620          * Create a child device for each MAC.  The ethernet attachment
  621          * will be done in these children.
  622          */     
  623         for (i = 0; i < (sc)->params.nports; i++) {
  624                 struct port_info *pi;
  625                 
  626                 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
  627                         device_printf(dev, "failed to add child port\n");
  628                         error = EINVAL;
  629                         goto out;
  630                 }
  631                 pi = &sc->port[i];
  632                 pi->adapter = sc;
  633                 pi->nqsets = port_qsets;
  634                 pi->first_qset = i*port_qsets;
  635                 pi->port_id = i;
  636                 pi->tx_chan = i >= ai->nports0;
  637                 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
  638                 sc->rxpkt_map[pi->txpkt_intf] = i;
  640                 sc->portdev[i] = child;
  641                 device_set_softc(child, pi);
  642         }
  643         if ((error = bus_generic_attach(dev)) != 0)
  644                 goto out;
  645 
  646         /* initialize sge private state */
  647         t3_sge_init_adapter(sc);
  648 
  649         t3_led_ready(sc);
  650         
  651         cxgb_offload_init();
  652         if (is_offload(sc)) {
  653                 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  654                 cxgb_adapter_ofld(sc);
  655         }
  656         error = t3_get_fw_version(sc, &vers);
  657         if (error)
  658                 goto out;
  659 
  660         snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  661             G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  662             G_FW_VERSION_MICRO(vers));
  663 
  664         snprintf(buf, sizeof(buf), "%s\t E/C: %s S/N: %s", 
  665                  ai->desc,
  666                  sc->params.vpd.ec, sc->params.vpd.sn);
  667         device_set_desc_copy(dev, buf);
  668 
  669         device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
  670         callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
  671         t3_add_attach_sysctls(sc);
  672 out:
  673         if (error)
  674                 cxgb_free(sc);
  675 
  676         return (error);
  677 }
  678 
  679 static int
  680 cxgb_controller_detach(device_t dev)
  681 {
  682         struct adapter *sc;
  683 
  684         sc = device_get_softc(dev);
  685 
  686         cxgb_free(sc);
  687 
  688         return (0);
  689 }
  690 
  691 static void
  692 cxgb_free(struct adapter *sc)
  693 {
  694         int i;
  695 
  696         ADAPTER_LOCK(sc);
  697         sc->flags |= CXGB_SHUTDOWN;
  698         ADAPTER_UNLOCK(sc);
  699         cxgb_pcpu_shutdown_threads(sc);
  700         ADAPTER_LOCK(sc);
  701 
  702 /*
  703  * drops the lock
  704  */
  705         cxgb_down_locked(sc);
  706         
  707 #ifdef MSI_SUPPORTED
  708         if (sc->flags & (USING_MSI | USING_MSIX)) {
  709                 device_printf(sc->dev, "releasing msi message(s)\n");
  710                 pci_release_msi(sc->dev);
  711         } else {
  712                 device_printf(sc->dev, "no msi message to release\n");
  713         }
  714 #endif
  715         if (sc->msix_regs_res != NULL) {
  716                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
  717                     sc->msix_regs_res);
  718         }
  719 
  720         t3_sge_deinit_sw(sc);
  721         /*
  722          * Wait for last callout
  723          */
  724         
  725         DELAY(hz*100);
  726 
  727         for (i = 0; i < (sc)->params.nports; ++i) {
  728                 if (sc->portdev[i] != NULL)
  729                         device_delete_child(sc->dev, sc->portdev[i]);
  730         }
  731                 
  732         bus_generic_detach(sc->dev);
  733         if (sc->tq != NULL) {
  734                 taskqueue_free(sc->tq);
  735                 sc->tq = NULL;
  736         }
  737         
  738         if (is_offload(sc)) {
  739                 cxgb_adapter_unofld(sc);
  740                 if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
  741                         offload_close(&sc->tdev);
  742                 else
  743                         printf("cxgb_free: DEVMAP_BIT not set\n");
  744         } else
  745                 printf("cxgb_free: offload not configured\n");
  746 #ifdef notyet
  747         if (sc->flags & CXGB_OFLD_INIT)
  748                 cxgb_offload_deactivate(sc);
  749 #endif
  750         free(sc->filters, M_DEVBUF);
  751         t3_sge_free(sc);
  752         
  753         cxgb_offload_exit();
  754 
  755         if (sc->udbs_res != NULL)
  756                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
  757                     sc->udbs_res);
  758 
  759         if (sc->regs_res != NULL)
  760                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
  761                     sc->regs_res);
  762 
  763         MTX_DESTROY(&sc->mdio_lock);
  764         MTX_DESTROY(&sc->sge.reg_lock);
  765         MTX_DESTROY(&sc->elmer_lock);
  766         ADAPTER_LOCK_DEINIT(sc);
  767 }
  768 
  769 /**
  770  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
  771  *      @sc: the controller softc
  772  *
  773  *      Determines how many sets of SGE queues to use and initializes them.
  774  *      We support multiple queue sets per port if we have MSI-X, otherwise
  775  *      just one queue set per port.
  776  */
  777 static int
  778 setup_sge_qsets(adapter_t *sc)
  779 {
  780         int i, j, err, irq_idx = 0, qset_idx = 0;
  781         u_int ntxq = SGE_TXQ_PER_SET;
  782 
  783         if ((err = t3_sge_alloc(sc)) != 0) {
  784                 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
  785                 return (err);
  786         }
  787 
  788         if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
  789                 irq_idx = -1;
  790 
  791         for (i = 0; i < (sc)->params.nports; i++) {
  792                 struct port_info *pi = &sc->port[i];
  793 
  794                 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
  795                         err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
  796                             (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
  797                             &sc->params.sge.qset[qset_idx], ntxq, pi);
  798                         if (err) {
  799                                 t3_free_sge_resources(sc);
  800                                 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
  801                                     err);
  802                                 return (err);
  803                         }
  804                 }
  805         }
  806 
  807         return (0);
  808 }
  809 
  810 static void
  811 cxgb_teardown_msix(adapter_t *sc) 
  812 {
  813         int i, nqsets;
  814         
  815         for (nqsets = i = 0; i < (sc)->params.nports; i++) 
  816                 nqsets += sc->port[i].nqsets;
  817 
  818         for (i = 0; i < nqsets; i++) {
  819                 if (sc->msix_intr_tag[i] != NULL) {
  820                         bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
  821                             sc->msix_intr_tag[i]);
  822                         sc->msix_intr_tag[i] = NULL;
  823                 }
  824                 if (sc->msix_irq_res[i] != NULL) {
  825                         bus_release_resource(sc->dev, SYS_RES_IRQ,
  826                             sc->msix_irq_rid[i], sc->msix_irq_res[i]);
  827                         sc->msix_irq_res[i] = NULL;
  828                 }
  829         }
  830 }
  831 
  832 static int
  833 cxgb_setup_msix(adapter_t *sc, int msix_count)
  834 {
  835         int i, j, k, nqsets, rid;
  836 
  837         /* The first message indicates link changes and error conditions */
  838         sc->irq_rid = 1;
  839         if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
  840            &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  841                 device_printf(sc->dev, "Cannot allocate msix interrupt\n");
  842                 return (EINVAL);
  843         }
  844 
  845         if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
  846 #ifdef INTR_FILTERS
  847                 NULL,
  848 #endif
  849                 cxgb_async_intr, sc, &sc->intr_tag)) {
  850                 device_printf(sc->dev, "Cannot set up interrupt\n");
  851                 return (EINVAL);
  852         }
  853         for (i = k = 0; i < (sc)->params.nports; i++) {
  854                 nqsets = sc->port[i].nqsets;
  855                 for (j = 0; j < nqsets; j++, k++) {
  856                         struct sge_qset *qs = &sc->sge.qs[k];
  857 
  858                         rid = k + 2;
  859                         if (cxgb_debug)
  860                                 printf("rid=%d ", rid);
  861                         if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
  862                             sc->dev, SYS_RES_IRQ, &rid,
  863                             RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  864                                 device_printf(sc->dev, "Cannot allocate "
  865                                     "interrupt for message %d\n", rid);
  866                                 return (EINVAL);
  867                         }
  868                         sc->msix_irq_rid[k] = rid;
  869                         if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
  870                                 INTR_MPSAFE|INTR_TYPE_NET,
  871 #ifdef INTR_FILTERS
  872                                 NULL,
  873 #endif
  874                                 t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
  875                                 device_printf(sc->dev, "Cannot set up "
  876                                     "interrupt for message %d\n", rid);
  877                                 return (EINVAL);
  878                         }
  879 #ifdef IFNET_MULTIQUEUE                 
  880                         if (singleq == 0) {
  881                                 int vector = rman_get_start(sc->msix_irq_res[k]);
  882                                 if (bootverbose)
  883                                         device_printf(sc->dev, "binding vector=%d to cpu=%d\n", vector, k % mp_ncpus);
  884                                 intr_bind(vector, k % mp_ncpus);
  885                         }
  886 #endif                  
  887                 }
  888         }
  889 
  890         return (0);
  891 }
  892 
  893 static int
  894 cxgb_port_probe(device_t dev)
  895 {
  896         struct port_info *p;
  897         char buf[80];
  898         const char *desc;
  899         
  900         p = device_get_softc(dev);
  901         desc = p->phy.desc;
  902         snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
  903         device_set_desc_copy(dev, buf);
  904         return (0);
  905 }
  906 
  907 
  908 static int
  909 cxgb_makedev(struct port_info *pi)
  910 {
  911         
  912         pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
  913             UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
  914         
  915         if (pi->port_cdev == NULL)
  916                 return (ENOMEM);
  917 
  918         pi->port_cdev->si_drv1 = (void *)pi;
  919         
  920         return (0);
  921 }
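/*
 * The character device created above shows up as /dev/<ifname> (e.g.
 * /dev/cxgb0); opening it reaches cxgb_extension_open/close and the
 * cxgb_extension_ioctl entry point via the cxgb_cdevsw table defined
 * near the top of this file.
 */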
  922 
  923 
  924 #ifdef TSO_SUPPORTED
  925 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
  926 /* Don't enable TSO6 yet */
  927 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
  928 #else
  929 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
  930 /* Don't enable TSO6 yet */
  931 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
  932 #define IFCAP_TSO4 0x0
  933 #define IFCAP_TSO6 0x0
  934 #define CSUM_TSO   0x0
  935 #endif
  936 
  937 
  938 static int
  939 cxgb_port_attach(device_t dev)
  940 {
  941         struct port_info *p;
  942         struct ifnet *ifp;
  943         int err, media_flags;
  944         struct adapter *sc;
  945         
  946         
  947         p = device_get_softc(dev);
  948         sc = p->adapter;
  949         snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
  950             device_get_unit(device_get_parent(dev)), p->port_id);
  951         PORT_LOCK_INIT(p, p->lockbuf);
  952 
  953         /* Allocate an ifnet object and set it up */
  954         ifp = p->ifp = if_alloc(IFT_ETHER);
  955         if (ifp == NULL) {
  956                 device_printf(dev, "Cannot allocate ifnet\n");
  957                 return (ENOMEM);
  958         }
  959         
  960         /*
  961          * Note that there is currently no watchdog timer.
  962          */
  963         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  964         ifp->if_init = cxgb_init;
  965         ifp->if_softc = p;
  966         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  967         ifp->if_ioctl = cxgb_ioctl;
  968         ifp->if_start = cxgb_start;
  969 
  970 #if 0   
  971 #ifdef IFNET_MULTIQUEUE
  972         ifp->if_flags |= IFF_MULTIQ;
  973         ifp->if_mq_start = cxgb_pcpu_start;
  974 #endif
  975 #endif  
  976         ifp->if_timer = 0;      /* Disable ifnet watchdog */
  977         ifp->if_watchdog = NULL;
  978 
  979         ifp->if_snd.ifq_drv_maxlen = cxgb_snd_queue_len;
  980         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  981         IFQ_SET_READY(&ifp->if_snd);
  982 
  983         ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
  984         ifp->if_capabilities |= CXGB_CAP;
  985         ifp->if_capenable |= CXGB_CAP_ENABLE;
  986         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
  987         /*
  988          * disable TSO on 4-port - it isn't supported by the firmware yet
  989          */     
  990         if (p->adapter->params.nports > 2) {
  991                 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
  992                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
  993                 ifp->if_hwassist &= ~CSUM_TSO;
  994         }
  995 
  996         ether_ifattach(ifp, p->hw_addr);
  997         /*
  998          * Only default to jumbo frames on 10GigE
  999          */
 1000         if (p->adapter->params.nports <= 2)
 1001                 ifp->if_mtu = ETHERMTU_JUMBO;
 1002         if ((err = cxgb_makedev(p)) != 0) {
 1003                 printf("makedev failed %d\n", err);
 1004                 return (err);
 1005         }
 1006         ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
 1007             cxgb_media_status);
 1008       
 1009         if (!strcmp(p->phy.desc, "10GBASE-CX4")) {
 1010                 media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
 1011         } else if (!strcmp(p->phy.desc, "10GBASE-SR")) {
 1012                 media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
 1013         } else if (!strcmp(p->phy.desc, "10GBASE-R")) {
 1014                 media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
 1015         } else if (!strcmp(p->phy.desc, "10/100/1000BASE-T")) {
 1016                 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
 1017                 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
 1018                             0, NULL);
 1019                 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
 1020                             0, NULL);
 1021                 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
 1022                             0, NULL);
 1023                 ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
 1024                             0, NULL);
 1025                 media_flags = 0;
 1026         } else if (!strcmp(p->phy.desc, "1000BASE-X")) {
 1027                 /*
 1028                  * XXX: This is not very accurate.  Fix when common code
 1029                  * returns more specific value - eg 1000BASE-SX, LX, etc.
 1030                  */
 1031                 media_flags = IFM_ETHER | IFM_1000_SX | IFM_FDX;
 1032         } else {
 1033                 printf("unsupported media type %s\n", p->phy.desc);
 1034                 return (ENXIO);
 1035         }
 1036         if (media_flags) {
 1037                 ifmedia_add(&p->media, media_flags, 0, NULL);
 1038                 ifmedia_set(&p->media, media_flags);
 1039         } else {
 1040                 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 1041                 ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
 1042         }       
 1043 
 1044         /* Get the latest MAC address; the user may have configured an LAA */
 1045         bcopy(IF_LLADDR(p->ifp), p->hw_addr, ETHER_ADDR_LEN);
 1046         t3_sge_init_port(p);
 1047 #if defined(LINK_ATTACH)        
 1048         cxgb_link_start(p);
 1049         t3_link_changed(sc, p->port_id);
 1050 #endif
 1051         return (0);
 1052 }
 1053 
 1054 static int
 1055 cxgb_port_detach(device_t dev)
 1056 {
 1057         struct port_info *p;
 1058 
 1059         p = device_get_softc(dev);
 1060 
 1061         PORT_LOCK(p);
 1062         if (p->ifp->if_drv_flags & IFF_DRV_RUNNING) 
 1063                 cxgb_stop_locked(p);
 1064         PORT_UNLOCK(p);
 1065         
 1066         ether_ifdetach(p->ifp);
 1067         printf("waiting for callout to stop ...");
 1068         DELAY(1000000);
 1069         printf("done\n");
 1070         /*
 1071          * the lock may be acquired in ifdetach
 1072          */
 1073         PORT_LOCK_DEINIT(p);
 1074         if_free(p->ifp);
 1075         
 1076         if (p->port_cdev != NULL)
 1077                 destroy_dev(p->port_cdev);
 1078         
 1079         return (0);
 1080 }
 1081 
 1082 void
 1083 t3_fatal_err(struct adapter *sc)
 1084 {
 1085         u_int fw_status[4];
 1086         int i = 0;
 1087 
 1088         /*
 1089          * We don't know which tcb caused the error so we just hope it was one of the first ten :-/
 1090          */
 1091         for (i = 0; i < 10; i++)
 1092                 cxgb_log_tcb(sc, i);
 1093             
 1094         if (sc->flags & FULL_INIT_DONE) {
 1095                 t3_sge_stop(sc);
 1096                 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
 1097                 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
 1098                 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 1099                 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 1100                 t3_intr_disable(sc);
 1101         }
 1102         device_printf(sc->dev,"encountered fatal error, operation suspended\n");
 1103         if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) {
 1104                 
 1105                 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1106                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1107 
 1108                 CTR4(KTR_CXGB, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1109                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1110         }
 1111 }
 1112 
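/*
 * Walk the PCI capability list by hand and return the config-space offset
 * of capability `cap', or 0 if the device doesn't advertise it.  This
 * mirrors what pci_find_extcap() (used in attach above) provides, but is
 * exposed as a t3_os_* glue routine so the shared Chelsio code can call
 * it with just an adapter_t.
 */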
 1113 int
 1114 t3_os_find_pci_capability(adapter_t *sc, int cap)
 1115 {
 1116         device_t dev;
 1117         struct pci_devinfo *dinfo;
 1118         pcicfgregs *cfg;
 1119         uint32_t status;
 1120         uint8_t ptr;
 1121 
 1122         dev = sc->dev;
 1123         dinfo = device_get_ivars(dev);
 1124         cfg = &dinfo->cfg;
 1125 
 1126         status = pci_read_config(dev, PCIR_STATUS, 2);
 1127         if (!(status & PCIM_STATUS_CAPPRESENT))
 1128                 return (0);
 1129 
 1130         switch (cfg->hdrtype & PCIM_HDRTYPE) {
 1131         case 0:
 1132         case 1:
 1133                 ptr = PCIR_CAP_PTR;
 1134                 break;
 1135         case 2:
 1136                 ptr = PCIR_CAP_PTR_2;
 1137                 break;
 1138         default:
 1139                 return (0);
 1140                 break;
 1141         }
 1142         ptr = pci_read_config(dev, ptr, 1);
 1143 
 1144         while (ptr != 0) {
 1145                 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
 1146                         return (ptr);
 1147                 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
 1148         }
 1149 
 1150         return (0);
 1151 }
 1152 
 1153 int
 1154 t3_os_pci_save_state(struct adapter *sc)
 1155 {
 1156         device_t dev;
 1157         struct pci_devinfo *dinfo;
 1158 
 1159         dev = sc->dev;
 1160         dinfo = device_get_ivars(dev);
 1161 
 1162         pci_cfg_save(dev, dinfo, 0);
 1163         return (0);
 1164 }
 1165 
 1166 int
 1167 t3_os_pci_restore_state(struct adapter *sc)
 1168 {
 1169         device_t dev;
 1170         struct pci_devinfo *dinfo;
 1171 
 1172         dev = sc->dev;
 1173         dinfo = device_get_ivars(dev);
 1174 
 1175         pci_cfg_restore(dev, dinfo);
 1176         return (0);
 1177 }
 1178 
 1179 /**
 1180  *      t3_os_link_changed - handle link status changes
 1181  *      @adapter: the adapter associated with the link change
 1182  *      @port_id: the port index whose link status has changed
 1183  *      @link_status: the new status of the link
 1184  *      @speed: the new speed setting
 1185  *      @duplex: the new duplex setting
 1186  *      @fc: the new flow-control setting
 1187  *
 1188  *      This is the OS-dependent handler for link status changes.  The OS
 1189  *      neutral handler takes care of most of the processing for these events,
 1190  *      then calls this handler for any OS-specific processing.
 1191  */
 1192 void
 1193 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
 1194      int duplex, int fc)
 1195 {
 1196         struct port_info *pi = &adapter->port[port_id];
 1197         struct cmac *mac = &adapter->port[port_id].mac;
 1198 
 1199         if (link_status) {
 1200                 DELAY(10);
 1201                 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1202                 /* Clear errors created by MAC enable */
 1203                 t3_set_reg_field(adapter,
 1204                     A_XGM_STAT_CTRL + pi->mac.offset,
 1205                     F_CLRSTATS, 1);
 1206                 if_link_state_change(pi->ifp, LINK_STATE_UP);
 1207 
 1208         } else {
 1209                 pi->phy.ops->power_down(&pi->phy, 1);
 1210                 t3_mac_disable(mac, MAC_DIRECTION_RX);
 1211                 t3_link_start(&pi->phy, mac, &pi->link_config);
 1212                 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1213                 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
 1214         }
 1215 }
 1216 
 1217 /**
 1218  *      t3_os_phymod_changed - handle PHY module changes
 1219  *      @adap: the adapter whose PHY module changed
 1220  *      @port_id: the port whose PHY module was inserted or removed
 1221  *
 1222  *      This is the OS-dependent handler for PHY module changes.  It is
 1223  *      invoked when a PHY module is removed or inserted for any OS-specific
 1224  *      processing.
 1225  */
 1226 void t3_os_phymod_changed(struct adapter *adap, int port_id)
 1227 {
 1228         static const char *mod_str[] = {
 1229                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
 1230         };
 1231 
 1232         struct port_info *pi = &adap->port[port_id];
 1233 
 1234         if (pi->phy.modtype == phy_modtype_none)
 1235                 device_printf(adap->dev, "PHY module unplugged\n");
 1236         else {
 1237                 KASSERT(pi->phy.modtype < ARRAY_SIZE(mod_str),
 1238                     ("invalid PHY module type %d", pi->phy.modtype));
 1239                 device_printf(adap->dev, "%s PHY module inserted\n",
 1240                     mod_str[pi->phy.modtype]);
 1241         }
 1242 }
 1243 
 1244 /*
 1245  * Interrupt-context handler for external (PHY) interrupts.
 1246  */
 1247 void
 1248 t3_os_ext_intr_handler(adapter_t *sc)
 1249 {
 1250         if (cxgb_debug)
 1251                 printf("t3_os_ext_intr_handler\n");
 1252         /*
 1253          * Schedule a task to handle external interrupts as they may be slow
 1254          * and we use a mutex to protect MDIO registers.  We disable PHY
 1255          * interrupts in the meantime and let the task reenable them when
 1256          * it's done.
 1257          */
 1258         ADAPTER_LOCK(sc);
 1259         if (sc->slow_intr_mask) {
 1260                 sc->slow_intr_mask &= ~F_T3DBG;
 1261                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 1262                 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
 1263         }
 1264         ADAPTER_UNLOCK(sc);
 1265 }
 1266 
 1267 void
 1268 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
 1269 {
 1270 
 1271         /*
 1272          * The ifnet might not be allocated before this gets called,
 1273          * as this is called early on in attach by t3_prep_adapter
 1274          * save the address off in the port structure
 1275          */
 1276         if (cxgb_debug)
 1277                 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
 1278         bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
 1279 }
 1280 
 1281 /**
 1282  *      link_start - enable a port
 1283  *      @p: the port to enable
 1284  *
 1285  *      Performs the MAC and PHY actions needed to enable a port.
 1286  */
 1287 static void
 1288 cxgb_link_start(struct port_info *p)
 1289 {
 1290         struct ifnet *ifp;
 1291         struct t3_rx_mode rm;
 1292         struct cmac *mac = &p->mac;
 1293         int mtu, hwtagging;
 1294 
 1295         ifp = p->ifp;
 1296 
 1297         bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
 1298 
 1299         mtu = ifp->if_mtu;
 1300         if (ifp->if_capenable & IFCAP_VLAN_MTU)
 1301                 mtu += ETHER_VLAN_ENCAP_LEN;
 1302 
 1303         hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
 1304 
 1305         t3_init_rx_mode(&rm, p);
 1306         if (!mac->multiport) 
 1307                 t3_mac_reset(mac);
 1308         t3_mac_set_mtu(mac, mtu);
 1309         t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
 1310         t3_mac_set_address(mac, 0, p->hw_addr);
 1311         t3_mac_set_rx_mode(mac, &rm);
 1312         t3_link_start(&p->phy, mac, &p->link_config);
 1313         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1314 }
 1315 
 1316 
 1317 static int
 1318 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 1319                               unsigned long n)
 1320 {
 1321         int attempts = 5;
 1322 
 1323         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 1324                 if (!--attempts)
 1325                         return (ETIMEDOUT);
 1326                 t3_os_sleep(10);
 1327         }
 1328         return (0);
 1329 }
 1330 
 1331 static int
 1332 init_tp_parity(struct adapter *adap)
 1333 {
 1334         int i;
 1335         struct mbuf *m;
 1336         struct cpl_set_tcb_field *greq;
 1337         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
 1338 
 1339         t3_tp_set_offload_mode(adap, 1);
 1340 
 1341         for (i = 0; i < 16; i++) {
 1342                 struct cpl_smt_write_req *req;
 1343 
 1344                 m = m_gethdr(M_WAITOK, MT_DATA);
 1345                 req = mtod(m, struct cpl_smt_write_req *);
 1346                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1347                 memset(req, 0, sizeof(*req));
 1348                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1349                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 1350                 req->iff = i;
 1351                 t3_mgmt_tx(adap, m);
 1352         }
 1353 
 1354         for (i = 0; i < 2048; i++) {
 1355                 struct cpl_l2t_write_req *req;
 1356 
 1357                 m = m_gethdr(M_WAITOK, MT_DATA);
 1358                 req = mtod(m, struct cpl_l2t_write_req *);
 1359                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1360                 memset(req, 0, sizeof(*req));
 1361                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1362                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 1363                 req->params = htonl(V_L2T_W_IDX(i));
 1364                 t3_mgmt_tx(adap, m);
 1365         }
 1366 
 1367         for (i = 0; i < 2048; i++) {
 1368                 struct cpl_rte_write_req *req;
 1369 
 1370                 m = m_gethdr(M_WAITOK, MT_DATA);
 1371                 req = mtod(m, struct cpl_rte_write_req *);
 1372                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1373                 memset(req, 0, sizeof(*req));
 1374                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1375                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 1376                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
 1377                 t3_mgmt_tx(adap, m);
 1378         }
 1379 
 1380         m = m_gethdr(M_WAITOK, MT_DATA);
 1381         greq = mtod(m, struct cpl_set_tcb_field *);
 1382         m->m_len = m->m_pkthdr.len = sizeof(*greq);
 1383         memset(greq, 0, sizeof(*greq));
 1384         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1385         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
 1386         greq->mask = htobe64(1);
 1387         t3_mgmt_tx(adap, m);
 1388 
 1389         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 1390         t3_tp_set_offload_mode(adap, 0);
 1391         return (i);
 1392 }
 1393 
 1394 /**
 1395  *      setup_rss - configure Receive Side Steering (per-queue connection demux) 
 1396  *      @adap: the adapter
 1397  *
 1398  *      Sets up RSS to distribute packets to multiple receive queues.  We
 1399  *      configure the RSS CPU lookup table to distribute to the number of HW
 1400  *      receive queues, and the response queue lookup table to narrow that
 1401  *      down to the response queues actually configured for each port.
 1402  *      We always configure the RSS mapping for two ports since the mapping
 1403  *      table has plenty of entries.
 1404  */
 1405 static void
 1406 setup_rss(adapter_t *adap)
 1407 {
 1408         int i;
 1409         u_int nq[2]; 
 1410         uint8_t cpus[SGE_QSETS + 1];
 1411         uint16_t rspq_map[RSS_TABLE_SIZE];
 1412         
 1413         for (i = 0; i < SGE_QSETS; ++i)
 1414                 cpus[i] = i;
 1415         cpus[SGE_QSETS] = 0xff;
 1416 
 1417         nq[0] = nq[1] = 0;
 1418         for_each_port(adap, i) {
 1419                 const struct port_info *pi = adap2pinfo(adap, i);
 1420 
 1421                 nq[pi->tx_chan] += pi->nqsets;
 1422         }
 1423         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 1424                 rspq_map[i] = nq[0] ? i % nq[0] : 0;
 1425                 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
 1426         }
 1427         /* Calculate the reverse RSS map table */
 1428         for (i = 0; i < RSS_TABLE_SIZE; ++i)
 1429                 if (adap->rrss_map[rspq_map[i]] == 0xff)
 1430                         adap->rrss_map[rspq_map[i]] = i;
 1431 
 1432         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 1433                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
 1434                       F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
 1435                       cpus, rspq_map);
 1436 
 1437 }
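/*
 * Worked example (illustrative, not part of the driver): with two ports
 * of two queue sets each, nq[0] = nq[1] = 2, so the loop above fills the
 * first half of rspq_map with 0,1,0,1,... and the second half with
 * 2,3,2,3,..., spreading each port's traffic evenly over its own
 * response queues.
 */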
 1438 
 1439 /*
 1440  * Thin wrapper that hands an mbuf to the offload queue driver.
 1441  */
 1443 static inline int
 1444 offload_tx(struct t3cdev *tdev, struct mbuf *m)
 1445 {
 1446         int ret;
 1447 
 1448         ret = t3_offload_tx(tdev, m);
 1449         return (ret);
 1450 }
 1451 
 1452 static int
 1453 write_smt_entry(struct adapter *adapter, int idx)
 1454 {
 1455         struct port_info *pi = &adapter->port[idx];
 1456         struct cpl_smt_write_req *req;
 1457         struct mbuf *m;
 1458 
 1459         if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 1460                 return (ENOMEM);
 1461 
 1462         req = mtod(m, struct cpl_smt_write_req *);
 1463         m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
 1464         
 1465         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1466         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 1467         req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
 1468         req->iff = idx;
 1469         memset(req->src_mac1, 0, sizeof(req->src_mac1));
 1470         memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
 1471 
 1472         m_set_priority(m, 1);
 1473 
 1474         offload_tx(&adapter->tdev, m);
 1475 
 1476         return (0);
 1477 }
 1478 
 1479 static int
 1480 init_smt(struct adapter *adapter)
 1481 {
 1482         int i;
 1483 
 1484         for_each_port(adapter, i)
 1485                 write_smt_entry(adapter, i);
 1486         return (0);
 1487 }
 1488 
 1489 static void
 1490 init_port_mtus(adapter_t *adapter)
 1491 {
 1492         unsigned int mtus = adapter->port[0].ifp->if_mtu;
 1493 
 1494         if (adapter->port[1].ifp)
 1495                 mtus |= adapter->port[1].ifp->if_mtu << 16;
 1496         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 1497 }
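/*
 * Example (illustrative): with port 0 at MTU 1500 (0x5dc) and port 1 at
 * MTU 9000 (0x2328), the value written to A_TP_MTU_PORT_TABLE above is
 * 0x232805dc -- port 0 in the low 16 bits, port 1 in the high 16 bits.
 */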
 1498 
 1499 static void
 1500 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 1501                               int hi, int port)
 1502 {
 1503         struct mbuf *m;
 1504         struct mngt_pktsched_wr *req;
 1505 
 1506         m = m_gethdr(M_DONTWAIT, MT_DATA);
 1507         if (m) {        
 1508                 req = mtod(m, struct mngt_pktsched_wr *);
 1509                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 1510                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 1511                 req->sched = sched;
 1512                 req->idx = qidx;
 1513                 req->min = lo;
 1514                 req->max = hi;
 1515                 req->binding = port;
 1516                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1517                 t3_mgmt_tx(adap, m);
 1518         }
 1519 }
 1520 
 1521 static void
 1522 bind_qsets(adapter_t *sc)
 1523 {
 1524         int i, j;
 1525 
 1526         cxgb_pcpu_startup_threads(sc);
 1527         for (i = 0; i < (sc)->params.nports; ++i) {
 1528                 const struct port_info *pi = adap2pinfo(sc, i);
 1529 
 1530                 for (j = 0; j < pi->nqsets; ++j) {
 1531                         send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
 1532                                           -1, pi->tx_chan);
 1533 
 1534                 }
 1535         }
 1536 }
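/*
 * Note: each send_pktsched_cmd() call above issues a
 * FW_MNGTOPCODE_PKTSCHED_SET work request with sched type 1,
 * min = max = -1 (keep the firmware's default rate limits), and the
 * binding set to the port's TX channel, tying every queue set to the
 * channel of the port it serves.
 */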
 1537 
 1538 static void
 1539 update_tpeeprom(struct adapter *adap)
 1540 {
 1541 #ifdef FIRMWARE_LATEST
 1542         const struct firmware *tpeeprom;
 1543 #else
 1544         struct firmware *tpeeprom;
 1545 #endif
 1546 
 1547         uint32_t version;
 1548         unsigned int major, minor;
 1549         int ret, len;
 1550         char rev;
 1551 
 1552         t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
 1553 
 1554         major = G_TP_VERSION_MAJOR(version);
 1555         minor = G_TP_VERSION_MINOR(version);
 1556         if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
 1557                 return; 
 1558 
 1559         rev = t3rev2char(adap);
 1560 
 1561         tpeeprom = firmware_get(TPEEPROM_NAME);
 1562         if (tpeeprom == NULL) {
 1563                 device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
 1564                     TPEEPROM_NAME);
 1565                 return;
 1566         }
 1567 
 1568         len = tpeeprom->datasize - 4;
 1569         
 1570         ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
 1571         if (ret)
 1572                 goto release_tpeeprom;
 1573 
 1574         if (len != TP_SRAM_LEN) {
 1575                 device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", TPEEPROM_NAME, len, TP_SRAM_LEN);
 1576                 goto release_tpeeprom;  /* don't leak the firmware reference */
 1577         }
 1578         
 1579         ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
 1580             TP_SRAM_OFFSET);
 1581         
 1582         if (!ret) {
 1583                 device_printf(adap->dev,
 1584                         "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
 1585                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1586         } else 
 1587                 device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");
 1588 
 1589 release_tpeeprom:
 1590         firmware_put(tpeeprom, FIRMWARE_UNLOAD);
 1591         
 1592         return;
 1593 }
 1594 
 1595 static int
 1596 update_tpsram(struct adapter *adap)
 1597 {
 1598 #ifdef FIRMWARE_LATEST
 1599         const struct firmware *tpsram;
 1600 #else
 1601         struct firmware *tpsram;
 1602 #endif  
 1603         int ret;
 1604         char rev;
 1605 
 1606         rev = t3rev2char(adap);
 1607         if (!rev)
 1608                 return (0);
 1609 
 1610         update_tpeeprom(adap);
 1611 
 1612         tpsram = firmware_get(TPSRAM_NAME);
 1613         if (tpsram == NULL) {
 1614                 device_printf(adap->dev, "could not load TP SRAM\n");
 1615                 return (EINVAL);
 1616         } else
 1617                 device_printf(adap->dev, "updating TP SRAM\n");
 1618         
 1619         ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
 1620         if (ret)
 1621                 goto release_tpsram;    
 1622 
 1623         ret = t3_set_proto_sram(adap, tpsram->data);
 1624         if (ret)
 1625                 device_printf(adap->dev, "loading protocol SRAM failed\n");
 1626 
 1627 release_tpsram:
 1628         firmware_put(tpsram, FIRMWARE_UNLOAD);
 1629         
 1630         return (ret);
 1631 }
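/*
 * Note: a revision that t3rev2char() cannot map to a known image suffix
 * makes update_tpsram() return success without touching the chip; the
 * TP microcode update only applies to recognized T3 revisions.
 */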
 1632 
 1633 /**
 1634  *      cxgb_up - enable the adapter
 1635  *      @adap: adapter being enabled
 1636  *
 1637  *      Called when the first port is enabled, this function performs the
 1638  *      actions necessary to make an adapter operational, such as completing
 1639  *      the initialization of HW modules, and enabling interrupts.
 1640  *
 1641  */
 1642 static int
 1643 cxgb_up(struct adapter *sc)
 1644 {
 1645         int err = 0;
 1646 
 1647         if ((sc->flags & FULL_INIT_DONE) == 0) {
 1648 
 1649                 if ((sc->flags & FW_UPTODATE) == 0)
 1650                         if ((err = upgrade_fw(sc)))
 1651                                 goto out;
 1652                 if ((sc->flags & TPS_UPTODATE) == 0)
 1653                         if ((err = update_tpsram(sc)))
 1654                                 goto out;
 1655                 err = t3_init_hw(sc, 0);
 1656                 if (err)
 1657                         goto out;
 1658 
 1659                 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
 1660                 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
 1661 
 1662                 err = setup_sge_qsets(sc);
 1663                 if (err)
 1664                         goto out;
 1665 
 1666                 setup_rss(sc);
 1667                 t3_add_configured_sysctls(sc);
 1668                 sc->flags |= FULL_INIT_DONE;
 1669         }
 1670 
 1671         t3_intr_clear(sc);
 1672 
 1673         /* If it's MSI or INTx, allocate a single interrupt for everything */
 1674         if ((sc->flags & USING_MSIX) == 0) {
 1675                 if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
 1676                    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
 1677                         device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
 1678                             sc->irq_rid);
 1679                         err = EINVAL;
 1680                         goto out;
 1681                 }
 1682                 device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);
 1683 
 1684                 if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
 1685 #ifdef INTR_FILTERS
 1686                         NULL,
 1687 #endif                  
 1688                         sc->cxgb_intr, sc, &sc->intr_tag)) {
 1689                         device_printf(sc->dev, "Cannot set up interrupt\n");
 1690                         err = EINVAL;
 1691                         goto irq_err;
 1692                 }
 1693         } else {
 1694                 cxgb_setup_msix(sc, sc->msi_count);
 1695         }
 1696 
 1697         t3_sge_start(sc);
 1698         t3_intr_enable(sc);
 1699 
 1700         if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
 1701             is_offload(sc) && init_tp_parity(sc) == 0)
 1702                 sc->flags |= TP_PARITY_INIT;
 1703 
 1704         if (sc->flags & TP_PARITY_INIT) {
 1705                 t3_write_reg(sc, A_TP_INT_CAUSE,
 1706                                 F_CMCACHEPERR | F_ARPLUTPERR);
 1707                 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
 1708         }
 1709 
 1710         
 1711         if (!(sc->flags & QUEUES_BOUND)) {
 1712                 bind_qsets(sc);
 1713                 sc->flags |= QUEUES_BOUND;              
 1714         }
 1715 out:
 1716         return (err);
 1717 irq_err:
 1718         CH_ERR(sc, "request_irq failed, err %d\n", err);
 1719         goto out;
 1720 }
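/*
 * Note: cxgb_up() is written to be safely re-entered as ports come up;
 * the FULL_INIT_DONE, TP_PARITY_INIT and QUEUES_BOUND flags gate the
 * one-time work (firmware/TP SRAM upgrade, SGE setup, parity init and
 * queue binding) so it runs at most once per adapter lifetime.
 */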
 1721 
 1722 
 1723 /*
 1724  * Release resources when all the ports and offloading have been stopped.
 1725  */
 1726 static void
 1727 cxgb_down_locked(struct adapter *sc)
 1728 {
 1729         
 1730         t3_sge_stop(sc);
 1731         t3_intr_disable(sc);
 1732         
 1733         if (sc->intr_tag != NULL) {
 1734                 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
 1735                 sc->intr_tag = NULL;
 1736         }
 1737         if (sc->irq_res != NULL) {
 1738                 device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
 1739                     sc->irq_rid, sc->irq_res);
 1740                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
 1741                     sc->irq_res);
 1742                 sc->irq_res = NULL;
 1743         }
 1744         
 1745         if (sc->flags & USING_MSIX) 
 1746                 cxgb_teardown_msix(sc);
 1747         
 1748         callout_stop(&sc->cxgb_tick_ch);
 1749         callout_stop(&sc->sge_timer_ch);
 1750         callout_drain(&sc->cxgb_tick_ch);
 1751         callout_drain(&sc->sge_timer_ch);
 1752         
 1753         if (sc->tq != NULL) {
 1754                 printf("draining slow intr\n");
 1755                 
 1756                 taskqueue_drain(sc->tq, &sc->slow_intr_task);
 1757                 printf("draining ext intr\n");
 1758                 taskqueue_drain(sc->tq, &sc->ext_intr_task);
 1759                 printf("draining tick task\n");
 1760                 taskqueue_drain(sc->tq, &sc->tick_task);
 1761         }
 1762         ADAPTER_UNLOCK(sc);
 1763 }
 1764 
 1765 static int
 1766 offload_open(struct port_info *pi)
 1767 {
 1768         struct adapter *adapter = pi->adapter;
 1769         struct t3cdev *tdev = &adapter->tdev;
 1770 
 1771         int adap_up = adapter->open_device_map & PORT_MASK;
 1772         int err = 0;
 1773 
 1774         CTR1(KTR_CXGB, "device_map=0x%x", adapter->open_device_map); 
 1775         if (atomic_cmpset_int(&adapter->open_device_map,
 1776                 (adapter->open_device_map & ~(1<<OFFLOAD_DEVMAP_BIT)),
 1777                 (adapter->open_device_map | (1<<OFFLOAD_DEVMAP_BIT))) == 0)
 1778                 return (0);
 1779 
 1780        
 1781         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT)) 
 1782                 printf("offload_open: DEVMAP_BIT did not get set 0x%x\n", adapter->open_device_map);
 1783         ADAPTER_LOCK(pi->adapter); 
 1784         if (!adap_up)
 1785                 err = cxgb_up(adapter);
 1786         ADAPTER_UNLOCK(pi->adapter);
 1787         if (err)
 1788                 return (err);
 1789 
 1790         t3_tp_set_offload_mode(adapter, 1);
 1791         tdev->lldev = pi->ifp;
 1792 
 1793         init_port_mtus(adapter);
 1794         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
 1795                      adapter->params.b_wnd,
 1796                      adapter->params.rev == 0 ?
 1797                        adapter->port[0].ifp->if_mtu : 0xffff);
 1798         init_smt(adapter);
 1799         /* Call back all registered clients */
 1800         cxgb_add_clients(tdev);
 1801 
 1802         /* restore them in case the offload module has changed them */
 1803         if (err) {
 1804                 t3_tp_set_offload_mode(adapter, 0);
 1805                 clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1806                 cxgb_set_dummy_ops(tdev);
 1807         }
 1808         return (err);
 1809 }
 1810 
 1811 static int
 1812 offload_close(struct t3cdev *tdev)
 1813 {
 1814         struct adapter *adapter = tdev2adap(tdev);
 1815 
 1816         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
 1817                 return (0);
 1818 
 1819         /* Call back all registered clients */
 1820         cxgb_remove_clients(tdev);
 1821 
 1822         tdev->lldev = NULL;
 1823         cxgb_set_dummy_ops(tdev);
 1824         t3_tp_set_offload_mode(adapter, 0);
 1825         clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1826 
 1827         ADAPTER_LOCK(adapter);
 1828         if (!adapter->open_device_map)
 1829                 cxgb_down_locked(adapter);
 1830         else
 1831                 ADAPTER_UNLOCK(adapter);
 1832         return (0);
 1833 }
 1834 
 1835 
 1836 static void
 1837 cxgb_init(void *arg)
 1838 {
 1839         struct port_info *p = arg;
 1840 
 1841         PORT_LOCK(p);
 1842         cxgb_init_locked(p);
 1843         PORT_UNLOCK(p);
 1844 }
 1845 
 1846 static void
 1847 cxgb_init_locked(struct port_info *p)
 1848 {
 1849         struct ifnet *ifp;
 1850         adapter_t *sc = p->adapter;
 1851         int err;
 1852 
 1853         PORT_LOCK_ASSERT_OWNED(p);
 1854         ifp = p->ifp;
 1855 
 1856         ADAPTER_LOCK(p->adapter);
 1857         if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
 1858                 ADAPTER_UNLOCK(p->adapter);
 1859                 cxgb_stop_locked(p);
 1860                 return;
 1861         }
 1862         if (p->adapter->open_device_map == 0) {
 1863                 t3_intr_clear(sc);
 1864         }
 1865         setbit(&p->adapter->open_device_map, p->port_id);
 1866         ADAPTER_UNLOCK(p->adapter);
 1867 
 1868         if (is_offload(sc) && !ofld_disable) {
 1869                 err = offload_open(p);
 1870                 if (err)
 1871                         log(LOG_WARNING,
 1872                             "Could not initialize offload capabilities\n");
 1873         }
 1874 #if !defined(LINK_ATTACH)
 1875         cxgb_link_start(p);
 1876         t3_link_changed(sc, p->port_id);
 1877 #endif
 1878         ifp->if_baudrate = p->link_config.speed * 1000000;
 1879 
 1880         device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
 1881         t3_port_intr_enable(sc, p->port_id);
 1882 
 1883         t3_sge_reset_adapter(sc);
 1884 
 1885         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1886         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1887 }
 1888 
 1889 static void
 1890 cxgb_set_rxmode(struct port_info *p)
 1891 {
 1892         struct t3_rx_mode rm;
 1893         struct cmac *mac = &p->mac;
 1894 
 1895         t3_init_rx_mode(&rm, p);
 1896         mtx_lock(&p->adapter->mdio_lock);
 1897         t3_mac_set_rx_mode(mac, &rm);
 1898         mtx_unlock(&p->adapter->mdio_lock);
 1899 }
 1900 
 1901 static void
 1902 cxgb_stop_locked(struct port_info *pi)
 1903 {
 1904         struct ifnet *ifp;
 1905 
 1906         PORT_LOCK_ASSERT_OWNED(pi);
 1907         ADAPTER_LOCK_ASSERT_NOTOWNED(pi->adapter);
 1908         
 1909         ifp = pi->ifp;
 1910         t3_port_intr_disable(pi->adapter, pi->port_id);
 1911         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1912 
 1913         /* disable pause frames */
 1914         t3_set_reg_field(pi->adapter, A_XGM_TX_CFG + pi->mac.offset,
 1915                          F_TXPAUSEEN, 0);
 1916 
 1917         /* Reset RX FIFO HWM */
 1918         t3_set_reg_field(pi->adapter, A_XGM_RXFIFO_CFG +  pi->mac.offset,
 1919                          V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
 1920 
 1921 
 1922         ADAPTER_LOCK(pi->adapter);
 1923         clrbit(&pi->adapter->open_device_map, pi->port_id);
 1924 
 1925         if (pi->adapter->open_device_map == 0) {
 1926                 cxgb_down_locked(pi->adapter);
 1927         } else 
 1928                 ADAPTER_UNLOCK(pi->adapter);
 1929 
 1930 #if !defined(LINK_ATTACH)
 1931         DELAY(100);
 1932 
 1933         /* Wait for TXFIFO empty */
 1934         t3_wait_op_done(pi->adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
 1935                         F_TXFIFO_EMPTY, 1, 20, 5);
 1936 
 1937         DELAY(100);
 1938         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 1939 
 1940         pi->phy.ops->power_down(&pi->phy, 1);
 1941 #endif          
 1942 
 1943 }
 1944 
 1945 static int
 1946 cxgb_set_mtu(struct port_info *p, int mtu)
 1947 {
 1948         struct ifnet *ifp = p->ifp;
 1949         int error = 0;
 1950         
 1951         if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
 1952                 error = EINVAL;
 1953         else if (ifp->if_mtu != mtu) {
 1954                 PORT_LOCK(p);
 1955                 ifp->if_mtu = mtu;
 1956                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1957                         cxgb_stop_locked(p);
 1958                         cxgb_init_locked(p);
 1959                 }
 1960                 PORT_UNLOCK(p);
 1961         }
 1962         return (error);
 1963 }
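/*
 * Note: an MTU change on a running interface is implemented above as a
 * full stop/start cycle under the port lock, so callers should expect
 * the link to bounce rather than a live reconfiguration.
 */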
 1964 
 1965 /*
 1966  * Mark lro enabled or disabled in all qsets for this port
 1967  */
 1968 static int
 1969 cxgb_set_lro(struct port_info *p, int enabled)
 1970 {
 1971         int i;
 1972         struct adapter *adp = p->adapter;
 1973         struct sge_qset *q;
 1974 
 1975         PORT_LOCK_ASSERT_OWNED(p);
 1976         for (i = 0; i < p->nqsets; i++) {
 1977                 q = &adp->sge.qs[p->first_qset + i];
 1978                 q->lro.enabled = (enabled != 0);
 1979         }
 1980         return (0);
 1981 }
 1982 
 1983 static int
 1984 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
 1985 {
 1986         struct port_info *p = ifp->if_softc;
 1987         struct ifaddr *ifa = (struct ifaddr *)data;
 1988         struct ifreq *ifr = (struct ifreq *)data;
 1989         int flags, error = 0, reinit = 0;
 1990         uint32_t mask;
 1991 
 1992         /* 
 1993          * XXX need to check that we aren't in the middle of an unload
 1994          */
 1995         switch (command) {
 1996         case SIOCSIFMTU:
 1997                 error = cxgb_set_mtu(p, ifr->ifr_mtu);
 1998                 break;
 1999         case SIOCSIFADDR:
 2000                 if (ifa->ifa_addr->sa_family == AF_INET) {
 2001                         ifp->if_flags |= IFF_UP;
 2002                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 2003                                 PORT_LOCK(p);
 2004                                 cxgb_init_locked(p);
 2005                                 PORT_UNLOCK(p);
 2006                         }
 2007                         arp_ifinit(ifp, ifa);
 2008                 } else
 2009                         error = ether_ioctl(ifp, command, data);
 2010                 break;
 2011         case SIOCSIFFLAGS:
 2012                 PORT_LOCK(p);
 2013                 if (ifp->if_flags & IFF_UP) {
 2014                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2015                                 flags = p->if_flags;
 2016                                 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
 2017                                     ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
 2018                                         cxgb_set_rxmode(p);
 2019                         } else
 2020                                 cxgb_init_locked(p);
 2021                         p->if_flags = ifp->if_flags;
 2022                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2023                         cxgb_stop_locked(p);
 2024                                 
 2025                 PORT_UNLOCK(p);
 2026                 break;
 2027         case SIOCADDMULTI:
 2028         case SIOCDELMULTI:
 2029                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2030                         cxgb_set_rxmode(p);
 2031                 }
 2032                 break;
 2033         case SIOCSIFMEDIA:
 2034         case SIOCGIFMEDIA:
 2035                 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 2036                 break;
 2037         case SIOCSIFCAP:
 2038                 PORT_LOCK(p);
 2039                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 2040                 if (mask & IFCAP_TXCSUM) {
 2041                         if (IFCAP_TXCSUM & ifp->if_capenable) {
 2042                                 ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
 2043                                 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
 2044                                     | CSUM_IP | CSUM_TSO);
 2045                         } else {
 2046                                 ifp->if_capenable |= IFCAP_TXCSUM;
 2047                                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
 2048                                     | CSUM_IP);
 2049                         }
 2050                 }
 2051                 if (mask & IFCAP_RXCSUM) {
 2052                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2053                 }
 2054                 if (mask & IFCAP_TSO4) {
 2055                         if (IFCAP_TSO4 & ifp->if_capenable) {
 2056                                 ifp->if_capenable &= ~IFCAP_TSO4;
 2057                                 ifp->if_hwassist &= ~CSUM_TSO;
 2058                         } else if (IFCAP_TXCSUM & ifp->if_capenable) {
 2059                                 ifp->if_capenable |= IFCAP_TSO4;
 2060                                 ifp->if_hwassist |= CSUM_TSO;
 2061                         } else {
 2062                                 if (cxgb_debug)
 2063                                         printf("cxgb requires tx checksum offload"
 2064                                             " be enabled to use TSO\n");
 2065                                 error = EINVAL;
 2066                         }
 2067                 }
 2068                 if (mask & IFCAP_LRO) {
 2069                         ifp->if_capenable ^= IFCAP_LRO;
 2070 
 2071                         /* Safe to do this even if cxgb_up not called yet */
 2072                         cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
 2073                 }
 2074                 if (mask & IFCAP_VLAN_HWTAGGING) {
 2075                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2076                         reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
 2077                 }
 2078                 if (mask & IFCAP_VLAN_MTU) {
 2079                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2080                         reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
 2081                 }
 2082                 if (mask & IFCAP_VLAN_HWCSUM) {
 2083                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2084                 }
 2085                 if (reinit) {
 2086                         cxgb_stop_locked(p);
 2087                         cxgb_init_locked(p);
 2088                 }
 2089                 PORT_UNLOCK(p);
 2090 
 2091 #ifdef VLAN_CAPABILITIES
 2092                 VLAN_CAPABILITIES(ifp);
 2093 #endif
 2094                 break;
 2095         default:
 2096                 error = ether_ioctl(ifp, command, data);
 2097                 break;
 2098         }
 2099         return (error);
 2100 }
 2101 
 2102 static int
 2103 cxgb_media_change(struct ifnet *ifp)
 2104 {
 2105         if_printf(ifp, "media change not supported\n");
 2106         return (ENXIO);
 2107 }
 2108 
 2109 static void
 2110 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 2111 {
 2112         struct port_info *p = ifp->if_softc;
 2113 
 2114         ifmr->ifm_status = IFM_AVALID;
 2115         ifmr->ifm_active = IFM_ETHER;
 2116 
 2117         if (!p->link_config.link_ok)
 2118                 return;
 2119 
 2120         ifmr->ifm_status |= IFM_ACTIVE;
 2121 
 2122         switch (p->link_config.speed) {
 2123         case 10:
 2124                 ifmr->ifm_active |= IFM_10_T;
 2125                 break;
 2126         case 100:
 2127                 ifmr->ifm_active |= IFM_100_TX;
 2128                 break;
 2129         case 1000:
 2130                 ifmr->ifm_active |= IFM_1000_T;
 2131                 break;
 2132         }
 2133         
 2134         if (p->link_config.duplex)
 2135                 ifmr->ifm_active |= IFM_FDX;
 2136         else
 2137                 ifmr->ifm_active |= IFM_HDX;
 2138 }
 2139 
 2140 static void
 2141 cxgb_async_intr(void *data)
 2142 {
 2143         adapter_t *sc = data;
 2144 
 2145         if (cxgb_debug)
 2146                 device_printf(sc->dev, "cxgb_async_intr\n");
 2147         /*
 2148          * May need to sleep - defer to taskqueue
 2149          */
 2150         taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
 2151 }
 2152 
 2153 static void
 2154 cxgb_ext_intr_handler(void *arg, int count)
 2155 {
 2156         adapter_t *sc = (adapter_t *)arg;
 2157 
 2158         if (cxgb_debug)
 2159                 printf("cxgb_ext_intr_handler\n");
 2160 
 2161         t3_phy_intr_handler(sc);
 2162 
 2163         /* Now reenable external interrupts */
 2164         ADAPTER_LOCK(sc);
 2165         if (sc->slow_intr_mask) {
 2166                 sc->slow_intr_mask |= F_T3DBG;
 2167                 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
 2168                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 2169         }
 2170         ADAPTER_UNLOCK(sc);
 2171 }
 2172 
 2173 static void
 2174 check_link_status(adapter_t *sc)
 2175 {
 2176         int i;
 2177 
 2178         for (i = 0; i < (sc)->params.nports; ++i) {
 2179                 struct port_info *p = &sc->port[i];
 2180 
 2181                 if (!(p->phy.caps & SUPPORTED_IRQ)) 
 2182                         t3_link_changed(sc, i);
 2183                 p->ifp->if_baudrate = p->link_config.speed * 1000000;
 2184         }
 2185 }
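/*
 * Note: ports whose PHY can signal link changes by interrupt
 * (SUPPORTED_IRQ) are skipped here; the poll is only for PHYs that must
 * be polled, while if_baudrate is refreshed for every port.
 */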
 2186 
 2187 static void
 2188 check_t3b2_mac(struct adapter *adapter)
 2189 {
 2190         int i;
 2191 
 2192         if (adapter->flags & CXGB_SHUTDOWN)
 2193                 return;
 2194         
 2195         for_each_port(adapter, i) {
 2196                 struct port_info *p = &adapter->port[i];
 2197                 struct ifnet *ifp = p->ifp;
 2198                 int status;
 2199                 
 2200                 if (adapter->flags & CXGB_SHUTDOWN)
 2201                         return;
 2202                 
 2203                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 
 2204                         continue;
 2205                 
 2206                 status = 0;
 2207                 PORT_LOCK(p);
 2208                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) 
 2209                         status = t3b2_mac_watchdog_task(&p->mac);
 2210                 if (status == 1)
 2211                         p->mac.stats.num_toggled++;
 2212                 else if (status == 2) {
 2213                         struct cmac *mac = &p->mac;
 2214                         int mtu = ifp->if_mtu;
 2215 
 2216                         if (ifp->if_capenable & IFCAP_VLAN_MTU)
 2217                                 mtu += ETHER_VLAN_ENCAP_LEN;
 2218                         t3_mac_set_mtu(mac, mtu);
 2219                         t3_mac_set_address(mac, 0, p->hw_addr);
 2220                         cxgb_set_rxmode(p);
 2221                         t3_link_start(&p->phy, mac, &p->link_config);
 2222                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2223                         t3_port_intr_enable(adapter, p->port_id);
 2224                         p->mac.stats.num_resets++;
 2225                 }
 2226                 PORT_UNLOCK(p);
 2227         }
 2228 }
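/*
 * Note: t3b2_mac_watchdog_task() status 1 records a MAC toggle; status 2
 * means the MAC needed a reset, after which the code above rebuilds its
 * state from scratch (MTU, station address, RX mode, link, interrupts)
 * and bumps num_resets.
 */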
 2229 
 2230 static void
 2231 cxgb_tick(void *arg)
 2232 {
 2233         adapter_t *sc = (adapter_t *)arg;
 2234 
 2235         if (sc->flags & CXGB_SHUTDOWN)
 2236                 return;
 2237 
 2238         taskqueue_enqueue(sc->tq, &sc->tick_task);
 2239         callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
 2240 }
 2241 
 2242 static void
 2243 cxgb_tick_handler(void *arg, int count)
 2244 {
 2245         adapter_t *sc = (adapter_t *)arg;
 2246         const struct adapter_params *p = &sc->params;
 2247         int i;
 2248 
 2249         if (sc->flags & CXGB_SHUTDOWN)
 2250                 return;
 2251 
 2252         ADAPTER_LOCK(sc);
 2253         if (p->linkpoll_period)
 2254                 check_link_status(sc);
 2255 
 2256         sc->check_task_cnt++;
 2257 
 2258         /*
 2259          * adapter lock can currently only be acquired after the
 2260          * port lock
 2261          */
 2262         ADAPTER_UNLOCK(sc);
 2263 
 2264         if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) 
 2265                 check_t3b2_mac(sc);
 2266 
 2267         /* Update MAC stats if it's time to do so */
 2268         if (!p->linkpoll_period ||
 2269             (sc->check_task_cnt * p->linkpoll_period) / 10 >=
 2270             p->stats_update_period) {
 2271                 for_each_port(sc, i) {
 2272                         struct port_info *port = &sc->port[i];
 2273                         PORT_LOCK(port);
 2274                         t3_mac_update_stats(&port->mac);
 2275                         PORT_UNLOCK(port);
 2276                 }
 2277                 sc->check_task_cnt = 0;
 2278         }
 2279 }
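/*
 * Note (assumption): linkpoll_period appears to be kept in tenths of a
 * second, so the test above accumulates elapsed poll time and refreshes
 * the MAC statistics roughly once every stats_update_period seconds.
 */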
 2280 
 2281 static void
 2282 touch_bars(device_t dev)
 2283 {
 2284         /*
 2285          * Don't enable yet
 2286          */
 2287 #if !defined(__LP64__) && 0
 2288         u32 v;
 2289 
 2290         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
 2291         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
 2292         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
 2293         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
 2294         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
 2295         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
 2296 #endif
 2297 }
 2298 
 2299 static int
 2300 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
 2301 {
 2302         uint8_t *buf;
 2303         int err = 0;
 2304         u32 aligned_offset, aligned_len, *p;
 2305         struct adapter *adapter = pi->adapter;
 2306 
 2307 
 2308         aligned_offset = offset & ~3;
 2309         aligned_len = (len + (offset & 3) + 3) & ~3;
 2310 
 2311         if (aligned_offset != offset || aligned_len != len) {
 2312                 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);              
 2313                 if (!buf)
 2314                         return (ENOMEM);
 2315                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
 2316                 if (!err && aligned_len > 4)
 2317                         err = t3_seeprom_read(adapter,
 2318                                               aligned_offset + aligned_len - 4,
 2319                                               (u32 *)&buf[aligned_len - 4]);
 2320                 if (err)
 2321                         goto out;
 2322                 memcpy(buf + (offset & 3), data, len);
 2323         } else
 2324                 buf = (uint8_t *)(uintptr_t)data;
 2325 
 2326         err = t3_seeprom_wp(adapter, 0);
 2327         if (err)
 2328                 goto out;
 2329 
 2330         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 2331                 err = t3_seeprom_write(adapter, aligned_offset, *p);
 2332                 aligned_offset += 4;
 2333         }
 2334 
 2335         if (!err)
 2336                 err = t3_seeprom_wp(adapter, 1);
 2337 out:
 2338         if (buf != data)
 2339                 free(buf, M_DEVBUF);
 2340         return (err);
 2341 }
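/*
 * Alignment example (illustrative): for offset = 5 and len = 6,
 * aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3 = 8, so the
 * routine reads the two surrounding 32-bit words into a bounce buffer,
 * patches the six payload bytes in at buf + 1, and writes both words
 * back with the EEPROM write protect temporarily lifted.
 */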
 2342 
 2343 
 2344 static int
 2345 in_range(int val, int lo, int hi)
 2346 {
 2347         return (val < 0 || (val <= hi && val >= lo));
 2348 }
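/*
 * Note: in_range() deliberately treats negative values as in range; the
 * CHELSIO_SET_HW_SCHED handler below passes -1 for parameters that
 * should be left unchanged, and each "t->x >= 0" check then skips the
 * corresponding hardware update.
 */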
 2349 
 2350 static int
 2351 cxgb_extension_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 2352 {
 2353         return (0);
 2354 }
 2355 
 2356 static int
 2357 cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 2358 {
 2359         return (0);
 2360 }
 2361 
 2362 static int
 2363 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
 2364     int fflag, struct thread *td)
 2365 {
 2366         int mmd, error = 0;
 2367         struct port_info *pi = dev->si_drv1;
 2368         adapter_t *sc = pi->adapter;
 2369 
 2370 #ifdef PRIV_SUPPORTED   
 2371         if (priv_check(td, PRIV_DRIVER)) {
 2372                 if (cxgb_debug) 
 2373                         printf("user does not have access to privileged ioctls\n");
 2374                 return (EPERM);
 2375         }
 2376 #else
 2377         if (suser(td)) {
 2378                 if (cxgb_debug)
 2379                         printf("user does not have access to privileged ioctls\n");
 2380                 return (EPERM);
 2381         }
 2382 #endif
 2383         
 2384         switch (cmd) {
 2385         case CHELSIO_GET_MIIREG: {
 2386                 uint32_t val;
 2387                 struct cphy *phy = &pi->phy;
 2388                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2389                 
 2390                 if (!phy->mdio_read)
 2391                         return (EOPNOTSUPP);
 2392                 if (is_10G(sc)) {
 2393                         mmd = mid->phy_id >> 8;
 2394                         if (!mmd)
 2395                                 mmd = MDIO_DEV_PCS;
 2396                         else if (mmd > MDIO_DEV_XGXS)
 2397                                 return (EINVAL);
 2398 
 2399                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
 2400                                              mid->reg_num, &val);
 2401                 } else
 2402                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
 2403                                              mid->reg_num & 0x1f, &val);
 2404                 if (error == 0)
 2405                         mid->val_out = val;
 2406                 break;
 2407         }
 2408         case CHELSIO_SET_MIIREG: {
 2409                 struct cphy *phy = &pi->phy;
 2410                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2411 
 2412                 if (!phy->mdio_write)
 2413                         return (EOPNOTSUPP);
 2414                 if (is_10G(sc)) {
 2415                         mmd = mid->phy_id >> 8;
 2416                         if (!mmd)
 2417                                 mmd = MDIO_DEV_PCS;
 2418                         else if (mmd > MDIO_DEV_XGXS)
 2419                                 return (EINVAL);
 2420                         
 2421                         error = phy->mdio_write(sc, mid->phy_id & 0x1f,
 2422                                               mmd, mid->reg_num, mid->val_in);
 2423                 } else
 2424                         error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
 2425                                               mid->reg_num & 0x1f,
 2426                                               mid->val_in);
 2427                 break;
 2428         }
 2429         case CHELSIO_SETREG: {
 2430                 struct ch_reg *edata = (struct ch_reg *)data;
 2431                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2432                         return (EFAULT);
 2433                 t3_write_reg(sc, edata->addr, edata->val);
 2434                 break;
 2435         }
 2436         case CHELSIO_GETREG: {
 2437                 struct ch_reg *edata = (struct ch_reg *)data;
 2438                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2439                         return (EFAULT);
 2440                 edata->val = t3_read_reg(sc, edata->addr);
 2441                 break;
 2442         }
 2443         case CHELSIO_GET_SGE_CONTEXT: {
 2444                 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
 2445                 mtx_lock_spin(&sc->sge.reg_lock);
 2446                 switch (ecntxt->cntxt_type) {
 2447                 case CNTXT_TYPE_EGRESS:
 2448                         error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
 2449                             ecntxt->data);
 2450                         break;
 2451                 case CNTXT_TYPE_FL:
 2452                         error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
 2453                             ecntxt->data);
 2454                         break;
 2455                 case CNTXT_TYPE_RSP:
 2456                         error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
 2457                             ecntxt->data);
 2458                         break;
 2459                 case CNTXT_TYPE_CQ:
 2460                         error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
 2461                             ecntxt->data);
 2462                         break;
 2463                 default:
 2464                         error = EINVAL;
 2465                         break;
 2466                 }
 2467                 mtx_unlock_spin(&sc->sge.reg_lock);
 2468                 break;
 2469         }
 2470         case CHELSIO_GET_SGE_DESC: {
 2471                 struct ch_desc *edesc = (struct ch_desc *)data;
 2472                 int ret;
 2473                 if (edesc->queue_num >= SGE_QSETS * 6)
 2474                         return (EINVAL);
 2475                 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
 2476                     edesc->queue_num % 6, edesc->idx, edesc->data);
 2477                 if (ret < 0)
 2478                         return (EINVAL);
 2479                 edesc->size = ret;
 2480                 break;
 2481         }
 2482         case CHELSIO_GET_QSET_PARAMS: {
 2483                 struct qset_params *q;
 2484                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2485                 int q1 = pi->first_qset;
 2486                 int nqsets = pi->nqsets;
 2487                 int i;
 2488 
 2489                 if (t->qset_idx >= nqsets)
 2490                         return (EINVAL);
 2491 
 2492                 i = q1 + t->qset_idx;
 2493                 q = &sc->params.sge.qset[i];
 2494                 t->rspq_size   = q->rspq_size;
 2495                 t->txq_size[0] = q->txq_size[0];
 2496                 t->txq_size[1] = q->txq_size[1];
 2497                 t->txq_size[2] = q->txq_size[2];
 2498                 t->fl_size[0]  = q->fl_size;
 2499                 t->fl_size[1]  = q->jumbo_size;
 2500                 t->polling     = q->polling;
 2501                 t->lro         = q->lro;
 2502                 t->intr_lat    = q->coalesce_usecs;
 2503                 t->cong_thres  = q->cong_thres;
 2504                 t->qnum        = i;
 2505 
 2506                 if (sc->flags & USING_MSIX)
 2507                         t->vector = rman_get_start(sc->msix_irq_res[i]);
 2508                 else
 2509                         t->vector = rman_get_start(sc->irq_res);
 2510 
 2511                 break;
 2512         }
 2513         case CHELSIO_GET_QSET_NUM: {
 2514                 struct ch_reg *edata = (struct ch_reg *)data;
 2515                 edata->val = pi->nqsets;
 2516                 break;
 2517         }
 2518         case CHELSIO_LOAD_FW: {
 2519                 uint8_t *fw_data;
 2520                 uint32_t vers;
 2521                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2522 
 2523                 /*
 2524                  * You're allowed to load a firmware only before FULL_INIT_DONE
 2525                  *
 2526                  * FW_UPTODATE is also set so the rest of the initialization
 2527                  * will not overwrite what was loaded here.  This gives you the
 2528                  * flexibility to load any firmware (and maybe shoot yourself in
 2529                  * the foot).
 2530                  */
 2531 
 2532                 ADAPTER_LOCK(sc);
 2533                 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
 2534                         ADAPTER_UNLOCK(sc);
 2535                         return (EBUSY);
 2536                 }
 2537 
 2538                 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2539                 if (!fw_data)
 2540                         error = ENOMEM;
 2541                 else
 2542                         error = copyin(t->buf, fw_data, t->len);
 2543 
 2544                 if (!error)
 2545                         error = -t3_load_fw(sc, fw_data, t->len);
 2546 
 2547                 if (t3_get_fw_version(sc, &vers) == 0) {
 2548                         snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
 2549                             "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
 2550                             G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
 2551                 }
 2552 
 2553                 if (!error)
 2554                         sc->flags |= FW_UPTODATE;
 2555 
 2556                 free(fw_data, M_DEVBUF);
 2557                 ADAPTER_UNLOCK(sc);
 2558                 break;
 2559         }
 2560         case CHELSIO_LOAD_BOOT: {
 2561                 uint8_t *boot_data;
 2562                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2563 
 2564                 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2565                 if (!boot_data)
 2566                         return (ENOMEM);
 2567 
 2568                 error = copyin(t->buf, boot_data, t->len);
 2569                 if (!error)
 2570                         error = -t3_load_boot(sc, boot_data, t->len);
 2571 
 2572                 free(boot_data, M_DEVBUF);
 2573                 break;
 2574         }
 2575         case CHELSIO_GET_PM: {
 2576                 struct ch_pm *m = (struct ch_pm *)data;
 2577                 struct tp_params *p = &sc->params.tp;
 2578 
 2579                 if (!is_offload(sc))
 2580                         return (EOPNOTSUPP);
 2581 
 2582                 m->tx_pg_sz = p->tx_pg_size;
 2583                 m->tx_num_pg = p->tx_num_pgs;
 2584                 m->rx_pg_sz  = p->rx_pg_size;
 2585                 m->rx_num_pg = p->rx_num_pgs;
 2586                 m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
 2587 
 2588                 break;
 2589         }
 2590         case CHELSIO_SET_PM: {
 2591                 struct ch_pm *m = (struct ch_pm *)data;
 2592                 struct tp_params *p = &sc->params.tp;
 2593 
 2594                 if (!is_offload(sc))
 2595                         return (EOPNOTSUPP);
 2596                 if (sc->flags & FULL_INIT_DONE)
 2597                         return (EBUSY);
 2598 
 2599                 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
 2600                     !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
 2601                         return (EINVAL);        /* not power of 2 */
 2602                 if (!(m->rx_pg_sz & 0x14000))
 2603                         return (EINVAL);        /* not 16KB or 64KB */
 2604                 if (!(m->tx_pg_sz & 0x1554000))
 2605                         return (EINVAL);        /* not a power-of-4 from 16KB to 16MB */
 2606                 if (m->tx_num_pg == -1)
 2607                         m->tx_num_pg = p->tx_num_pgs;
 2608                 if (m->rx_num_pg == -1)
 2609                         m->rx_num_pg = p->rx_num_pgs;
 2610                 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
 2611                         return (EINVAL);
 2612                 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
 2613                     m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
 2614                         return (EINVAL);
 2615 
 2616                 p->rx_pg_size = m->rx_pg_sz;
 2617                 p->tx_pg_size = m->tx_pg_sz;
 2618                 p->rx_num_pgs = m->rx_num_pg;
 2619                 p->tx_num_pgs = m->tx_num_pg;
 2620                 break;
 2621         }
 2622         case CHELSIO_SETMTUTAB: {
 2623                 struct ch_mtus *m = (struct ch_mtus *)data;
 2624                 int i;
 2625                 
 2626                 if (!is_offload(sc))
 2627                         return (EOPNOTSUPP);
 2628                 if (offload_running(sc))
 2629                         return (EBUSY);
 2630                 if (m->nmtus != NMTUS)
 2631                         return (EINVAL);
 2632                 if (m->mtus[0] < 81)         /* accommodate SACK */
 2633                         return (EINVAL);
 2634                 
 2635                 /*
 2636                  * MTUs must be in ascending order
 2637                  */
 2638                 for (i = 1; i < NMTUS; ++i)
 2639                         if (m->mtus[i] < m->mtus[i - 1])
 2640                                 return (EINVAL);
 2641 
 2642                 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
 2643                 break;
 2644         }
 2645         case CHELSIO_GETMTUTAB: {
 2646                 struct ch_mtus *m = (struct ch_mtus *)data;
 2647 
 2648                 if (!is_offload(sc))
 2649                         return (EOPNOTSUPP);
 2650 
 2651                 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
 2652                 m->nmtus = NMTUS;
 2653                 break;
 2654         }
 2655         case CHELSIO_GET_MEM: {
 2656                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2657                 struct mc7 *mem;
 2658                 uint8_t *useraddr;
 2659                 u64 buf[32];
 2660 
 2661                 /*
 2662                  * Use these to avoid modifying len/addr in the return
 2663                  * struct
 2664                  */
 2665                 uint32_t len = t->len, addr = t->addr;
 2666 
 2667                 if (!is_offload(sc))
 2668                         return (EOPNOTSUPP);
 2669                 if (!(sc->flags & FULL_INIT_DONE))
 2670                         return (EIO);         /* need the memory controllers */
 2671                 if ((addr & 0x7) || (len & 0x7))
 2672                         return (EINVAL);
 2673                 if (t->mem_id == MEM_CM)
 2674                         mem = &sc->cm;
 2675                 else if (t->mem_id == MEM_PMRX)
 2676                         mem = &sc->pmrx;
 2677                 else if (t->mem_id == MEM_PMTX)
 2678                         mem = &sc->pmtx;
 2679                 else
 2680                         return (EINVAL);
 2681 
 2682                 /*
 2683                  * Version scheme:
 2684                  * bits 0..9: chip version
 2685                  * bits 10..15: chip revision
 2686                  */
 2687                 t->version = 3 | (sc->params.rev << 10);
 2688                 
 2689                 /*
 2690                  * Read 256 bytes at a time as len can be large and we don't
 2691                  * want to use huge intermediate buffers.
 2692                  */
 2693                 useraddr = (uint8_t *)t->buf; 
 2694                 while (len) {
 2695                         unsigned int chunk = min(len, sizeof(buf));
 2696 
 2697                         error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
 2698                         if (error)
 2699                                 return (-error);
 2700                         if (copyout(buf, useraddr, chunk))
 2701                                 return (EFAULT);
 2702                         useraddr += chunk;
 2703                         addr += chunk;
 2704                         len -= chunk;
 2705                 }
 2706                 break;
 2707         }
 2708         case CHELSIO_READ_TCAM_WORD: {
 2709                 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
 2710 
 2711                 if (!is_offload(sc))
 2712                         return (EOPNOTSUPP);
 2713                 if (!(sc->flags & FULL_INIT_DONE))
 2714                         return (EIO);         /* need MC5 */            
 2715                 return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
 2717         }
 2718         case CHELSIO_SET_TRACE_FILTER: {
 2719                 struct ch_trace *t = (struct ch_trace *)data;
 2720                 const struct trace_params *tp;
 2721 
 2722                 tp = (const struct trace_params *)&t->sip;
 2723                 if (t->config_tx)
 2724                         t3_config_trace_filter(sc, tp, 0, t->invert_match,
 2725                                                t->trace_tx);
 2726                 if (t->config_rx)
 2727                         t3_config_trace_filter(sc, tp, 1, t->invert_match,
 2728                                                t->trace_rx);
 2729                 break;
 2730         }
 2731         case CHELSIO_SET_PKTSCHED: {
 2732                 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
 2733                 if (sc->open_device_map == 0)
 2734                         return (EAGAIN);
 2735                 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
 2736                     p->binding);
 2737                 break;
 2738         }
 2739         case CHELSIO_IFCONF_GETREGS: {
 2740                 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
 2741                 int reglen = cxgb_get_regs_len();
 2742                 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
 2743                 if (buf == NULL) {
 2744                         return (ENOMEM);
 2745                 }
 2746                 if (regs->len > reglen)
 2747                         regs->len = reglen;
 2748                 else if (regs->len < reglen)
 2749                         error = E2BIG;
 2750 
 2751                 if (!error) {
 2752                         cxgb_get_regs(sc, regs, buf);
 2753                         error = copyout(buf, regs->data, reglen);
 2754                 }
 2755                 free(buf, M_DEVBUF);
 2756 
 2757                 break;
 2758         }
 2759         case CHELSIO_SET_HW_SCHED: {
 2760                 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
 2761                 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
 2762 
 2763                 if ((sc->flags & FULL_INIT_DONE) == 0)
 2764                         return (EAGAIN);       /* need TP to be initialized */
 2765                 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
 2766                     !in_range(t->channel, 0, 1) ||
 2767                     !in_range(t->kbps, 0, 10000000) ||
 2768                     !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
 2769                     !in_range(t->flow_ipg, 0,
 2770                               dack_ticks_to_usec(sc, 0x7ff)))
 2771                         return (EINVAL);
 2772 
 2773                 if (t->kbps >= 0) {
 2774                         error = t3_config_sched(sc, t->kbps, t->sched);
 2775                         if (error < 0)
 2776                                 return (-error);
 2777                 }
 2778                 if (t->class_ipg >= 0)
 2779                         t3_set_sched_ipg(sc, t->sched, t->class_ipg);
 2780                 if (t->flow_ipg >= 0) {
 2781                         t->flow_ipg *= 1000;     /* us -> ns */
 2782                         t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
 2783                 }
 2784                 if (t->mode >= 0) {
 2785                         int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
 2786 
 2787                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 2788                                          bit, t->mode ? bit : 0);
 2789                 }
 2790                 if (t->channel >= 0)
 2791                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 2792                                          1 << t->sched, t->channel << t->sched);
 2793                 break;
 2794         }
 2795         case CHELSIO_GET_EEPROM: {
 2796                 int i;
 2797                 struct ch_eeprom *e = (struct ch_eeprom *)data;
 2798                 uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
 2799 
 2800                 if (buf == NULL) {
 2801                         return (ENOMEM);
 2802                 }
 2803                 e->magic = EEPROM_MAGIC;
 2804                 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
 2805                         error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
 2806 
 2807                 if (!error)
 2808                         error = copyout(buf + e->offset, e->data, e->len);
 2809 
 2810                 free(buf, M_DEVBUF);
 2811                 break;
 2812         }
 2813         case CHELSIO_CLEAR_STATS: {
 2814                 if (!(sc->flags & FULL_INIT_DONE))
 2815                         return (EAGAIN);
 2816 
 2817                 PORT_LOCK(pi);
 2818                 t3_mac_update_stats(&pi->mac);
 2819                 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
 2820                 PORT_UNLOCK(pi);
 2821                 break;
 2822         }
 2823         default:
 2824                 return (EOPNOTSUPP);
 2825                 break;
 2826         }
 2827 
 2828         return (error);
 2829 }
 2830 
 2831 static __inline void
 2832 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
 2833     unsigned int end)
 2834 {
 2835         uint32_t *p = (uint32_t *)(buf + start);
 2836 
 2837         for ( ; start <= end; start += sizeof(uint32_t))
 2838                 *p++ = t3_read_reg(ap, start);
 2839 }
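/*
 * Note: the dump range is inclusive at both ends and assumes 4-byte
 * aligned register addresses; each value is stored at its own register
 * offset within buf, so the buffer is a sparse image of the adapter's
 * register address space.
 */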
 2840 
 2841 #define T3_REGMAP_SIZE (3 * 1024)
 2842 static int
 2843 cxgb_get_regs_len(void)
 2844 {
 2845         return (T3_REGMAP_SIZE);
 2846 }
 2847 
 2848 static void
 2849 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
 2850 {           
 2851         
 2852         /*
 2853          * Version scheme:
 2854          * bits 0..9: chip version
 2855          * bits 10..15: chip revision
 2856          * bit 31: set for PCIe cards
 2857          */
 2858         regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
 2859 
 2860         /*
 2861          * We skip the MAC statistics registers because they are clear-on-read.
 2862          * Also reading multi-register stats would need to synchronize with the
 2863          * periodic mac stats accumulation.  Hard to justify the complexity.
 2864          */
 2865         memset(buf, 0, cxgb_get_regs_len());
 2866         reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
 2867         reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
 2868         reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
 2869         reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
 2870         reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
 2871         reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
 2872                        XGM_REG(A_XGM_SERDES_STAT3, 1));
 2873         reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
 2874                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
 2875 }
 2876 
 2877 
 2878 MODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
