FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_main.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
    12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/6.4/sys/dev/cxgb/cxgb_main.c 174319 2007-12-05 22:05:49Z kmacy $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/bus.h>
   37 #include <sys/module.h>
   38 #include <sys/pciio.h>
   39 #include <sys/conf.h>
   40 #include <machine/bus.h>
   41 #include <machine/resource.h>
   42 #include <sys/bus_dma.h>
   43 #include <sys/rman.h>
   44 #include <sys/ioccom.h>
   45 #include <sys/mbuf.h>
   46 #include <sys/linker.h>
   47 #include <sys/firmware.h>
   48 #include <sys/socket.h>
   49 #include <sys/sockio.h>
   50 #include <sys/smp.h>
   51 #include <sys/sysctl.h>
   52 #include <sys/queue.h>
   53 #include <sys/taskqueue.h>
   54 
   55 #include <net/bpf.h>
   56 #include <net/ethernet.h>
   57 #include <net/if.h>
   58 #include <net/if_arp.h>
   59 #include <net/if_dl.h>
   60 #include <net/if_media.h>
   61 #include <net/if_types.h>
   62 
   63 #include <netinet/in_systm.h>
   64 #include <netinet/in.h>
   65 #include <netinet/if_ether.h>
    66 #include <netinet/ip.h>
   68 #include <netinet/tcp.h>
   69 #include <netinet/udp.h>
   70 
   71 #include <dev/pci/pcireg.h>
   72 #include <dev/pci/pcivar.h>
   73 #include <dev/pci/pci_private.h>
   74 
   75 #ifdef CONFIG_DEFINED
   76 #include <cxgb_include.h>
   77 #else
   78 #include <dev/cxgb/cxgb_include.h>
   79 #endif
   80 
   81 #ifdef PRIV_SUPPORTED
   82 #include <sys/priv.h>
   83 #endif
   84 
   85 static int cxgb_setup_msix(adapter_t *, int);
   86 static void cxgb_teardown_msix(adapter_t *);
   87 static void cxgb_init(void *);
   88 static void cxgb_init_locked(struct port_info *);
   89 static void cxgb_stop_locked(struct port_info *);
   90 static void cxgb_set_rxmode(struct port_info *);
   91 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
   92 static void cxgb_start(struct ifnet *);
   93 static void cxgb_start_proc(void *, int ncount);
   94 static int cxgb_media_change(struct ifnet *);
   95 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
   96 static int setup_sge_qsets(adapter_t *);
   97 static void cxgb_async_intr(void *);
   98 static void cxgb_ext_intr_handler(void *, int);
   99 static void cxgb_tick_handler(void *, int);
  100 static void cxgb_down_locked(struct adapter *sc);
  101 static void cxgb_tick(void *);
  102 static void setup_rss(adapter_t *sc);
  103 
  104 /* Attachment glue for the PCI controller end of the device.  Each port of
  105  * the device is attached separately, as defined later.
  106  */
  107 static int cxgb_controller_probe(device_t);
  108 static int cxgb_controller_attach(device_t);
  109 static int cxgb_controller_detach(device_t);
  110 static void cxgb_free(struct adapter *);
  111 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
  112     unsigned int end);
  113 static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
  114 static int cxgb_get_regs_len(void);
  115 static int offload_open(struct port_info *pi);
  116 static void touch_bars(device_t dev);
  117 
  118 #ifdef notyet
  119 static int offload_close(struct toedev *tdev);
  120 #endif
  121 
  122 
  123 static device_method_t cxgb_controller_methods[] = {
  124         DEVMETHOD(device_probe,         cxgb_controller_probe),
  125         DEVMETHOD(device_attach,        cxgb_controller_attach),
  126         DEVMETHOD(device_detach,        cxgb_controller_detach),
  127 
  128         /* bus interface */
  129         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  130         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  131 
  132         { 0, 0 }
  133 };
  134 
  135 static driver_t cxgb_controller_driver = {
  136         "cxgbc",
  137         cxgb_controller_methods,
  138         sizeof(struct adapter)
  139 };
  140 
  141 static devclass_t       cxgb_controller_devclass;
  142 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
  143 
  144 /*
  145  * Attachment glue for the ports.  Attachment is done directly to the
  146  * controller device.
  147  */
  148 static int cxgb_port_probe(device_t);
  149 static int cxgb_port_attach(device_t);
  150 static int cxgb_port_detach(device_t);
  151 
  152 static device_method_t cxgb_port_methods[] = {
  153         DEVMETHOD(device_probe,         cxgb_port_probe),
  154         DEVMETHOD(device_attach,        cxgb_port_attach),
  155         DEVMETHOD(device_detach,        cxgb_port_detach),
  156         { 0, 0 }
  157 };
  158 
  159 static driver_t cxgb_port_driver = {
  160         "cxgb",
  161         cxgb_port_methods,
  162         0
  163 };
  164 
  165 static d_ioctl_t cxgb_extension_ioctl;
  166 static d_open_t cxgb_extension_open;
  167 static d_close_t cxgb_extension_close;
  168 
  169 static struct cdevsw cxgb_cdevsw = {
  170        .d_version =    D_VERSION,
  171        .d_flags =      0,
  172        .d_open =       cxgb_extension_open,
  173        .d_close =      cxgb_extension_close,
  174        .d_ioctl =      cxgb_extension_ioctl,
  175        .d_name =       "cxgb",
  176 };
  177 
  178 static devclass_t       cxgb_port_devclass;
  179 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
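/*
 * A sketch of the resulting newbus hierarchy, assuming a two-port adapter
 * that probes as unit 0 (unit numbers and port count are illustrative):
 *
 *	pci0
 *	 `- cxgbc0           controller driver, softc is struct adapter
 *	     |- cxgb0        port 0, softc is struct port_info
 *	     `- cxgb1        port 1, softc is struct port_info
 *
 * cxgb_controller_attach() below creates the cxgb children with
 * device_add_child(), one per MAC.
 */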
  180 
  181 #define SGE_MSIX_COUNT (SGE_QSETS + 1)
  182 
  183 extern int collapse_mbufs;
  184 /*
  185  * The driver uses the best interrupt scheme available on a platform in the
  186  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
  187  * of these schemes the driver may consider as follows:
  188  *
  189  * msi = 2: choose from among all three options
   190  * msi = 1: only consider MSI and pin interrupts
  191  * msi = 0: force pin interrupts
  192  */
  193 static int msi_allowed = 2;
  194 
  195 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
  196 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
  197 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
  198     "MSI-X, MSI, INTx selector");
  199 
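/*
 * Because these parameters are declared CTLFLAG_RDTUN, they can only be
 * set as boot-time tunables, e.g. from /boot/loader.conf.  A minimal
 * sketch that restricts the driver to MSI or INTx:
 *
 *	hw.cxgb.msi_allowed="1"
 *
 * The hw.cxgb.ofld_disable and hw.cxgb.singleq tunables declared below
 * follow the same pattern.
 */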
  200 /*
  201  * The driver enables offload as a default.
  202  * To disable it, use ofld_disable = 1.
  203  */
  204 static int ofld_disable = 0;
  205 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
  206 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
  207     "disable ULP offload");
  208 
  209 /*
  210  * The driver uses an auto-queue algorithm by default.
  211  * To disable it and force a single queue-set per port, use singleq = 1.
  212  */
   213 static int singleq = 0;
  214 TUNABLE_INT("hw.cxgb.singleq", &singleq);
  215 SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
  216     "use a single queue-set per port");
  217 
  218 enum {
  219         MAX_TXQ_ENTRIES      = 16384,
  220         MAX_CTRL_TXQ_ENTRIES = 1024,
  221         MAX_RSPQ_ENTRIES     = 16384,
  222         MAX_RX_BUFFERS       = 16384,
  223         MAX_RX_JUMBO_BUFFERS = 16384,
  224         MIN_TXQ_ENTRIES      = 4,
  225         MIN_CTRL_TXQ_ENTRIES = 4,
  226         MIN_RSPQ_ENTRIES     = 32,
  227         MIN_FL_ENTRIES       = 32,
  228         MIN_FL_JUMBO_ENTRIES = 32
  229 };
  230 
  231 struct filter_info {
  232         u32 sip;
  233         u32 sip_mask;
  234         u32 dip;
  235         u16 sport;
  236         u16 dport;
  237         u32 vlan:12;
  238         u32 vlan_prio:3;
  239         u32 mac_hit:1;
  240         u32 mac_idx:4;
  241         u32 mac_vld:1;
  242         u32 pkt_type:2;
  243         u32 report_filter_id:1;
  244         u32 pass:1;
  245         u32 rss:1;
  246         u32 qset:3;
  247         u32 locked:1;
  248         u32 valid:1;
  249 };
  250 
  251 enum { FILTER_NO_VLAN_PRI = 7 };
  252 
  253 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  254 
  255 /* Table for probing the cards.  The desc field isn't actually used */
  256 struct cxgb_ident {
  257         uint16_t        vendor;
  258         uint16_t        device;
  259         int             index;
  260         char            *desc;
  261 } cxgb_identifiers[] = {
  262         {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
  263         {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
  264         {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
  265         {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
  266         {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
  267         {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
  268         {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
  269         {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
  270         {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
  271         {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
  272         {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
  273         {0, 0, 0, NULL}
  274 };
  275 
  276 
  277 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
  278 
  279 static inline char
  280 t3rev2char(struct adapter *adapter)
  281 {
  282         char rev = 'z';
  283 
  284         switch(adapter->params.rev) {
  285         case T3_REV_A:
  286                 rev = 'a';
  287                 break;
  288         case T3_REV_B:
  289         case T3_REV_B2:
  290                 rev = 'b';
  291                 break;
  292         case T3_REV_C:
  293                 rev = 'c';
  294                 break;
  295         }
  296         return rev;
  297 }
  298 
  299 static struct cxgb_ident *
  300 cxgb_get_ident(device_t dev)
  301 {
  302         struct cxgb_ident *id;
  303 
  304         for (id = cxgb_identifiers; id->desc != NULL; id++) {
  305                 if ((id->vendor == pci_get_vendor(dev)) &&
  306                     (id->device == pci_get_device(dev))) {
  307                         return (id);
  308                 }
  309         }
  310         return (NULL);
  311 }
  312 
  313 static const struct adapter_info *
  314 cxgb_get_adapter_info(device_t dev)
  315 {
  316         struct cxgb_ident *id;
  317         const struct adapter_info *ai;
  318       
  319         id = cxgb_get_ident(dev);
  320         if (id == NULL)
  321                 return (NULL);
  322 
  323         ai = t3_get_adapter_info(id->index);
  324 
  325         return (ai);
  326 }
  327 
  328 static int
  329 cxgb_controller_probe(device_t dev)
  330 {
  331         const struct adapter_info *ai;
  332         char *ports, buf[80];
  333         int nports;
  334         
  335         ai = cxgb_get_adapter_info(dev);
  336         if (ai == NULL)
  337                 return (ENXIO);
  338 
  339         nports = ai->nports0 + ai->nports1;
  340         if (nports == 1)
  341                 ports = "port";
  342         else
  343                 ports = "ports";
  344 
  345         snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
  346         device_set_desc_copy(dev, buf);
  347         return (BUS_PROBE_DEFAULT);
  348 }
  349 
  350 #define FW_FNAME "t3fw%d%d%d"
  351 #define TPEEPROM_NAME "t3%ctpe%d%d%d"
  352 #define TPSRAM_NAME "t3%cps%d%d%d"
  353 
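/*
 * These format strings name firmware(9) images.  As a worked example with
 * hypothetical version numbers: if FW_VERSION_MAJOR/MINOR/MICRO were 4, 7
 * and 0, FW_FNAME would expand to "t3fw470", the image name upgrade_fw()
 * below hands to firmware_get().  TPEEPROM_NAME and TPSRAM_NAME also embed
 * the chip revision character from t3rev2char(), e.g. a "t3b..." name for
 * a rev-B part.
 */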
  354 static int
  355 upgrade_fw(adapter_t *sc)
  356 {
  357         char buf[32];
  358 #ifdef FIRMWARE_LATEST
  359         const struct firmware *fw;
  360 #else
  361         struct firmware *fw;
  362 #endif  
  363         int status;
  364         
  365         snprintf(&buf[0], sizeof(buf), FW_FNAME,  FW_VERSION_MAJOR,
  366             FW_VERSION_MINOR, FW_VERSION_MICRO);
  367         
  368         fw = firmware_get(buf);
  369         
  370         if (fw == NULL) {
  371                 device_printf(sc->dev, "Could not find firmware image %s\n", buf);
  372                 return (ENOENT);
  373         } else
  374                 device_printf(sc->dev, "updating firmware on card with %s\n", buf);
  375         status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
  376 
  377         device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
  378         
  379         firmware_put(fw, FIRMWARE_UNLOAD);
  380 
  381         return (status);        
  382 }
  383 
  384 static int
  385 cxgb_controller_attach(device_t dev)
  386 {
  387         device_t child;
  388         const struct adapter_info *ai;
  389         struct adapter *sc;
  390         int i, error = 0;
  391         uint32_t vers;
  392         int port_qsets = 1;
  393 #ifdef MSI_SUPPORTED
  394         int msi_needed, reg;
  395 #endif  
  396         sc = device_get_softc(dev);
  397         sc->dev = dev;
  398         sc->msi_count = 0;
  399         ai = cxgb_get_adapter_info(dev);
  400 
  401         /*
  402          * XXX not really related but a recent addition
  403          */
  404 #ifdef MSI_SUPPORTED    
   405         /* Find the PCIe link width and set max read request to 4KB */
  406         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
  407                 uint16_t lnk, pectl;
  408                 lnk = pci_read_config(dev, reg + 0x12, 2);
  409                 sc->link_width = (lnk >> 4) & 0x3f;
  410                 
  411                 pectl = pci_read_config(dev, reg + 0x8, 2);
  412                 pectl = (pectl & ~0x7000) | (5 << 12);
  413                 pci_write_config(dev, reg + 0x8, pectl, 2);
  414         }
  415         if (sc->link_width != 0 && sc->link_width <= 4 &&
  416             (ai->nports0 + ai->nports1) <= 2) {
  417                 device_printf(sc->dev,
  418                     "PCIe x%d Link, expect reduced performance\n",
  419                     sc->link_width);
  420         }
  421 #endif
  422         touch_bars(dev);
  423         pci_enable_busmaster(dev);
  424         /*
  425          * Allocate the registers and make them available to the driver.
  426          * The registers that we care about for NIC mode are in BAR 0
  427          */
  428         sc->regs_rid = PCIR_BAR(0);
  429         if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  430             &sc->regs_rid, RF_ACTIVE)) == NULL) {
  431                 device_printf(dev, "Cannot allocate BAR\n");
  432                 return (ENXIO);
  433         }
  434 
  435         snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
  436             device_get_unit(dev));
  437         ADAPTER_LOCK_INIT(sc, sc->lockbuf);
  438 
  439         snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
  440             device_get_unit(dev));
  441         snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
  442             device_get_unit(dev));
  443         snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
  444             device_get_unit(dev));
  445         
  446         MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
  447         MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
  448         MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
  449         
  450         sc->bt = rman_get_bustag(sc->regs_res);
  451         sc->bh = rman_get_bushandle(sc->regs_res);
  452         sc->mmio_len = rman_get_size(sc->regs_res);
  453 
  454         if (t3_prep_adapter(sc, ai, 1) < 0) {
  455                 printf("prep adapter failed\n");
  456                 error = ENODEV;
  457                 goto out;
  458         }
  459         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
  460          * enough messages for the queue sets.  If that fails, try falling
  461          * back to MSI.  If that fails, then try falling back to the legacy
  462          * interrupt pin model.
  463          */
  464 #ifdef MSI_SUPPORTED
  465 
  466         sc->msix_regs_rid = 0x20;
  467         if ((msi_allowed >= 2) &&
  468             (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  469             &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
  470 
  471                 msi_needed = sc->msi_count = SGE_MSIX_COUNT;
  472 
  473                 if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
  474                     (sc->msi_count != msi_needed)) {
  475                         device_printf(dev, "msix allocation failed - msi_count = %d"
  476                             " msi_needed=%d will try msi err=%d\n", sc->msi_count,
  477                             msi_needed, error);
  478                         sc->msi_count = 0;
  479                         pci_release_msi(dev);
  480                         bus_release_resource(dev, SYS_RES_MEMORY,
  481                             sc->msix_regs_rid, sc->msix_regs_res);
  482                         sc->msix_regs_res = NULL;
  483                 } else {
  484                         sc->flags |= USING_MSIX;
  485                         sc->cxgb_intr = t3_intr_msix;
  486                 }
  487         }
  488 
  489         if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
  490                 sc->msi_count = 1;
  491                 if (pci_alloc_msi(dev, &sc->msi_count)) {
  492                         device_printf(dev, "alloc msi failed - will try INTx\n");
  493                         sc->msi_count = 0;
  494                         pci_release_msi(dev);
  495                 } else {
  496                         sc->flags |= USING_MSI;
  497                         sc->irq_rid = 1;
  498                         sc->cxgb_intr = t3_intr_msi;
  499                 }
  500         }
  501 #endif
  502         if (sc->msi_count == 0) {
  503                 device_printf(dev, "using line interrupts\n");
  504                 sc->irq_rid = 0;
  505                 sc->cxgb_intr = t3b_intr;
  506         }
  507 
  508 
  509         /* Create a private taskqueue thread for handling driver events */
  510 #ifdef TASKQUEUE_CURRENT        
  511         sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
  512             taskqueue_thread_enqueue, &sc->tq);
  513 #else
  514         sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
  515             taskqueue_thread_enqueue, &sc->tq);
  516 #endif  
  517         if (sc->tq == NULL) {
  518                 device_printf(dev, "failed to allocate controller task queue\n");
   519                 error = ENOMEM;
                       goto out;
  520         }
  521 
  522         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
  523             device_get_nameunit(dev));
  524         TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
  525         TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
  526 
  527         
  528         /* Create a periodic callout for checking adapter status */
  529         callout_init(&sc->cxgb_tick_ch, TRUE);
  530         
  531         if (t3_check_fw_version(sc) != 0) {
  532                 /*
  533                  * Warn user that a firmware update will be attempted in init.
  534                  */
  535                 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
  536                     FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  537                 sc->flags &= ~FW_UPTODATE;
  538         } else {
  539                 sc->flags |= FW_UPTODATE;
  540         }
  541 
  542         if (t3_check_tpsram_version(sc) != 0) {
  543                 /*
   544                  * Warn user that a TP SRAM update will be attempted in init.
  545                  */
  546                 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
  547                     t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  548                 sc->flags &= ~TPS_UPTODATE;
  549         } else {
  550                 sc->flags |= TPS_UPTODATE;
  551         }
  552         
  553         if ((sc->flags & USING_MSIX) && !singleq)
  554                 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
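        /*
         * Worked example with hypothetical values: if SGE_QSETS were 8 on
         * a two-port adapter with mp_ncpus = 4, each port would get
         * min(8 / 2, 4) = 4 queue sets; with mp_ncpus = 2, min(4, 2) = 2.
         * Without MSI-X, or with the singleq tunable set, port_qsets
         * stays at its initial value of 1.
         */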
  555 
  556         /*
  557          * Create a child device for each MAC.  The ethernet attachment
  558          * will be done in these children.
  559          */     
  560         for (i = 0; i < (sc)->params.nports; i++) {
  561                 struct port_info *pi;
  562                 
  563                 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
  564                         device_printf(dev, "failed to add child port\n");
  565                         error = EINVAL;
  566                         goto out;
  567                 }
  568                 pi = &sc->port[i];
  569                 pi->adapter = sc;
  570                 pi->nqsets = port_qsets;
  571                 pi->first_qset = i*port_qsets;
  572                 pi->port_id = i;
  573                 pi->tx_chan = i >= ai->nports0;
  574                 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
  575                 sc->rxpkt_map[pi->txpkt_intf] = i;
  576                 sc->portdev[i] = child;
  577                 device_set_softc(child, pi);
  578         }
  579         if ((error = bus_generic_attach(dev)) != 0)
  580                 goto out;
  581 
  582         /*
  583          * XXX need to poll for link status
  584          */
  585         sc->params.stats_update_period = 1;
  586 
  587         /* initialize sge private state */
  588         t3_sge_init_adapter(sc);
  589 
  590         t3_led_ready(sc);
  591         
  592         cxgb_offload_init();
  593         if (is_offload(sc)) {
  594                 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  595                 cxgb_adapter_ofld(sc);
  596         }
  597         error = t3_get_fw_version(sc, &vers);
  598         if (error)
  599                 goto out;
  600 
  601         snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  602             G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  603             G_FW_VERSION_MICRO(vers));
  604 
  605         t3_add_sysctls(sc);
  606 out:
  607         if (error)
  608                 cxgb_free(sc);
  609 
  610         return (error);
  611 }
  612 
  613 static int
  614 cxgb_controller_detach(device_t dev)
  615 {
  616         struct adapter *sc;
  617 
  618         sc = device_get_softc(dev);
  619 
  620         cxgb_free(sc);
  621 
  622         return (0);
  623 }
  624 
  625 static void
  626 cxgb_free(struct adapter *sc)
  627 {
  628         int i;
  629 
  630         ADAPTER_LOCK(sc);
  631         /*
  632          * drops the lock
  633          */
  634         cxgb_down_locked(sc);
  635         
  636 #ifdef MSI_SUPPORTED
  637         if (sc->flags & (USING_MSI | USING_MSIX)) {
  638                 device_printf(sc->dev, "releasing msi message(s)\n");
  639                 pci_release_msi(sc->dev);
  640         } else {
  641                 device_printf(sc->dev, "no msi message to release\n");
  642         }
  643 #endif
  644         if (sc->msix_regs_res != NULL) {
  645                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
  646                     sc->msix_regs_res);
  647         }
  648         
  649         if (sc->tq != NULL) {
  650                 taskqueue_drain(sc->tq, &sc->ext_intr_task);
  651                 taskqueue_drain(sc->tq, &sc->tick_task);        
  652         }       
  653         t3_sge_deinit_sw(sc);
  654         /*
  655          * Wait for last callout
  656          */
  657         
  658         tsleep(&sc, 0, "cxgb unload", 3*hz);
  659 
  660         for (i = 0; i < (sc)->params.nports; ++i) {
  661                 if (sc->portdev[i] != NULL)
  662                         device_delete_child(sc->dev, sc->portdev[i]);
  663         }
  664                 
  665         bus_generic_detach(sc->dev);
  666         if (sc->tq != NULL) 
  667                 taskqueue_free(sc->tq);
  668 #ifdef notyet
  669         if (is_offload(sc)) {
  670                 cxgb_adapter_unofld(sc);
  671                 if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
  672                         offload_close(&sc->tdev);
  673         }
  674 #endif
  675 
  676         t3_free_sge_resources(sc);
  677         free(sc->filters, M_DEVBUF);
  678         t3_sge_free(sc);
  679         
  680         cxgb_offload_exit();
  681         
  682         if (sc->regs_res != NULL)
  683                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
  684                     sc->regs_res);
  685 
  686         MTX_DESTROY(&sc->mdio_lock);
  687         MTX_DESTROY(&sc->sge.reg_lock);
  688         MTX_DESTROY(&sc->elmer_lock);
  689         ADAPTER_LOCK_DEINIT(sc);
  690         
  691         return;
  692 }
  693 
  694 /**
  695  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
  696  *      @sc: the controller softc
  697  *
  698  *      Determines how many sets of SGE queues to use and initializes them.
  699  *      We support multiple queue sets per port if we have MSI-X, otherwise
  700  *      just one queue set per port.
  701  */
  702 static int
  703 setup_sge_qsets(adapter_t *sc)
  704 {
  705         int i, j, err, irq_idx = 0, qset_idx = 0;
  706         u_int ntxq = SGE_TXQ_PER_SET;
  707 
  708         if ((err = t3_sge_alloc(sc)) != 0) {
  709                 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
  710                 return (err);
  711         }
  712 
  713         if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
  714                 irq_idx = -1;
  715 
  716         for (i = 0; i < (sc)->params.nports; i++) {
  717                 struct port_info *pi = &sc->port[i];
  718 
  719                 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
  720                         err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
  721                             (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
  722                             &sc->params.sge.qset[qset_idx], ntxq, pi);
  723                         if (err) {
  724                                 t3_free_sge_resources(sc);
  725                                 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
  726                                     err);
  727                                 return (err);
  728                         }
  729                 }
  730         }
  731 
  732         return (0);
  733 }
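/*
 * The interrupt index passed to t3_sge_alloc_qset() above mirrors the
 * layout that cxgb_setup_msix() establishes below: with MSI-X, queue set
 * qset_idx gets its own vector (qset_idx + 1); otherwise every queue set
 * shares one interrupt, index 0, or -1 on rev > 0 parts that are not
 * using MSI.
 */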
  734 
  735 static void
  736 cxgb_teardown_msix(adapter_t *sc) 
  737 {
  738         int i, nqsets;
  739         
  740         for (nqsets = i = 0; i < (sc)->params.nports; i++) 
  741                 nqsets += sc->port[i].nqsets;
  742 
  743         for (i = 0; i < nqsets; i++) {
  744                 if (sc->msix_intr_tag[i] != NULL) {
  745                         bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
  746                             sc->msix_intr_tag[i]);
  747                         sc->msix_intr_tag[i] = NULL;
  748                 }
  749                 if (sc->msix_irq_res[i] != NULL) {
  750                         bus_release_resource(sc->dev, SYS_RES_IRQ,
  751                             sc->msix_irq_rid[i], sc->msix_irq_res[i]);
  752                         sc->msix_irq_res[i] = NULL;
  753                 }
  754         }
  755 }
  756 
  757 static int
  758 cxgb_setup_msix(adapter_t *sc, int msix_count)
  759 {
  760         int i, j, k, nqsets, rid;
  761 
  762         /* The first message indicates link changes and error conditions */
  763         sc->irq_rid = 1;
  764         if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
  765            &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  766                 device_printf(sc->dev, "Cannot allocate msix interrupt\n");
  767                 return (EINVAL);
  768         }
  769 
  770         if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
  771 #ifdef INTR_FILTERS
  772                 NULL,
  773 #endif
  774                 cxgb_async_intr, sc, &sc->intr_tag)) {
  775                 device_printf(sc->dev, "Cannot set up interrupt\n");
  776                 return (EINVAL);
  777         }
  778         for (i = k = 0; i < (sc)->params.nports; i++) {
  779                 nqsets = sc->port[i].nqsets;
  780                 for (j = 0; j < nqsets; j++, k++) {
  781                         struct sge_qset *qs = &sc->sge.qs[k];
  782 
  783                         rid = k + 2;
  784                         if (cxgb_debug)
  785                                 printf("rid=%d ", rid);
  786                         if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
  787                             sc->dev, SYS_RES_IRQ, &rid,
  788                             RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  789                                 device_printf(sc->dev, "Cannot allocate "
  790                                     "interrupt for message %d\n", rid);
  791                                 return (EINVAL);
  792                         }
  793                         sc->msix_irq_rid[k] = rid;
  794                         printf("setting up interrupt for port=%d\n",
  795                             qs->port->port_id);
  796                         if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
  797                             INTR_MPSAFE|INTR_TYPE_NET,
  798 #ifdef INTR_FILTERS
  799                                 NULL,
  800 #endif
  801                                 t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
  802                                 device_printf(sc->dev, "Cannot set up "
  803                                     "interrupt for message %d\n", rid);
  804                                 return (EINVAL);
  805                         }
  806                 }
  807         }
  808 
  809 
  810         return (0);
  811 }
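/*
 * To summarize the vector layout established above: MSI-X message 0
 * (rid 1) is bound to cxgb_async_intr for link changes and error
 * conditions, and queue set k is serviced by t3_intr_msix via message
 * k + 1 (rid k + 2).  This matches the SGE_MSIX_COUNT = SGE_QSETS + 1
 * vectors requested in cxgb_controller_attach().
 */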
  812 
  813 static int
  814 cxgb_port_probe(device_t dev)
  815 {
  816         struct port_info *p;
  817         char buf[80];
  818 
  819         p = device_get_softc(dev);
  820 
  821         snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc);
  822         device_set_desc_copy(dev, buf);
  823         return (0);
  824 }
  825 
  826 
  827 static int
  828 cxgb_makedev(struct port_info *pi)
  829 {
  830         
  831         pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
  832             UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
  833         
  834         if (pi->port_cdev == NULL)
  835                 return (ENOMEM);
  836 
  837         pi->port_cdev->si_drv1 = (void *)pi;
  838         
  839         return (0);
  840 }
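/*
 * cxgb_makedev() exposes one character device per port, named after the
 * ifnet and served by the cxgb_cdevsw methods above.  A minimal userland
 * sketch, assuming a first port named cxgb0 (the device path is
 * illustrative; the available ioctl requests are driver-private):
 *
 *	int fd = open("/dev/cxgb0", O_RDWR);
 *	if (fd >= 0) {
 *		... issue cxgb extension ioctl(2) requests on fd ...
 *		close(fd);
 *	}
 */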
  841 
  842 
  843 #ifdef TSO_SUPPORTED
  844 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
  845 /* Don't enable TSO6 yet */
  846 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
  847 #else
  848 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
  849 /* Don't enable TSO6 yet */
  850 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
  851 #define IFCAP_TSO4 0x0
  852 #define IFCAP_TSO6 0x0
  853 #define CSUM_TSO   0x0
  854 #endif
  855 
  856 
  857 static int
  858 cxgb_port_attach(device_t dev)
  859 {
  860         struct port_info *p;
  861         struct ifnet *ifp;
  862         int err, media_flags;
  863         
  864         p = device_get_softc(dev);
  865 
  866         snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
  867             device_get_unit(device_get_parent(dev)), p->port_id);
  868         PORT_LOCK_INIT(p, p->lockbuf);
  869 
  870         /* Allocate an ifnet object and set it up */
  871         ifp = p->ifp = if_alloc(IFT_ETHER);
  872         if (ifp == NULL) {
  873                 device_printf(dev, "Cannot allocate ifnet\n");
  874                 return (ENOMEM);
  875         }
  876         
  877         /*
  878          * Note that there is currently no watchdog timer.
  879          */
  880         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  881         ifp->if_init = cxgb_init;
  882         ifp->if_softc = p;
  883         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  884         ifp->if_ioctl = cxgb_ioctl;
  885         ifp->if_start = cxgb_start;
  886         ifp->if_timer = 0;      /* Disable ifnet watchdog */
  887         ifp->if_watchdog = NULL;
  888 
  889         ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
  890         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  891         IFQ_SET_READY(&ifp->if_snd);
  892 
  893         ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
  894         ifp->if_capabilities |= CXGB_CAP;
  895         ifp->if_capenable |= CXGB_CAP_ENABLE;
  896         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
  897         /*
  898          * disable TSO on 4-port - it isn't supported by the firmware yet
  899          */     
  900         if (p->adapter->params.nports > 2) {
  901                 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
  902                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
  903                 ifp->if_hwassist &= ~CSUM_TSO;
  904         }
  905 
  906         ether_ifattach(ifp, p->hw_addr);
  907         /*
  908          * Only default to jumbo frames on 10GigE
  909          */
  910         if (p->adapter->params.nports <= 2)
  911                 ifp->if_mtu = 9000;
  912         if ((err = cxgb_makedev(p)) != 0) {
  913                 printf("makedev failed %d\n", err);
  914                 return (err);
  915         }
  916         ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
  917             cxgb_media_status);
  918         
  919         if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
  920                 media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
  921         } else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
  922                 media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
  923         } else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
  924                 media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
  925         } else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
  926                 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
  927                 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
  928                             0, NULL);
  929                 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
  930                             0, NULL);
  931                 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
  932                             0, NULL);
  933                 ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
  934                             0, NULL);
  935                 media_flags = 0;
  936         } else {
  937                 printf("unsupported media type %s\n", p->port_type->desc);
  938                 return (ENXIO);
  939         }
  940         if (media_flags) {
  941                 ifmedia_add(&p->media, media_flags, 0, NULL);
  942                 ifmedia_set(&p->media, media_flags);
  943         } else {
  944                 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
  945                 ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
  946         }       
  947 
  948 
  949         snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
  950 #ifdef TASKQUEUE_CURRENT
   951         /* Create a taskqueue for handling TX without starvation */
  952         p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
  953             taskqueue_thread_enqueue, &p->tq);
  954 #else
   955         /* Create a taskqueue for handling TX without starvation */
  956         p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
  957             taskqueue_thread_enqueue, &p->tq);
  958 #endif  
  959 
  960         if (p->tq == NULL) {
  961                 device_printf(dev, "failed to allocate port task queue\n");
  962                 return (ENOMEM);
  963         }       
  964         taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
  965             device_get_nameunit(dev));
  966         
  967         TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);
  968 
  969         t3_sge_init_port(p);
  970 
  971         return (0);
  972 }
  973 
  974 static int
  975 cxgb_port_detach(device_t dev)
  976 {
  977         struct port_info *p;
  978 
  979         p = device_get_softc(dev);
  980 
  981         PORT_LOCK(p);
  982         if (p->ifp->if_drv_flags & IFF_DRV_RUNNING) 
  983                 cxgb_stop_locked(p);
  984         PORT_UNLOCK(p);
  985         
  986         if (p->tq != NULL) {
  987                 taskqueue_drain(p->tq, &p->start_task);
  988                 taskqueue_free(p->tq);
  989                 p->tq = NULL;
  990         }
  991 
  992         ether_ifdetach(p->ifp);
  993         /*
  994          * the lock may be acquired in ifdetach
  995          */
  996         PORT_LOCK_DEINIT(p);
  997         if_free(p->ifp);
  998         
  999         if (p->port_cdev != NULL)
 1000                 destroy_dev(p->port_cdev);
 1001         
 1002         return (0);
 1003 }
 1004 
 1005 void
 1006 t3_fatal_err(struct adapter *sc)
 1007 {
 1008         u_int fw_status[4];
 1009         
 1010         if (sc->flags & FULL_INIT_DONE) {
 1011                 t3_sge_stop(sc);
 1012                 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
 1013                 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
 1014                 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 1015                 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 1016                 t3_intr_disable(sc);
 1017         }
  1018         device_printf(sc->dev, "encountered fatal error, operation suspended\n");
 1019         if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
  1020                 device_printf(sc->dev, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1021                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1022 }
 1023 
 1024 int
 1025 t3_os_find_pci_capability(adapter_t *sc, int cap)
 1026 {
 1027         device_t dev;
 1028         struct pci_devinfo *dinfo;
 1029         pcicfgregs *cfg;
 1030         uint32_t status;
 1031         uint8_t ptr;
 1032 
 1033         dev = sc->dev;
 1034         dinfo = device_get_ivars(dev);
 1035         cfg = &dinfo->cfg;
 1036 
 1037         status = pci_read_config(dev, PCIR_STATUS, 2);
 1038         if (!(status & PCIM_STATUS_CAPPRESENT))
 1039                 return (0);
 1040 
 1041         switch (cfg->hdrtype & PCIM_HDRTYPE) {
 1042         case 0:
 1043         case 1:
 1044                 ptr = PCIR_CAP_PTR;
 1045                 break;
 1046         case 2:
 1047                 ptr = PCIR_CAP_PTR_2;
 1048                 break;
 1049         default:
 1050                 return (0);
 1051                 break;
 1052         }
 1053         ptr = pci_read_config(dev, ptr, 1);
 1054 
 1055         while (ptr != 0) {
 1056                 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
 1057                         return (ptr);
 1058                 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
 1059         }
 1060 
 1061         return (0);
 1062 }
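/*
 * This is the standard PCI capability-list walk: check
 * PCIM_STATUS_CAPPRESENT, pick the pointer register that matches the
 * header type, then follow the (ID, next) byte pairs.  For example,
 * t3_os_find_pci_capability(sc, PCIY_EXPRESS) returns the config-space
 * offset of the PCI Express capability, or 0 if none is present.
 */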
 1063 
 1064 int
 1065 t3_os_pci_save_state(struct adapter *sc)
 1066 {
 1067         device_t dev;
 1068         struct pci_devinfo *dinfo;
 1069 
 1070         dev = sc->dev;
 1071         dinfo = device_get_ivars(dev);
 1072 
 1073         pci_cfg_save(dev, dinfo, 0);
 1074         return (0);
 1075 }
 1076 
 1077 int
 1078 t3_os_pci_restore_state(struct adapter *sc)
 1079 {
 1080         device_t dev;
 1081         struct pci_devinfo *dinfo;
 1082 
 1083         dev = sc->dev;
 1084         dinfo = device_get_ivars(dev);
 1085 
 1086         pci_cfg_restore(dev, dinfo);
 1087         return (0);
 1088 }
 1089 
 1090 /**
 1091  *      t3_os_link_changed - handle link status changes
 1092  *      @adapter: the adapter associated with the link change
  1093  *      @port_id: the port index whose link status has changed
 1094  *      @link_stat: the new status of the link
 1095  *      @speed: the new speed setting
 1096  *      @duplex: the new duplex setting
 1097  *      @fc: the new flow-control setting
 1098  *
 1099  *      This is the OS-dependent handler for link status changes.  The OS
 1100  *      neutral handler takes care of most of the processing for these events,
 1101  *      then calls this handler for any OS-specific processing.
 1102  */
 1103 void
 1104 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
 1105      int duplex, int fc)
 1106 {
 1107         struct port_info *pi = &adapter->port[port_id];
 1108         struct cmac *mac = &adapter->port[port_id].mac;
 1109 
 1110         if ((pi->ifp->if_flags & IFF_UP) == 0)
 1111                 return;
 1112 
 1113         if (link_status) {
 1114                 t3_mac_enable(mac, MAC_DIRECTION_RX);
 1115                 if_link_state_change(pi->ifp, LINK_STATE_UP);
 1116         } else {
 1117                 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
 1118                 pi->phy.ops->power_down(&pi->phy, 1);
 1119                 t3_mac_disable(mac, MAC_DIRECTION_RX);
 1120                 t3_link_start(&pi->phy, mac, &pi->link_config);
 1121         }
 1122 }
 1123 
 1124 /*
 1125  * Interrupt-context handler for external (PHY) interrupts.
 1126  */
 1127 void
 1128 t3_os_ext_intr_handler(adapter_t *sc)
 1129 {
 1130         if (cxgb_debug)
 1131                 printf("t3_os_ext_intr_handler\n");
 1132         /*
 1133          * Schedule a task to handle external interrupts as they may be slow
 1134          * and we use a mutex to protect MDIO registers.  We disable PHY
 1135          * interrupts in the meantime and let the task reenable them when
 1136          * it's done.
 1137          */
 1138         ADAPTER_LOCK(sc);
 1139         if (sc->slow_intr_mask) {
 1140                 sc->slow_intr_mask &= ~F_T3DBG;
 1141                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 1142                 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
 1143         }
 1144         ADAPTER_UNLOCK(sc);
 1145 }
 1146 
 1147 void
 1148 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
 1149 {
 1150 
 1151         /*
  1152          * The ifnet might not be allocated when this gets called, as
  1153          * this is called early on in attach by t3_prep_adapter, so
  1154          * save the address off in the port structure.
 1155          */
 1156         if (cxgb_debug)
 1157                 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
 1158         bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
 1159 }
 1160 
 1161 /**
  1162  *      cxgb_link_start - enable a port
 1163  *      @p: the port to enable
 1164  *
 1165  *      Performs the MAC and PHY actions needed to enable a port.
 1166  */
 1167 static void
 1168 cxgb_link_start(struct port_info *p)
 1169 {
 1170         struct ifnet *ifp;
 1171         struct t3_rx_mode rm;
 1172         struct cmac *mac = &p->mac;
 1173 
 1174         ifp = p->ifp;
 1175 
 1176         t3_init_rx_mode(&rm, p);
 1177         if (!mac->multiport) 
 1178                 t3_mac_reset(mac);
 1179         t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
 1180         t3_mac_set_address(mac, 0, p->hw_addr);
 1181         t3_mac_set_rx_mode(mac, &rm);
 1182         t3_link_start(&p->phy, mac, &p->link_config);
 1183         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1184 }
 1185 
 1186 /**
 1187  *      setup_rss - configure Receive Side Steering (per-queue connection demux) 
 1188  *      @adap: the adapter
 1189  *
 1190  *      Sets up RSS to distribute packets to multiple receive queues.  We
 1191  *      configure the RSS CPU lookup table to distribute to the number of HW
 1192  *      receive queues, and the response queue lookup table to narrow that
 1193  *      down to the response queues actually configured for each port.
 1194  *      We always configure the RSS mapping for two ports since the mapping
 1195  *      table has plenty of entries.
 1196  */
 1197 static void
 1198 setup_rss(adapter_t *adap)
 1199 {
 1200         int i;
 1201         u_int nq[2]; 
 1202         uint8_t cpus[SGE_QSETS + 1];
 1203         uint16_t rspq_map[RSS_TABLE_SIZE];
 1204 
 1205         for (i = 0; i < SGE_QSETS; ++i)
 1206                 cpus[i] = i;
 1207         cpus[SGE_QSETS] = 0xff;
 1208 
 1209         nq[0] = nq[1] = 0;
 1210         for_each_port(adap, i) {
 1211                 const struct port_info *pi = adap2pinfo(adap, i);
 1212 
 1213                 nq[pi->tx_chan] += pi->nqsets;
 1214         }
 1215         nq[0] = max(nq[0], 1U);
 1216         nq[1] = max(nq[1], 1U);
 1217         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 1218                 rspq_map[i] = i % nq[0];
 1219                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
 1220         }
 1221         /* Calculate the reverse RSS map table */
 1222         for (i = 0; i < RSS_TABLE_SIZE; ++i)
 1223                 if (adap->rrss_map[rspq_map[i]] == 0xff)
 1224                         adap->rrss_map[rspq_map[i]] = i;
 1225 
 1226         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 1227                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
 1228                       V_RRCPLCPUSIZE(6), cpus, rspq_map);
 1229 
 1230 }
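/*
 * Worked example of the mapping above, assuming a hypothetical two-port
 * adapter with two queue sets per port (nq[0] = nq[1] = 2): the first
 * half of rspq_map cycles 0,1,0,1,... steering channel-0 traffic to
 * response queues 0 and 1, while the second half cycles 2,3,2,3,...
 * ((i % nq[1]) + nq[0]) steering channel-1 traffic to queues 2 and 3.
 * rrss_map then records, for each response queue, the first RSS table
 * index that maps to it.
 */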
 1231 
 1232 /*
 1233  * Sends an mbuf to an offload queue driver
 1234  * after dealing with any active network taps.
 1235  */
 1236 static inline int
 1237 offload_tx(struct toedev *tdev, struct mbuf *m)
 1238 {
 1239         int ret;
 1240 
 1241         critical_enter();
 1242         ret = t3_offload_tx(tdev, m);
 1243         critical_exit();
 1244         return (ret);
 1245 }
 1246 
 1247 static int
 1248 write_smt_entry(struct adapter *adapter, int idx)
 1249 {
 1250         struct port_info *pi = &adapter->port[idx];
 1251         struct cpl_smt_write_req *req;
 1252         struct mbuf *m;
 1253 
 1254         if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 1255                 return (ENOMEM);
 1256 
 1257         req = mtod(m, struct cpl_smt_write_req *);
 1258         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1259         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 1260         req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
 1261         req->iff = idx;
 1262         memset(req->src_mac1, 0, sizeof(req->src_mac1));
 1263         memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
 1264 
 1265         m_set_priority(m, 1);
 1266 
 1267         offload_tx(&adapter->tdev, m);
 1268 
 1269         return (0);
 1270 }
 1271 
 1272 static int
 1273 init_smt(struct adapter *adapter)
 1274 {
 1275         int i;
 1276 
 1277         for_each_port(adapter, i)
 1278                 write_smt_entry(adapter, i);
  1279         return (0);
 1280 }
 1281 
 1282 static void
 1283 init_port_mtus(adapter_t *adapter)
 1284 {
 1285         unsigned int mtus = adapter->port[0].ifp->if_mtu;
 1286 
 1287         if (adapter->port[1].ifp)
 1288                 mtus |= adapter->port[1].ifp->if_mtu << 16;
 1289         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 1290 }
 1291 
 1292 static void
 1293 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 1294                               int hi, int port)
 1295 {
 1296         struct mbuf *m;
 1297         struct mngt_pktsched_wr *req;
 1298 
 1299         m = m_gethdr(M_DONTWAIT, MT_DATA);
 1300         if (m) {        
 1301                 req = mtod(m, struct mngt_pktsched_wr *);
 1302                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 1303                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 1304                 req->sched = sched;
 1305                 req->idx = qidx;
 1306                 req->min = lo;
 1307                 req->max = hi;
 1308                 req->binding = port;
 1309                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1310                 t3_mgmt_tx(adap, m);
 1311         }
 1312 }
 1313 
 1314 static void
 1315 bind_qsets(adapter_t *sc)
 1316 {
 1317         int i, j;
 1318 
 1319         for (i = 0; i < (sc)->params.nports; ++i) {
 1320                 const struct port_info *pi = adap2pinfo(sc, i);
 1321 
 1322                 for (j = 0; j < pi->nqsets; ++j) {
 1323                         send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
 1324                                           -1, pi->tx_chan);
 1325 
 1326                 }
 1327         }
 1328 }
 1329 
 1330 static void
 1331 update_tpeeprom(struct adapter *adap)
 1332 {
 1333 #ifdef FIRMWARE_LATEST
 1334         const struct firmware *tpeeprom;
 1335 #else
 1336         struct firmware *tpeeprom;
 1337 #endif  
 1338 
 1339         char buf[64];
 1340         uint32_t version;
 1341         unsigned int major, minor;
 1342         int ret, len;
 1343         char rev;
 1344 
 1345         t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
 1346 
 1347         major = G_TP_VERSION_MAJOR(version);
 1348         minor = G_TP_VERSION_MINOR(version);
 1349         if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
 1350                 return; 
 1351 
 1352         rev = t3rev2char(adap);
 1353 
 1354         snprintf(buf, sizeof(buf), TPEEPROM_NAME, rev,
 1355                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1356 
 1357         tpeeprom = firmware_get(buf);
 1358         if (tpeeprom == NULL) {
 1359                 device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
 1360                         buf);
 1361                 return;
 1362         }
 1363 
 1364         len = tpeeprom->datasize - 4;
 1365         
 1366         ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
 1367         if (ret)
 1368                 goto release_tpeeprom;
 1369 
 1370         if (len != TP_SRAM_LEN) {
 1371                 device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", buf, len, TP_SRAM_LEN);
  1372                 goto release_tpeeprom;
 1373         }
 1374         
 1375         ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
 1376             TP_SRAM_OFFSET);
 1377         
 1378         if (!ret) {
 1379                 device_printf(adap->dev,
 1380                         "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
 1381                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1382         } else 
 1383                 device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");
 1384 
 1385 release_tpeeprom:
 1386         firmware_put(tpeeprom, FIRMWARE_UNLOAD);
 1387         
 1388         return;
 1389 }
 1390 
 1391 static int
 1392 update_tpsram(struct adapter *adap)
 1393 {
 1394 #ifdef FIRMWARE_LATEST
 1395         const struct firmware *tpsram;
 1396 #else
 1397         struct firmware *tpsram;
 1398 #endif  
 1399         char buf[64];
 1400         int ret;
 1401         char rev;
 1402 
 1403         rev = t3rev2char(adap);
 1404         if (!rev)
  1405                 return (0);
 1406 
 1407         update_tpeeprom(adap);
 1408 
 1409         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
 1410                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1411 
 1412         tpsram = firmware_get(buf);
  1413         if (tpsram == NULL) {
 1414                 device_printf(adap->dev, "could not load TP SRAM: unable to load %s\n",
 1415                         buf);
 1416                 return (EINVAL);
 1417         } else
 1418                 device_printf(adap->dev, "updating TP SRAM with %s\n", buf);
 1419         
 1420         ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
 1421         if (ret)
 1422                 goto release_tpsram;    
 1423 
 1424         ret = t3_set_proto_sram(adap, tpsram->data);
 1425         if (ret)
 1426                 device_printf(adap->dev, "loading protocol SRAM failed\n");
 1427 
 1428 release_tpsram:
 1429         firmware_put(tpsram, FIRMWARE_UNLOAD);
 1430         
  1431         return (ret);
 1432 }
 1433 
 1434 /**
 1435  *      cxgb_up - enable the adapter
 1436  *      @adap: adapter being enabled
 1437  *
 1438  *      Called when the first port is enabled, this function performs the
 1439  *      actions necessary to make an adapter operational, such as completing
 1440  *      the initialization of HW modules, and enabling interrupts.
 1441  *
 1442  */
 1443 static int
 1444 cxgb_up(struct adapter *sc)
 1445 {
 1446         int err = 0;
 1447 
 1448         if ((sc->flags & FULL_INIT_DONE) == 0) {
 1449 
 1450                 if ((sc->flags & FW_UPTODATE) == 0)
 1451                         if ((err = upgrade_fw(sc)))
 1452                                 goto out;
 1453                 if ((sc->flags & TPS_UPTODATE) == 0)
 1454                         if ((err = update_tpsram(sc)))
 1455                                 goto out;
 1456                 err = t3_init_hw(sc, 0);
 1457                 if (err)
 1458                         goto out;
 1459 
 1460                 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
 1461 
 1462                 err = setup_sge_qsets(sc);
 1463                 if (err)
 1464                         goto out;
 1465 
 1466                 setup_rss(sc);
 1467                 sc->flags |= FULL_INIT_DONE;
 1468         }
 1469 
 1470         t3_intr_clear(sc);
 1471 
 1472         /* If it's MSI or INTx, allocate a single interrupt for everything */
 1473         if ((sc->flags & USING_MSIX) == 0) {
 1474                 if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
 1475                    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
 1476                         device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
 1477                             sc->irq_rid);
 1478                         err = EINVAL;
 1479                         goto out;
 1480                 }
 1481                 device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);
 1482 
 1483                 if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
 1484 #ifdef INTR_FILTERS
 1485                         NULL,
 1486 #endif                  
 1487                         sc->cxgb_intr, sc, &sc->intr_tag)) {
 1488                         device_printf(sc->dev, "Cannot set up interrupt\n");
 1489                         err = EINVAL;
 1490                         goto irq_err;
 1491                 }
 1492         } else {
 1493                 cxgb_setup_msix(sc, sc->msi_count);
 1494         }
 1495 
 1496         t3_sge_start(sc);
 1497         t3_intr_enable(sc);
 1498 
 1499         if (!(sc->flags & QUEUES_BOUND)) {
 1500                 if (cxgb_debug)
                              printf("bind qsets\n");
 1501                 bind_qsets(sc);
 1502                 sc->flags |= QUEUES_BOUND;              
 1503         }
 1504 out:
 1505         return (err);
 1506 irq_err:
 1507         CH_ERR(sc, "interrupt setup failed, err %d\n", err);
 1508         goto out;
 1509 }
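
/*
 * Illustrative sketch (not part of the driver): cxgb_up() is an instance of
 * a flag-guarded one-shot init -- expensive hardware setup runs only on the
 * first enable and is recorded in a flags word so later enables skip it.
 * All names below (my_softc, MY_INIT_DONE, my_full_init, my_enable) are
 * hypothetical.
 */
#include <stdint.h>

struct my_softc {
	uint32_t flags;
#define	MY_INIT_DONE	0x1
};

static int
my_full_init(struct my_softc *sc)
{
	(void)sc;
	return (0);		/* stand-in for t3_init_hw() and friends */
}

static int
my_enable(struct my_softc *sc)
{
	int err;

	if ((sc->flags & MY_INIT_DONE) == 0) {
		if ((err = my_full_init(sc)) != 0)
			return (err);	/* flag stays clear, so a retry re-runs init */
		sc->flags |= MY_INIT_DONE;
	}
	/* per-enable work (interrupt hookup, queue binding) goes here */
	return (0);
}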
 1510 
 1511 
 1512 /*
 1513  * Release resources when all the ports and offloading have been stopped.
 1514  */
 1515 static void
 1516 cxgb_down_locked(struct adapter *sc)
 1517 {
 1518         int i;
 1519         
 1520         t3_sge_stop(sc);
 1521         t3_intr_disable(sc);
 1522         
 1523         if (sc->intr_tag != NULL) {
 1524                 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
 1525                 sc->intr_tag = NULL;
 1526         }
 1527         if (sc->irq_res != NULL) {
 1528                 device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
 1529                     sc->irq_rid, sc->irq_res);
 1530                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
 1531                     sc->irq_res);
 1532                 sc->irq_res = NULL;
 1533         }
 1534         
 1535         if (sc->flags & USING_MSIX)
 1536                 cxgb_teardown_msix(sc);
 1537         ADAPTER_UNLOCK(sc);
 1538 
 1539         callout_drain(&sc->cxgb_tick_ch);
 1540         callout_drain(&sc->sge_timer_ch);
 1541         
 1542         if (sc->tq != NULL) {
 1543                 taskqueue_drain(sc->tq, &sc->slow_intr_task);
 1544                 for (i = 0; i < sc->params.nports; i++) 
 1545                         taskqueue_drain(sc->tq, &sc->port[i].timer_reclaim_task);
 1546         }
 1547         /*
 1548          * XXX notyet: the per-port taskqueues (sc->port[i].tq) are not
 1549          * drained or freed here; complete this once they are in use.
 1550          */
 1551 
 1552 }
 1553 
 1554 static int
 1555 offload_open(struct port_info *pi)
 1556 {
 1557         struct adapter *adapter = pi->adapter;
 1558         struct toedev *tdev = TOEDEV(pi->ifp);
 1559         int adap_up = adapter->open_device_map & PORT_MASK;
 1560         int err = 0;
 1561 
 1562         if (atomic_cmpset_int(&adapter->open_device_map,
 1563                 (adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT),
 1564                 (adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0)
 1565                 return (0);
 1566 
 1567         ADAPTER_LOCK(pi->adapter); 
 1568         if (!adap_up)
 1569                 err = cxgb_up(adapter);
 1570         ADAPTER_UNLOCK(pi->adapter);
 1571         if (err)
 1572                 return (err);
 1573 
 1574         t3_tp_set_offload_mode(adapter, 1);
 1575         tdev->lldev = adapter->port[0].ifp;
 1576         err = cxgb_offload_activate(adapter);
 1577         if (err)
 1578                 goto out;
 1579 
 1580         init_port_mtus(adapter);
 1581         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
 1582                      adapter->params.b_wnd,
 1583                      adapter->params.rev == 0 ?
 1584                        adapter->port[0].ifp->if_mtu : 0xffff);
 1585         init_smt(adapter);
 1586 
 1587         /* Call back all registered clients */
 1588         cxgb_add_clients(tdev);
 1589 
 1590 out:
 1591         /* on failure, undo the offload mode and device-map changes made above */
 1592         if (err) {
 1593                 t3_tp_set_offload_mode(adapter, 0);
 1594                 clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1595                 cxgb_set_dummy_ops(tdev);
 1596         }
 1597         return (err);
 1598 }
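
/*
 * Illustrative sketch (not part of the driver): the atomic_cmpset_int() call
 * in offload_open() claims OFFLOAD_DEVMAP_BIT exactly once; if the map
 * changed between the read and the compare-and-set, the claim fails and the
 * caller backs off.  A minimal userland analogue using C11 atomics;
 * claim_bit() and its arguments are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
claim_bit(_Atomic unsigned int *map, unsigned int bit)
{
	unsigned int old = atomic_load(map);

	if (old & bit)
		return (false);		/* someone else already claimed it */
	/* fails if *map changed since the load above */
	return (atomic_compare_exchange_strong(map, &old, old | bit));
}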
 1599 #ifdef notyet
 1600 static int
 1601 offload_close(struct toedev *tdev)
 1602 {
 1603         struct adapter *adapter = tdev2adap(tdev);
 1604 
 1605         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
 1606                 return (0);
 1607 
 1608         /* Call back all registered clients */
 1609         cxgb_remove_clients(tdev);
 1610         tdev->lldev = NULL;
 1611         cxgb_set_dummy_ops(tdev);
 1612         t3_tp_set_offload_mode(adapter, 0);
 1613         clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1614 
 1615         if (!adapter->open_device_map)
 1616                 cxgb_down(adapter);
 1617 
 1618         cxgb_offload_deactivate(adapter);
 1619         return (0);
 1620 }
 1621 #endif
 1622 
 1623 static void
 1624 cxgb_init(void *arg)
 1625 {
 1626         struct port_info *p = arg;
 1627 
 1628         PORT_LOCK(p);
 1629         cxgb_init_locked(p);
 1630         PORT_UNLOCK(p);
 1631 }
 1632 
 1633 static void
 1634 cxgb_init_locked(struct port_info *p)
 1635 {
 1636         struct ifnet *ifp;
 1637         adapter_t *sc = p->adapter;
 1638         int err;
 1639 
 1640         PORT_LOCK_ASSERT_OWNED(p);
 1641         ifp = p->ifp;
 1642 
 1643         ADAPTER_LOCK(p->adapter);
 1644         if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
 1645                 ADAPTER_UNLOCK(p->adapter);
 1646                 cxgb_stop_locked(p);
 1647                 return;
 1648         }
 1649         if (p->adapter->open_device_map == 0) {
 1650                 t3_intr_clear(sc);
 1651                 t3_sge_init_adapter(sc);
 1652         }
 1653         setbit(&p->adapter->open_device_map, p->port_id);
 1654         ADAPTER_UNLOCK(p->adapter);
 1655 
 1656         if (is_offload(sc) && !ofld_disable) {
 1657                 err = offload_open(p);
 1658                 if (err)
 1659                         log(LOG_WARNING,
 1660                             "Could not initialize offload capabilities\n");
 1661         }
 1662         cxgb_link_start(p);
 1663         t3_link_changed(sc, p->port_id);
 1664         ifp->if_baudrate = p->link_config.speed * 1000000;
 1665 
 1666         device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
 1667         t3_port_intr_enable(sc, p->port_id);
 1668 
 1669         callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
 1670             cxgb_tick, sc);
 1671 
 1672         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1673         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1674 }
 1675 
 1676 static void
 1677 cxgb_set_rxmode(struct port_info *p)
 1678 {
 1679         struct t3_rx_mode rm;
 1680         struct cmac *mac = &p->mac;
 1681 
 1682         PORT_LOCK_ASSERT_OWNED(p);
 1683 
 1684         t3_init_rx_mode(&rm, p);
 1685         t3_mac_set_rx_mode(mac, &rm);
 1686 }
 1687 
 1688 static void
 1689 cxgb_stop_locked(struct port_info *p)
 1690 {
 1691         struct ifnet *ifp;
 1692 
 1693         PORT_LOCK_ASSERT_OWNED(p);
 1694         ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);
 1695         
 1696         ifp = p->ifp;
 1697 
 1698         t3_port_intr_disable(p->adapter, p->port_id);
 1699         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1700         p->phy.ops->power_down(&p->phy, 1);
 1701         t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 1702 
 1703         ADAPTER_LOCK(p->adapter);
 1704         clrbit(&p->adapter->open_device_map, p->port_id);
 1705 
 1707         if (p->adapter->open_device_map == 0) {
 1708                 cxgb_down_locked(p->adapter);
 1709         } else 
 1710                 ADAPTER_UNLOCK(p->adapter);
 1711 
 1712 }
 1713 
 1714 static int
 1715 cxgb_set_mtu(struct port_info *p, int mtu)
 1716 {
 1717         struct ifnet *ifp = p->ifp;
 1718         int error = 0;
 1719         
 1720         if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
 1721                 error = EINVAL;
 1722         else if (ifp->if_mtu != mtu) {
 1723                 PORT_LOCK(p);
 1724                 ifp->if_mtu = mtu;
 1725                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1726                         callout_stop(&p->adapter->cxgb_tick_ch);
 1727                         cxgb_stop_locked(p);
 1728                         cxgb_init_locked(p);
 1729                 }
 1730                 PORT_UNLOCK(p);
 1731         }
 1732         return (error);
 1733 }
 1734 
 1735 static int
 1736 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
 1737 {
 1738         struct port_info *p = ifp->if_softc;
 1739         struct ifaddr *ifa = (struct ifaddr *)data;
 1740         struct ifreq *ifr = (struct ifreq *)data;
 1741         int flags, error = 0;
 1742         uint32_t mask;
 1743 
 1744         /* 
 1745          * XXX need to check that we aren't in the middle of an unload
 1746          */
 1747         switch (command) {
 1748         case SIOCSIFMTU:
 1749                 error = cxgb_set_mtu(p, ifr->ifr_mtu);
 1750                 break;
 1751         case SIOCSIFADDR:
 1752         case SIOCGIFADDR:
 1753                 PORT_LOCK(p);
 1754                 if (ifa->ifa_addr->sa_family == AF_INET) {
 1755                         ifp->if_flags |= IFF_UP;
 1756                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 
 1757                                 cxgb_init_locked(p);
 1758                         arp_ifinit(ifp, ifa);
 1759                 } else
 1760                         error = ether_ioctl(ifp, command, data);
 1761                 PORT_UNLOCK(p);
 1762                 break;
 1763         case SIOCSIFFLAGS:
 1764                 callout_drain(&p->adapter->cxgb_tick_ch);
 1765                 PORT_LOCK(p);
 1766                 if (ifp->if_flags & IFF_UP) {
 1767                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1768                                 flags = p->if_flags;
 1769                                 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
 1770                                     ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
 1771                                         cxgb_set_rxmode(p);
 1772                         } else
 1773                                 cxgb_init_locked(p);
 1774                         p->if_flags = ifp->if_flags;
 1775                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1776                         cxgb_stop_locked(p);
 1777                                 
 1778                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1779                         adapter_t *sc = p->adapter;
 1780                         callout_reset(&sc->cxgb_tick_ch,
 1781                             sc->params.stats_update_period * hz,
 1782                             cxgb_tick, sc);
 1783                 }
 1784                 PORT_UNLOCK(p);
 1785                 break;
 1786         case SIOCSIFMEDIA:
 1787         case SIOCGIFMEDIA:
 1788                 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 1789                 break;
 1790         case SIOCSIFCAP:
 1791                 PORT_LOCK(p);
 1792                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1793                 if (mask & IFCAP_TXCSUM) {
 1794                         if (IFCAP_TXCSUM & ifp->if_capenable) {
 1795                                 ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
 1796                                 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
 1797                                     | CSUM_TSO);
 1798                         } else {
 1799                                 ifp->if_capenable |= IFCAP_TXCSUM;
 1800                                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
 1801                         }
 1802                 }
 1803                 if (mask & IFCAP_RXCSUM) {
 1804                         /* Rx checksum offload toggles independently of Tx. */
 1805                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1806                 }
 1809                 if (mask & IFCAP_TSO4) {
 1810                         if (IFCAP_TSO4 & ifp->if_capenable) {
 1811                                 ifp->if_capenable &= ~IFCAP_TSO4;
 1812                                 ifp->if_hwassist &= ~CSUM_TSO;
 1813                         } else if (IFCAP_TXCSUM & ifp->if_capenable) {
 1814                                 ifp->if_capenable |= IFCAP_TSO4;
 1815                                 ifp->if_hwassist |= CSUM_TSO;
 1816                         } else {
 1817                                 if (cxgb_debug)
 1818                                         printf("cxgb requires tx checksum offload"
 1819                                             " be enabled to use TSO\n");
 1820                                 error = EINVAL;
 1821                         }
 1822                 }
 1823                 PORT_UNLOCK(p);
 1824                 break;
 1825         default:
 1826                 error = ether_ioctl(ifp, command, data);
 1827                 break;
 1828         }
 1829         return (error);
 1830 }
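
/*
 * Illustrative sketch (not part of the driver): the SIOCSIFCAP case above
 * works on the XOR of the requested and current capability sets, so every
 * set bit in `mask` is a capability the caller wants flipped.  Self-
 * contained, hypothetical demonstration:
 */
#include <stdio.h>

#define	CAP_TXCSUM	0x01
#define	CAP_RXCSUM	0x02

int
main(void)
{
	unsigned int capenable = CAP_TXCSUM;		/* currently enabled */
	unsigned int reqcap = CAP_TXCSUM | CAP_RXCSUM;	/* caller's request */
	unsigned int mask = reqcap ^ capenable;		/* bits to toggle */

	if (mask & CAP_RXCSUM)
		capenable ^= CAP_RXCSUM;
	printf("capenable is now 0x%x\n", capenable);	/* prints 0x3 */
	return (0);
}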
 1831 
 1832 static int
 1833 cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
 1834 {
 1835         struct sge_qset *qs;
 1836         struct sge_txq *txq;
 1837         struct port_info *p = ifp->if_softc;
 1838         struct mbuf *m = NULL;
 1839         int err, in_use_init, free;
 1840 
 1841         if (!p->link_config.link_ok)
 1842                 return (ENXIO);
 1843 
 1844         if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1845                 return (ENOBUFS);
 1846 
 1847         qs = &p->adapter->sge.qs[p->first_qset];
 1848         txq = &qs->txq[TXQ_ETH];
 1849         err = 0;
 1850 
 1851         if (txq->flags & TXQ_TRANSMITTING)
 1852                 return (EINPROGRESS);
 1853         
 1854         mtx_lock(&txq->lock);
 1855         txq->flags |= TXQ_TRANSMITTING;
 1856         in_use_init = txq->in_use;
 1857         while ((txq->in_use - in_use_init < txmax) &&
 1858             (txq->size > txq->in_use + TX_MAX_DESC)) {
 1859                 free = 0;
 1860                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1861                 if (m == NULL)
 1862                         break;
 1863                 /*
 1864                  * Convert chain to M_IOVEC
 1865                  */
 1866                 KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early"));
 1867 #ifdef notyet
 1868                 m0 = m;
 1869                 if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES &&
 1870                     m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) {
 1871                         if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) {
 1872                                 m = m0;
 1873                                 m_collapse(m, TX_MAX_SEGS, &m0);
 1874                         } else
 1875                                 break;
 1876                 }
 1877                 m = m0;
 1878 #endif          
 1879                 if ((err = t3_encap(p, &m, &free)) != 0)
 1880                         break;
 1881                 BPF_MTAP(ifp, m);
 1882                 if (free)
 1883                         m_freem(m);
 1884         }
 1885         txq->flags &= ~TXQ_TRANSMITTING;
 1886         mtx_unlock(&txq->lock);
 1887 
 1888         if (__predict_false(err)) {
 1889                 if (err == ENOMEM) {
 1890                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1891                         IFQ_LOCK(&ifp->if_snd);
 1892                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1893                         IFQ_UNLOCK(&ifp->if_snd);
 1894                 }
 1895         }
 1896         if (err == 0 && m == NULL) 
 1897                 err = ENOBUFS;
 1898         else if ((err == 0) &&  (txq->size <= txq->in_use + TX_MAX_DESC) &&
 1899             (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
 1900                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1901                 err = ENOSPC;
 1902         }
 1903         return (err);
 1904 }
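
/*
 * Illustrative sketch (not part of the driver): on ENOMEM cxgb_start_tx()
 * pushes the dequeued packet back to the head of the send queue and sets
 * IFF_DRV_OACTIVE, so the same packet is retried once descriptors are
 * reclaimed.  Distilled requeue-on-failure pattern; the pktq_* API below is
 * hypothetical.
 */
#include <errno.h>
#include <stddef.h>

struct pkt;
struct pktq;

extern struct pkt *pktq_dequeue(struct pktq *);
extern void pktq_prepend(struct pktq *, struct pkt *);
extern int hw_encap(struct pkt *);	/* returns ENOMEM when the ring is full */

static int
tx_drain(struct pktq *q)
{
	struct pkt *p;
	int err = 0;

	while ((p = pktq_dequeue(q)) != NULL) {
		if ((err = hw_encap(p)) != 0) {
			pktq_prepend(q, p);	/* retry this packet later */
			break;
		}
	}
	return (err);
}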
 1905 
 1906 static void
 1907 cxgb_start_proc(void *arg, int ncount)
 1908 {
 1909         struct ifnet *ifp = arg;
 1910         struct port_info *pi = ifp->if_softc;   
 1911         struct sge_qset *qs;
 1912         struct sge_txq *txq;
 1913         int error;
 1914 
 1915         qs = &pi->adapter->sge.qs[pi->first_qset];
 1916         txq = &qs->txq[TXQ_ETH];
 1917 
 1918         do {
 1919                 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
 1920                         taskqueue_enqueue(pi->tq, &txq->qreclaim_task);
 1921 
 1922                 error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
 1923         } while (error == 0);
 1924 }
 1925 
 1926 static void
 1927 cxgb_start(struct ifnet *ifp)
 1928 {
 1929         struct port_info *pi = ifp->if_softc;   
 1930         struct sge_qset *qs;
 1931         struct sge_txq *txq;
 1932         int err;
 1933 
 1934         qs = &pi->adapter->sge.qs[pi->first_qset];
 1935         txq = &qs->txq[TXQ_ETH];
 1936         
 1937         if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
 1938                 taskqueue_enqueue(pi->tq,
 1939                     &txq->qreclaim_task);
 1940         
 1941         err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
 1942         
 1943         if (err == 0)
 1944                 taskqueue_enqueue(pi->tq, &pi->start_task);
 1945 }
 1946 
 1947 
 1948 static int
 1949 cxgb_media_change(struct ifnet *ifp)
 1950 {
 1951         if_printf(ifp, "media change not supported\n");
 1952         return (ENXIO);
 1953 }
 1954 
 1955 static void
 1956 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 1957 {
 1958         struct port_info *p = ifp->if_softc;
 1959 
 1960         ifmr->ifm_status = IFM_AVALID;
 1961         ifmr->ifm_active = IFM_ETHER;
 1962 
 1963         if (!p->link_config.link_ok)
 1964                 return;
 1965 
 1966         ifmr->ifm_status |= IFM_ACTIVE;
 1967 
 1968         switch (p->link_config.speed) {
 1969         case 10:
 1970                 ifmr->ifm_active |= IFM_10_T;
 1971                 break;
 1972         case 100:
 1973                 ifmr->ifm_active |= IFM_100_TX;
 1974                 break;
 1975         case 1000:
 1976                 ifmr->ifm_active |= IFM_1000_T;
 1977                 break;
 1978         }
 1979         
 1980         if (p->link_config.duplex)
 1981                 ifmr->ifm_active |= IFM_FDX;
 1982         else
 1983                 ifmr->ifm_active |= IFM_HDX;
 1984 }
 1985 
 1986 static void
 1987 cxgb_async_intr(void *data)
 1988 {
 1989         adapter_t *sc = data;
 1990 
 1991         if (cxgb_debug)
 1992                 device_printf(sc->dev, "cxgb_async_intr\n");
 1993         /*
 1994          * May need to sleep - defer to taskqueue
 1995          */
 1996         taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
 1997 }
 1998 
 1999 static void
 2000 cxgb_ext_intr_handler(void *arg, int count)
 2001 {
 2002         adapter_t *sc = (adapter_t *)arg;
 2003 
 2004         if (cxgb_debug)
 2005                 printf("cxgb_ext_intr_handler\n");
 2006 
 2007         t3_phy_intr_handler(sc);
 2008 
 2009         /* Now reenable external interrupts */
 2010         ADAPTER_LOCK(sc);
 2011         if (sc->slow_intr_mask) {
 2012                 sc->slow_intr_mask |= F_T3DBG;
 2013                 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
 2014                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 2015         }
 2016         ADAPTER_UNLOCK(sc);
 2017 }
 2018 
 2019 static void
 2020 check_link_status(adapter_t *sc)
 2021 {
 2022         int i;
 2023 
 2024         for (i = 0; i < sc->params.nports; ++i) {
 2025                 struct port_info *p = &sc->port[i];
 2026 
 2027                 if (!(p->port_type->caps & SUPPORTED_IRQ)) 
 2028                         t3_link_changed(sc, i);
 2029                 p->ifp->if_baudrate = p->link_config.speed * 1000000;
 2030         }
 2031 }
 2032 
 2033 static void
 2034 check_t3b2_mac(struct adapter *adapter)
 2035 {
 2036         int i;
 2037 
 2038         for_each_port(adapter, i) {
 2039                 struct port_info *p = &adapter->port[i];
 2040                 struct ifnet *ifp = p->ifp;
 2041                 int status;
 2042 
 2043                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 
 2044                         continue;
 2045                 
 2046                 status = 0;
 2047                 PORT_LOCK(p);
 2048                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) 
 2049                         status = t3b2_mac_watchdog_task(&p->mac);
 2050                 if (status == 1)
 2051                         p->mac.stats.num_toggled++;
 2052                 else if (status == 2) {
 2053                         struct cmac *mac = &p->mac;
 2054 
 2055                         t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
 2056                             + ETHER_VLAN_ENCAP_LEN);
 2057                         t3_mac_set_address(mac, 0, p->hw_addr);
 2058                         cxgb_set_rxmode(p);
 2059                         t3_link_start(&p->phy, mac, &p->link_config);
 2060                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2061                         t3_port_intr_enable(adapter, p->port_id);
 2062                         p->mac.stats.num_resets++;
 2063                 }
 2064                 PORT_UNLOCK(p);
 2065         }
 2066 }
 2067 
 2068 static void
 2069 cxgb_tick(void *arg)
 2070 {
 2071         adapter_t *sc = (adapter_t *)arg;
 2072 
 2073         taskqueue_enqueue(sc->tq, &sc->tick_task);
 2074         
 2075         if (sc->open_device_map != 0) 
 2076                 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
 2077                     cxgb_tick, sc);
 2078 }
 2079 
 2080 static void
 2081 cxgb_tick_handler(void *arg, int count)
 2082 {
 2083         adapter_t *sc = (adapter_t *)arg;
 2084         const struct adapter_params *p = &sc->params;
 2085 
 2086         ADAPTER_LOCK(sc);
 2087         if (p->linkpoll_period)
 2088                 check_link_status(sc);
 2089 
 2090         /*
 2091          * The adapter lock can currently only be acquired after the
 2092          * port lock.
 2093          */
 2094         ADAPTER_UNLOCK(sc);
 2095 
 2096         if (p->rev == T3_REV_B2 && p->nports < 4) 
 2097                 check_t3b2_mac(sc);
 2098 }
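
/*
 * Illustrative sketch (not part of the driver): cxgb_tick() runs in
 * softclock (callout) context, where sleeping is not allowed, so it only
 * enqueues a task and re-arms itself; cxgb_tick_handler() then runs from
 * the taskqueue, where locks may be taken.  my_softc and its fields are
 * hypothetical; the callout/taskqueue calls are the standard FreeBSD
 * kernel APIs.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/taskqueue.h>

struct my_softc {
	struct callout		tick_ch;
	struct taskqueue	*tq;
	struct task		tick_task;
	int			period;		/* seconds */
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;

	taskqueue_enqueue(sc->tq, &sc->tick_task);	/* defer the real work */
	callout_reset(&sc->tick_ch, sc->period * hz, my_tick, sc);
}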
 2099 
 2100 static void
 2101 touch_bars(device_t dev)
 2102 {
 2103         /*
 2104          * Don't enable yet
 2105          */
 2106 #if !defined(__LP64__) && 0
 2107         u32 v;
 2108 
 2109         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
 2110         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
 2111         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
 2112         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
 2113         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
 2114         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
 2115 #endif
 2116 }
 2117 
 2118 static int
 2119 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
 2120 {
 2121         uint8_t *buf;
 2122         int err = 0;
 2123         u32 aligned_offset, aligned_len, *p;
 2124         struct adapter *adapter = pi->adapter;
 2125 
 2127         aligned_offset = offset & ~3;
 2128         aligned_len = (len + (offset & 3) + 3) & ~3;
 2129 
 2130         if (aligned_offset != offset || aligned_len != len) {
 2131                 /* M_WAITOK allocations sleep until they succeed. */
 2132                 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
 2134                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
 2135                 if (!err && aligned_len > 4)
 2136                         err = t3_seeprom_read(adapter,
 2137                                               aligned_offset + aligned_len - 4,
 2138                                               (u32 *)&buf[aligned_len - 4]);
 2139                 if (err)
 2140                         goto out;
 2141                 memcpy(buf + (offset & 3), data, len);
 2142         } else
 2143                 buf = (uint8_t *)(uintptr_t)data;
 2144 
 2145         err = t3_seeprom_wp(adapter, 0);
 2146         if (err)
 2147                 goto out;
 2148 
 2149         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 2150                 err = t3_seeprom_write(adapter, aligned_offset, *p);
 2151                 aligned_offset += 4;
 2152         }
 2153 
 2154         if (!err)
 2155                 err = t3_seeprom_wp(adapter, 1);
 2156 out:
 2157         if (buf != data)
 2158                 free(buf, M_DEVBUF);
 2159         return (err);
 2160 }
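
/*
 * Illustrative sketch (not part of the driver): set_eeprom() widens an
 * arbitrary (offset, len) byte range to 4-byte boundaries so whole EEPROM
 * words can be read, patched, and written back.  Worked example using the
 * same arithmetic; the values are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int offset = 6, len = 5;	/* caller wants bytes 6..10 */
	unsigned int aligned_offset = offset & ~3u;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3u;

	/* the widened range [4, 12) covers the requested [6, 11) */
	printf("read-modify-write %u bytes at offset %u\n",
	    aligned_len, aligned_offset);	/* 8 bytes at offset 4 */
	return (0);
}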
 2161 
 2162 
      /*
       * Range-check an ioctl parameter: a negative value means the caller
       * left the field unset and is always accepted.
       */
 2163 static int
 2164 in_range(int val, int lo, int hi)
 2165 {
 2166         return (val < 0 || (val <= hi && val >= lo));
 2167 }
 2168 
 2169 static int
 2170 cxgb_extension_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 2171 {
 2172        return (0);
 2173 }
 2174 
 2175 static int
 2176 cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 2177 {
 2178        return (0);
 2179 }
 2180 
 2181 static int
 2182 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
 2183     int fflag, struct thread *td)
 2184 {
 2185         int mmd, error = 0;
 2186         struct port_info *pi = dev->si_drv1;
 2187         adapter_t *sc = pi->adapter;
 2188 
 2189 #ifdef PRIV_SUPPORTED   
 2190         if (priv_check(td, PRIV_DRIVER)) {
 2191                 if (cxgb_debug) 
 2192                         printf("user does not have access to privileged ioctls\n");
 2193                 return (EPERM);
 2194         }
 2195 #else
 2196         if (suser(td)) {
 2197                 if (cxgb_debug)
 2198                         printf("user does not have access to privileged ioctls\n");
 2199                 return (EPERM);
 2200         }
 2201 #endif
 2202         
 2203         switch (cmd) {
 2204         case SIOCGMIIREG: {
 2205                 uint32_t val;
 2206                 struct cphy *phy = &pi->phy;
 2207                 struct mii_data *mid = (struct mii_data *)data;
 2208                 
 2209                 if (!phy->mdio_read)
 2210                         return (EOPNOTSUPP);
 2211                 if (is_10G(sc)) {
 2212                         mmd = mid->phy_id >> 8;
 2213                         if (!mmd)
 2214                                 mmd = MDIO_DEV_PCS;
 2215                         else if (mmd > MDIO_DEV_XGXS)
 2216                                 return (EINVAL);
 2217 
 2218                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
 2219                                              mid->reg_num, &val);
 2220                 } else
 2221                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
 2222                                              mid->reg_num & 0x1f, &val);
 2223                 if (error == 0)
 2224                         mid->val_out = val;
 2225                 break;
 2226         }
 2227         case SIOCSMIIREG: {
 2228                 struct cphy *phy = &pi->phy;
 2229                 struct mii_data *mid = (struct mii_data *)data;
 2230 
 2231                 if (!phy->mdio_write)
 2232                         return (EOPNOTSUPP);
 2233                 if (is_10G(sc)) {
 2234                         mmd = mid->phy_id >> 8;
 2235                         if (!mmd)
 2236                                 mmd = MDIO_DEV_PCS;
 2237                         else if (mmd > MDIO_DEV_XGXS)
 2238                                 return (EINVAL);
 2239                         
 2240                         error = phy->mdio_write(sc, mid->phy_id & 0x1f,
 2241                                               mmd, mid->reg_num, mid->val_in);
 2242                 } else
 2243                         error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
 2244                                               mid->reg_num & 0x1f,
 2245                                               mid->val_in);
 2246                 break;
 2247         }
 2248         case CHELSIO_SETREG: {
 2249                 struct ch_reg *edata = (struct ch_reg *)data;
 2250                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2251                         return (EFAULT);
 2252                 t3_write_reg(sc, edata->addr, edata->val);
 2253                 break;
 2254         }
 2255         case CHELSIO_GETREG: {
 2256                 struct ch_reg *edata = (struct ch_reg *)data;
 2257                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2258                         return (EFAULT);
 2259                 edata->val = t3_read_reg(sc, edata->addr);
 2260                 break;
 2261         }
 2262         case CHELSIO_GET_SGE_CONTEXT: {
 2263                 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
 2264                 mtx_lock(&sc->sge.reg_lock);
 2265                 switch (ecntxt->cntxt_type) {
 2266                 case CNTXT_TYPE_EGRESS:
 2267                         error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
 2268                             ecntxt->data);
 2269                         break;
 2270                 case CNTXT_TYPE_FL:
 2271                         error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
 2272                             ecntxt->data);
 2273                         break;
 2274                 case CNTXT_TYPE_RSP:
 2275                         error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
 2276                             ecntxt->data);
 2277                         break;
 2278                 case CNTXT_TYPE_CQ:
 2279                         error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
 2280                             ecntxt->data);
 2281                         break;
 2282                 default:
 2283                         error = EINVAL;
 2284                         break;
 2285                 }
 2286                 mtx_unlock(&sc->sge.reg_lock);
 2287                 break;
 2288         }
 2289         case CHELSIO_GET_SGE_DESC: {
 2290                 struct ch_desc *edesc = (struct ch_desc *)data;
 2291                 int ret;
                      /* each qset has 6 rings: 3 Tx, 2 free-list, 1 response */
 2292                 if (edesc->queue_num >= SGE_QSETS * 6)
 2293                         return (EINVAL);
 2294                 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
 2295                     edesc->queue_num % 6, edesc->idx, edesc->data);
 2296                 if (ret < 0)
 2297                         return (EINVAL);
 2298                 edesc->size = ret;
 2299                 break;
 2300         }
 2301         case CHELSIO_SET_QSET_PARAMS: {
 2302                 struct qset_params *q;
 2303                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2304                 
 2305                 if (t->qset_idx >= SGE_QSETS)
 2306                         return (EINVAL);
 2307                 if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
 2308                     !in_range(t->cong_thres, 0, 255) ||
 2309                     !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
 2310                               MAX_TXQ_ENTRIES) ||
 2311                     !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
 2312                               MAX_TXQ_ENTRIES) ||
 2313                     !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
 2314                               MAX_CTRL_TXQ_ENTRIES) ||
 2315                     !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
 2316                     !in_range(t->fl_size[1], MIN_FL_ENTRIES,
 2317                               MAX_RX_JUMBO_BUFFERS) ||
 2318                     !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
 2319                         return (EINVAL);
 2320                 if ((sc->flags & FULL_INIT_DONE) &&
 2321                     (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
 2322                      t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
 2323                      t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
 2324                      t->polling >= 0 || t->cong_thres >= 0))
 2325                         return (EBUSY);
 2326 
 2327                 q = &sc->params.sge.qset[t->qset_idx];
 2328 
 2329                 if (t->rspq_size >= 0)
 2330                         q->rspq_size = t->rspq_size;
 2331                 if (t->fl_size[0] >= 0)
 2332                         q->fl_size = t->fl_size[0];
 2333                 if (t->fl_size[1] >= 0)
 2334                         q->jumbo_size = t->fl_size[1];
 2335                 if (t->txq_size[0] >= 0)
 2336                         q->txq_size[0] = t->txq_size[0];
 2337                 if (t->txq_size[1] >= 0)
 2338                         q->txq_size[1] = t->txq_size[1];
 2339                 if (t->txq_size[2] >= 0)
 2340                         q->txq_size[2] = t->txq_size[2];
 2341                 if (t->cong_thres >= 0)
 2342                         q->cong_thres = t->cong_thres;
 2343                 if (t->intr_lat >= 0) {
 2344                         struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
 2345 
 2346                         q->coalesce_nsecs = t->intr_lat*1000;
 2347                         t3_update_qset_coalesce(qs, q);
 2348                 }
 2349                 break;
 2350         }
 2351         case CHELSIO_GET_QSET_PARAMS: {
 2352                 struct qset_params *q;
 2353                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2354 
 2355                 if (t->qset_idx >= SGE_QSETS)
 2356                         return (EINVAL);
 2357 
 2358                 q = &sc->params.sge.qset[t->qset_idx];
 2359                 t->rspq_size   = q->rspq_size;
 2360                 t->txq_size[0] = q->txq_size[0];
 2361                 t->txq_size[1] = q->txq_size[1];
 2362                 t->txq_size[2] = q->txq_size[2];
 2363                 t->fl_size[0]  = q->fl_size;
 2364                 t->fl_size[1]  = q->jumbo_size;
 2365                 t->polling     = q->polling;
 2366                 t->intr_lat    = q->coalesce_nsecs / 1000;
 2367                 t->cong_thres  = q->cong_thres;
 2368                 break;
 2369         }
 2370         case CHELSIO_SET_QSET_NUM: {
 2371                 struct ch_reg *edata = (struct ch_reg *)data;
 2372                 unsigned int port_idx = pi->port_id;
 2373                 
 2374                 if (sc->flags & FULL_INIT_DONE)
 2375                         return (EBUSY);
 2376                 if (edata->val < 1 ||
 2377                     (edata->val > 1 && !(sc->flags & USING_MSIX)))
 2378                         return (EINVAL);
 2379                 if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
 2380                         return (EINVAL);
 2381                 sc->port[port_idx].nqsets = edata->val;
 2382                 sc->port[0].first_qset = 0;
 2383                 /*
 2384                  * XXX hardcode ourselves to 2 ports just like Linux does
 2385                  */
 2386                 sc->port[1].first_qset = sc->port[0].nqsets;
 2387                 break;
 2388         }
 2389         case CHELSIO_GET_QSET_NUM: {
 2390                 struct ch_reg *edata = (struct ch_reg *)data;
 2391                 edata->val = pi->nqsets;
 2392                 break;
 2393         }
 2394 #ifdef notyet           
 2395         case CHELSIO_LOAD_FW:
 2396         case CHELSIO_GET_PM:
 2397         case CHELSIO_SET_PM:
 2398                 return (EOPNOTSUPP);
 2399                 break;
 2400 #endif          
 2401         case CHELSIO_SETMTUTAB: {
 2402                 struct ch_mtus *m = (struct ch_mtus *)data;
 2403                 int i;
 2404                 
 2405                 if (!is_offload(sc))
 2406                         return (EOPNOTSUPP);
 2407                 if (offload_running(sc))
 2408                         return (EBUSY);
 2409                 if (m->nmtus != NMTUS)
 2410                         return (EINVAL);
 2411                 if (m->mtus[0] < 81)         /* accommodate SACK */
 2412                         return (EINVAL);
 2413                 
 2414                 /*
 2415                  * MTUs must be in ascending order
 2416                  */
 2417                 for (i = 1; i < NMTUS; ++i)
 2418                         if (m->mtus[i] < m->mtus[i - 1])
 2419                                 return (EINVAL);
 2420 
 2421                 memcpy(sc->params.mtus, m->mtus,
 2422                        sizeof(sc->params.mtus));
 2423                 break;
 2424         }
 2425         case CHELSIO_GETMTUTAB: {
 2426                 struct ch_mtus *m = (struct ch_mtus *)data;
 2427 
 2428                 if (!is_offload(sc))
 2429                         return (EOPNOTSUPP);
 2430 
 2431                 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
 2432                 m->nmtus = NMTUS;
 2433                 break;
 2434         }
 2435         case CHELSIO_DEVUP:
 2436                 if (!is_offload(sc))
 2437                         return (EOPNOTSUPP);
 2438                 return (offload_open(pi));
 2440         case CHELSIO_GET_MEM: {
 2441                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2442                 struct mc7 *mem;
 2443                 uint8_t *useraddr;
 2444                 u64 buf[32];
 2445                 
 2446                 if (!is_offload(sc))
 2447                         return (EOPNOTSUPP);
 2448                 if (!(sc->flags & FULL_INIT_DONE))
 2449                         return (EIO);         /* need the memory controllers */
 2450                 if ((t->addr & 0x7) || (t->len & 0x7))
 2451                         return (EINVAL);
 2452                 if (t->mem_id == MEM_CM)
 2453                         mem = &sc->cm;
 2454                 else if (t->mem_id == MEM_PMRX)
 2455                         mem = &sc->pmrx;
 2456                 else if (t->mem_id == MEM_PMTX)
 2457                         mem = &sc->pmtx;
 2458                 else
 2459                         return (EINVAL);
 2460 
 2461                 /*
 2462                  * Version scheme:
 2463                  * bits 0..9: chip version
 2464                  * bits 10..15: chip revision
 2465                  */
 2466                 t->version = 3 | (sc->params.rev << 10);
 2467                 
 2468                 /*
 2469                  * Read 256 bytes at a time, as len can be large and we
 2470                  * don't want huge intermediate buffers (see the sketch
 2471                  * after this function).
                       */
 2472                 useraddr = (uint8_t *)(t + 1);   /* advance to start of buffer */
 2473                 while (t->len) {
 2474                         unsigned int chunk = min(t->len, sizeof(buf));
 2475 
 2476                         error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
 2477                         if (error)
 2478                                 return (-error);
 2479                         if (copyout(buf, useraddr, chunk))
 2480                                 return (EFAULT);
 2481                         useraddr += chunk;
 2482                         t->addr += chunk;
 2483                         t->len -= chunk;
 2484                 }
 2485                 break;
 2486         }
 2487         case CHELSIO_READ_TCAM_WORD: {
 2488                 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
 2489 
 2490                 if (!is_offload(sc))
 2491                         return (EOPNOTSUPP);
 2492                 if (!(sc->flags & FULL_INIT_DONE))
 2493                         return (EIO);         /* need MC5 */            
 2494                 return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
 2496         }
 2497         case CHELSIO_SET_TRACE_FILTER: {
 2498                 struct ch_trace *t = (struct ch_trace *)data;
 2499                 const struct trace_params *tp;
 2500 
 2501                 tp = (const struct trace_params *)&t->sip;
 2502                 if (t->config_tx)
 2503                         t3_config_trace_filter(sc, tp, 0, t->invert_match,
 2504                                                t->trace_tx);
 2505                 if (t->config_rx)
 2506                         t3_config_trace_filter(sc, tp, 1, t->invert_match,
 2507                                                t->trace_rx);
 2508                 break;
 2509         }
 2510         case CHELSIO_SET_PKTSCHED: {
 2511                 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
 2512                 if (sc->open_device_map == 0)
 2513                         return (EAGAIN);
 2514                 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
 2515                     p->binding);
 2516                 break;
 2517         }
 2518         case CHELSIO_IFCONF_GETREGS: {
 2519                 struct ifconf_regs *regs = (struct ifconf_regs *)data;
 2520                 int reglen = cxgb_get_regs_len();
 2521                 uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
 2522                 if (buf == NULL)
 2523                         return (ENOMEM);
 2524                 if (regs->len > reglen)
 2525                         regs->len = reglen;
 2526                 else if (regs->len < reglen) {
 2527                         error = E2BIG;
 2528                         goto done;
 2529                 }
 2530                 cxgb_get_regs(sc, regs, buf);
 2531                 error = copyout(buf, regs->data, reglen);
 2532 
 2533 done:
 2534                 free(buf, M_DEVBUF);
 2535 
 2536                 break;
 2537         }
 2538         case CHELSIO_SET_HW_SCHED: {
 2539                 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
 2540                 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
 2541 
 2542                 if ((sc->flags & FULL_INIT_DONE) == 0)
 2543                         return (EAGAIN);       /* need TP to be initialized */
 2544                 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
 2545                     !in_range(t->channel, 0, 1) ||
 2546                     !in_range(t->kbps, 0, 10000000) ||
 2547                     !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
 2548                     !in_range(t->flow_ipg, 0,
 2549                               dack_ticks_to_usec(sc, 0x7ff)))
 2550                         return (EINVAL);
 2551 
 2552                 if (t->kbps >= 0) {
 2553                         error = t3_config_sched(sc, t->kbps, t->sched);
 2554                         if (error < 0)
 2555                                 return (-error);
 2556                 }
 2557                 if (t->class_ipg >= 0)
 2558                         t3_set_sched_ipg(sc, t->sched, t->class_ipg);
 2559                 if (t->flow_ipg >= 0) {
 2560                         t->flow_ipg *= 1000;     /* us -> ns */
 2561                         t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
 2562                 }
 2563                 if (t->mode >= 0) {
 2564                         int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
 2565 
 2566                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 2567                                          bit, t->mode ? bit : 0);
 2568                 }
 2569                 if (t->channel >= 0)
 2570                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 2571                                          1 << t->sched, t->channel << t->sched);
 2572                 break;
 2573         }       
 2574         default:
 2575                 return (EOPNOTSUPP);
 2576                 break;
 2577         }
 2578 
 2579         return (error);
 2580 }
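
/*
 * Illustrative sketch (not part of the driver): the CHELSIO_GET_MEM loop
 * above bounds kernel memory use by streaming an arbitrarily large range
 * through one small on-stack buffer.  Userland analogue of the chunking
 * pattern; copy_chunked() and its memcpy stand-ins are hypothetical.
 */
#include <string.h>

#define	CHUNK	256

static void
copy_chunked(char *dst, const char *src, size_t len)
{
	char buf[CHUNK];

	while (len > 0) {
		size_t chunk = len < sizeof(buf) ? len : sizeof(buf);

		memcpy(buf, src, chunk);	/* stands in for t3_mc7_bd_read() */
		memcpy(dst, buf, chunk);	/* stands in for copyout() */
		src += chunk;
		dst += chunk;
		len -= chunk;
	}
}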
 2581 
 2582 static __inline void
 2583 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
 2584     unsigned int end)
 2585 {
              /* start is a byte offset into buf, so add it before casting */
 2586         uint32_t *p = (uint32_t *)(buf + start);
 2587 
 2588         for ( ; start <= end; start += sizeof(uint32_t))
 2589                 *p++ = t3_read_reg(ap, start);
 2590 }
 2591 
 2592 #define T3_REGMAP_SIZE (3 * 1024)
 2593 static int
 2594 cxgb_get_regs_len(void)
 2595 {
 2596         return (T3_REGMAP_SIZE);
 2597 }
 2598 #undef T3_REGMAP_SIZE
 2599 
 2600 static void
 2601 cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
 2602 {           
 2603         
 2604         /*
 2605          * Version scheme:
 2606          * bits 0..9: chip version
 2607          * bits 10..15: chip revision
 2608          * bit 31: set for PCIe cards
 2609          */
 2610         regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
 2611 
 2612         /*
 2613          * We skip the MAC statistics registers because they are clear-on-read.
 2614          * Also reading multi-register stats would need to synchronize with the
 2615          * periodic mac stats accumulation.  Hard to justify the complexity.
 2616          */
 2617         memset(buf, 0, REGDUMP_SIZE);
 2618         reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
 2619         reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
 2620         reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
 2621         reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
 2622         reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
 2623         reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
 2624                        XGM_REG(A_XGM_SERDES_STAT3, 1));
 2625         reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
 2626                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
 2627 }
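
/*
 * Illustrative sketch (not part of the driver): decoding the version word
 * that cxgb_get_regs() builds above -- bits 0..9 chip version, bits 10..15
 * chip revision, bit 31 set for PCIe cards.  The sample value is
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t version = 3 | (2 << 10) | (1u << 31);

	printf("chip version %u, revision %u, %s\n",
	    (unsigned int)(version & 0x3ff),		/* bits 0..9 */
	    (unsigned int)((version >> 10) & 0x3f),	/* bits 10..15 */
	    (version >> 31) ? "PCIe" : "not PCIe");
	return (0);
}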
