


FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_main.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2009, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/7.3/sys/dev/cxgb/cxgb_main.c 202875 2010-01-23 08:43:11Z np $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/bus.h>
   37 #include <sys/module.h>
   38 #include <sys/pciio.h>
   39 #include <sys/conf.h>
   40 #include <machine/bus.h>
   41 #include <machine/resource.h>
   42 #include <sys/bus_dma.h>
   43 #include <sys/ktr.h>
   44 #include <sys/rman.h>
   45 #include <sys/ioccom.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/linker.h>
   48 #include <sys/firmware.h>
   49 #include <sys/socket.h>
   50 #include <sys/sockio.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/syslog.h>
   54 #include <sys/queue.h>
   55 #include <sys/taskqueue.h>
   56 #include <sys/proc.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_arp.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <netinet/in_systm.h>
   68 #include <netinet/in.h>
   69 #include <netinet/if_ether.h>
   70 #include <netinet/ip.h>
   72 #include <netinet/tcp.h>
   73 #include <netinet/udp.h>
   74 
   75 #include <dev/pci/pcireg.h>
   76 #include <dev/pci/pcivar.h>
   77 #include <dev/pci/pci_private.h>
   78 
   79 #ifdef CONFIG_DEFINED
   80 #include <cxgb_include.h>
   81 #else
   82 #include <dev/cxgb/cxgb_include.h>
   83 #endif
   84 
   85 #ifdef PRIV_SUPPORTED
   86 #include <sys/priv.h>
   87 #endif
   88 
   89 static int cxgb_setup_interrupts(adapter_t *);
   90 static void cxgb_teardown_interrupts(adapter_t *);
   91 static void cxgb_init(void *);
   92 static int cxgb_init_locked(struct port_info *);
   93 static int cxgb_uninit_locked(struct port_info *);
   94 static int cxgb_uninit_synchronized(struct port_info *);
   95 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
   96 static int cxgb_media_change(struct ifnet *);
   97 static int cxgb_ifm_type(int);
   98 static void cxgb_build_medialist(struct port_info *);
   99 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
  100 static int setup_sge_qsets(adapter_t *);
  101 static void cxgb_async_intr(void *);
  102 static void cxgb_ext_intr_handler(void *, int);
  103 static void cxgb_tick_handler(void *, int);
  104 static void cxgb_tick(void *);
  105 static void setup_rss(adapter_t *sc);
  106 
  107 /* Attachment glue for the PCI controller end of the device.  Each port of
  108  * the device is attached separately, as defined later.
  109  */
  110 static int cxgb_controller_probe(device_t);
  111 static int cxgb_controller_attach(device_t);
  112 static int cxgb_controller_detach(device_t);
  113 static void cxgb_free(struct adapter *);
  114 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
  115     unsigned int end);
  116 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
  117 static int cxgb_get_regs_len(void);
  118 static int offload_open(struct port_info *pi);
  119 static void touch_bars(device_t dev);
  120 static int offload_close(struct t3cdev *tdev);
  121 static void cxgb_update_mac_settings(struct port_info *p);
  122 
  123 static device_method_t cxgb_controller_methods[] = {
  124         DEVMETHOD(device_probe,         cxgb_controller_probe),
  125         DEVMETHOD(device_attach,        cxgb_controller_attach),
  126         DEVMETHOD(device_detach,        cxgb_controller_detach),
  127 
  128         /* bus interface */
  129         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  130         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  131 
  132         { 0, 0 }
  133 };
  134 
  135 static driver_t cxgb_controller_driver = {
  136         "cxgbc",
  137         cxgb_controller_methods,
  138         sizeof(struct adapter)
  139 };
  140 
  141 static devclass_t       cxgb_controller_devclass;
  142 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
  143 
  144 /*
  145  * Attachment glue for the ports.  Attachment is done directly to the
  146  * controller device.
  147  */
  148 static int cxgb_port_probe(device_t);
  149 static int cxgb_port_attach(device_t);
  150 static int cxgb_port_detach(device_t);
  151 
  152 static device_method_t cxgb_port_methods[] = {
  153         DEVMETHOD(device_probe,         cxgb_port_probe),
  154         DEVMETHOD(device_attach,        cxgb_port_attach),
  155         DEVMETHOD(device_detach,        cxgb_port_detach),
  156         { 0, 0 }
  157 };
  158 
  159 static driver_t cxgb_port_driver = {
  160         "cxgb",
  161         cxgb_port_methods,
  162         0
  163 };
  164 
  165 static d_ioctl_t cxgb_extension_ioctl;
  166 static d_open_t cxgb_extension_open;
  167 static d_close_t cxgb_extension_close;
  168 
  169 static struct cdevsw cxgb_cdevsw = {
  170        .d_version =    D_VERSION,
  171        .d_flags =      0,
  172        .d_open =       cxgb_extension_open,
  173        .d_close =      cxgb_extension_close,
  174        .d_ioctl =      cxgb_extension_ioctl,
  175        .d_name =       "cxgb",
  176 };
  177 
  178 static devclass_t       cxgb_port_devclass;
  179 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
  180 
  181 /*
  182  * The driver uses the best interrupt scheme available on a platform in the
  183  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
  184  * of these schemes the driver may consider as follows:
  185  *
  186  * msi = 2: choose from among all three options
  187  * msi = 1: only consider MSI and pin interrupts
  188  * msi = 0: force pin interrupts
  189  */
  190 static int msi_allowed = 2;
  191 
  192 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
  193 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
  194 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
  195     "MSI-X, MSI, INTx selector");
  196 
  197 /*
  198  * The driver enables offload by default.
  199  * To disable it, use ofld_disable = 1.
  200  */
  201 static int ofld_disable = 0;
  202 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
  203 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
  204     "disable ULP offload");
  205 
  206 /*
  207  * The driver uses an auto-queue algorithm by default.
  208  * To disable it and force a single queue-set per port, use multiq = 0
  209  */
  210 static int multiq = 1;
  211 TUNABLE_INT("hw.cxgb.multiq", &multiq);
  212 SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
  213     "use min(ncpus/ports, 8) queue-sets per port");
  214 
  215 /*
  216  * By default the driver will not update the firmware unless
  217  * it was compiled against a newer version.
  218  *
  219  */
  220 static int force_fw_update = 0;
  221 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
  222 SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
  223     "update firmware even if up to date");
  224 
  225 int cxgb_use_16k_clusters = 1;
  226 TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
  227 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
  228     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
  229 
  230 /*
  231  * Tune the size of the output queue.
  232  */
  233 int cxgb_snd_queue_len = IFQ_MAXLEN;
  234 TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
  235 SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
  236     &cxgb_snd_queue_len, 0, "send queue size");
  237 
  238 
  239 enum {
  240         MAX_TXQ_ENTRIES      = 16384,
  241         MAX_CTRL_TXQ_ENTRIES = 1024,
  242         MAX_RSPQ_ENTRIES     = 16384,
  243         MAX_RX_BUFFERS       = 16384,
  244         MAX_RX_JUMBO_BUFFERS = 16384,
  245         MIN_TXQ_ENTRIES      = 4,
  246         MIN_CTRL_TXQ_ENTRIES = 4,
  247         MIN_RSPQ_ENTRIES     = 32,
  248         MIN_FL_ENTRIES       = 32,
  249         MIN_FL_JUMBO_ENTRIES = 32
  250 };
  251 
  252 struct filter_info {
  253         u32 sip;
  254         u32 sip_mask;
  255         u32 dip;
  256         u16 sport;
  257         u16 dport;
  258         u32 vlan:12;
  259         u32 vlan_prio:3;
  260         u32 mac_hit:1;
  261         u32 mac_idx:4;
  262         u32 mac_vld:1;
  263         u32 pkt_type:2;
  264         u32 report_filter_id:1;
  265         u32 pass:1;
  266         u32 rss:1;
  267         u32 qset:3;
  268         u32 locked:1;
  269         u32 valid:1;
  270 };
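Everything from vlan through valid in filter_info above is a C bit-field, so
those twelve attributes pack into a single 32-bit word.  A standalone sketch
of filling in one entry; the field values are hypothetical, and u32/u16 are
assumed to be the usual fixed-width typedefs:

    static void
    example_fill_filter(struct filter_info *f)
    {
            f->sip = 0x0a000001;            /* source IP 10.0.0.1 */
            f->sip_mask = 0xffffffff;       /* exact source match */
            f->dport = 80;                  /* destination port */
            f->vlan = 0xfff;                /* 12-bit field: no VLAN */
            f->vlan_prio = 7;               /* FILTER_NO_VLAN_PRI, below */
            f->pass = 1;                    /* pass rather than drop */
            f->qset = 0;                    /* steer to queue set 0 */
            f->valid = 1;
    }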
  271 
  272 enum { FILTER_NO_VLAN_PRI = 7 };
  273 
  274 #define EEPROM_MAGIC 0x38E2F10C
  275 
  276 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  277 
  278 /* Table for probing the cards.  The desc field isn't actually used */
  279 struct cxgb_ident {
  280         uint16_t        vendor;
  281         uint16_t        device;
  282         int             index;
  283         char            *desc;
  284 } cxgb_identifiers[] = {
  285         {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
  286         {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
  287         {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
  288         {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
  289         {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
  290         {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
  291         {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
  292         {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
  293         {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
  294         {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
  295         {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
  296         {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
  297         {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
  298         {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
  299         {0, 0, 0, NULL}
  300 };
  301 
  302 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
  303 
  304 
  305 static __inline char
  306 t3rev2char(struct adapter *adapter)
  307 {
  308         char rev = 'z';
  309 
  310         switch(adapter->params.rev) {
  311         case T3_REV_A:
  312                 rev = 'a';
  313                 break;
  314         case T3_REV_B:
  315         case T3_REV_B2:
  316                 rev = 'b';
  317                 break;
  318         case T3_REV_C:
  319                 rev = 'c';
  320                 break;
  321         }
  322         return rev;
  323 }
  324 
  325 static struct cxgb_ident *
  326 cxgb_get_ident(device_t dev)
  327 {
  328         struct cxgb_ident *id;
  329 
  330         for (id = cxgb_identifiers; id->desc != NULL; id++) {
  331                 if ((id->vendor == pci_get_vendor(dev)) &&
  332                     (id->device == pci_get_device(dev))) {
  333                         return (id);
  334                 }
  335         }
  336         return (NULL);
  337 }
  338 
  339 static const struct adapter_info *
  340 cxgb_get_adapter_info(device_t dev)
  341 {
  342         struct cxgb_ident *id;
  343         const struct adapter_info *ai;
  344 
  345         id = cxgb_get_ident(dev);
  346         if (id == NULL)
  347                 return (NULL);
  348 
  349         ai = t3_get_adapter_info(id->index);
  350 
  351         return (ai);
  352 }
  353 
  354 static int
  355 cxgb_controller_probe(device_t dev)
  356 {
  357         const struct adapter_info *ai;
  358         char *ports, buf[80];
  359         int nports;
  360 
  361         ai = cxgb_get_adapter_info(dev);
  362         if (ai == NULL)
  363                 return (ENXIO);
  364 
  365         nports = ai->nports0 + ai->nports1;
  366         if (nports == 1)
  367                 ports = "port";
  368         else
  369                 ports = "ports";
  370 
  371         snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
  372         device_set_desc_copy(dev, buf);
  373         return (BUS_PROBE_DEFAULT);
  374 }
  375 
  376 #define FW_FNAME "cxgb_t3fw"
  377 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
  378 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
  379 
  380 static int
  381 upgrade_fw(adapter_t *sc)
  382 {
  383 #ifdef FIRMWARE_LATEST
  384         const struct firmware *fw;
  385 #else
  386         struct firmware *fw;
  387 #endif  
  388         int status;
  389         
  390         if ((fw = firmware_get(FW_FNAME)) == NULL)  {
  391                 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
  392                 return (ENOENT);
  393         } else
  394                 device_printf(sc->dev, "updating firmware on card\n");
  395         status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
  396 
  397         device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
  398         
  399         firmware_put(fw, FIRMWARE_UNLOAD);
  400 
  401         return (status);        
  402 }
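firmware_get() above looks FW_FNAME up in the firmware(9) registry and will
attempt to load a kernel module of that name if the image is not already
registered.  A sketch, assuming the firmware image is built as the usual
standalone module:

    # make the image available to firmware_get("cxgb_t3fw")
    kldload cxgb_t3fw

The firmware_put(..., FIRMWARE_UNLOAD) call releases the reference and allows
that module to be unloaded again.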
  403 
  404 /*
  405  * The cxgb_controller_attach function is responsible for the initial
  406  * bringup of the device.  Its responsibilities include:
  407  *
  408  *  1. Determine if the device supports MSI or MSI-X.
  409  *  2. Allocate bus resources so that we can access the Base Address Register.
  410  *  3. Create and initialize mutexes for the controller and its control
  411  *     logic such as SGE and MDIO.
  412  *  4. Call the hardware-specific setup routine for the adapter as a whole.
  413  *  5. Allocate the BAR for doing MSI-X.
  414  *  6. Set up the line interrupt iff MSI-X is not supported.
  415  *  7. Create the driver's taskq.
  416  *  8. Start one task queue service thread.
  417  *  9. Check if the firmware and SRAM are up-to-date.  They will be
  418  *     auto-updated later (before FULL_INIT_DONE), if required.
  419  * 10. Create a child device for each MAC (port).
  420  * 11. Initialize T3 private state.
  421  * 12. Trigger the LED.
  422  * 13. Set up offload iff supported.
  423  * 14. Reset/restart the tick callout.
  424  * 15. Attach sysctls.
  425  *
  426  * NOTE: Any modification or deviation from this list MUST be reflected in
  427  * the above comment.  Failure to do so will result in problems on various
  428  * error conditions including link flapping.
  429  */
  430 static int
  431 cxgb_controller_attach(device_t dev)
  432 {
  433         device_t child;
  434         const struct adapter_info *ai;
  435         struct adapter *sc;
  436         int i, error = 0;
  437         uint32_t vers;
  438         int port_qsets = 1;
  439 #ifdef MSI_SUPPORTED
  440         int msi_needed, reg;
  441 #endif
  442         char buf[80];
  443 
  444         sc = device_get_softc(dev);
  445         sc->dev = dev;
  446         sc->msi_count = 0;
  447         ai = cxgb_get_adapter_info(dev);
  448 
  449         /*
  450          * XXX not really related but a recent addition
  451          */
  452 #ifdef MSI_SUPPORTED    
  453         /* find the PCIe link width and set max read request to 4KB */
  454         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
  455                 uint16_t lnk, pectl;
  456                 lnk = pci_read_config(dev, reg + 0x12, 2);
  457                 sc->link_width = (lnk >> 4) & 0x3f;
  458                 
  459                 pectl = pci_read_config(dev, reg + 0x8, 2);
  460                 pectl = (pectl & ~0x7000) | (5 << 12);
  461                 pci_write_config(dev, reg + 0x8, pectl, 2);
  462         }
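        /*
         * A note on the magic numbers above, assuming the standard PCIe
         * capability layout: reg + 0x12 is the Link Status register, whose
         * bits 9:4 hold the negotiated link width, and reg + 0x8 is Device
         * Control, whose bits 14:12 encode Max_Read_Request_Size as
         * 128 << value.  Writing 5 therefore requests 128 << 5 = 4096
         * bytes, i.e. the 4KB from the comment above.
         */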
  463 
  464         if (sc->link_width != 0 && sc->link_width <= 4 &&
  465             (ai->nports0 + ai->nports1) <= 2) {
  466                 device_printf(sc->dev,
  467                     "PCIe x%d Link, expect reduced performance\n",
  468                     sc->link_width);
  469         }
  470 #endif
  471         touch_bars(dev);
  472         pci_enable_busmaster(dev);
  473         /*
  474          * Allocate the registers and make them available to the driver.
  475          * The registers that we care about for NIC mode are in BAR 0
  476          */
  477         sc->regs_rid = PCIR_BAR(0);
  478         if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  479             &sc->regs_rid, RF_ACTIVE)) == NULL) {
  480                 device_printf(dev, "Cannot allocate BAR region 0\n");
  481                 return (ENXIO);
  482         }
  483         sc->udbs_rid = PCIR_BAR(2);
  484         sc->udbs_res = NULL;
  485         if (is_offload(sc) &&
  486             ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  487                    &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
  488                 device_printf(dev, "Cannot allocate BAR region 2\n");
  489                 error = ENXIO;
  490                 goto out;
  491         }
  492 
  493         snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
  494             device_get_unit(dev));
  495         ADAPTER_LOCK_INIT(sc, sc->lockbuf);
  496 
  497         snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
  498             device_get_unit(dev));
  499         snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
  500             device_get_unit(dev));
  501         snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
  502             device_get_unit(dev));
  503         
  504         MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
  505         MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
  506         MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
  507         
  508         sc->bt = rman_get_bustag(sc->regs_res);
  509         sc->bh = rman_get_bushandle(sc->regs_res);
  510         sc->mmio_len = rman_get_size(sc->regs_res);
  511 
  512         for (i = 0; i < MAX_NPORTS; i++)
  513                 sc->port[i].adapter = sc;
  514 
  515         if (t3_prep_adapter(sc, ai, 1) < 0) {
  516                 printf("prep adapter failed\n");
  517                 error = ENODEV;
  518                 goto out;
  519         }
  520         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
  521          * enough messages for the queue sets.  If that fails, try falling
  522          * back to MSI.  If that fails, then try falling back to the legacy
  523          * interrupt pin model.
  524          */
  525 #ifdef MSI_SUPPORTED
  526 
  527         sc->msix_regs_rid = 0x20;
  528         if ((msi_allowed >= 2) &&
  529             (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  530             &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
  531 
  532                 if (multiq)
  533                         port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
  534                 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
  535 
  536                 if (pci_msix_count(dev) == 0 ||
  537                     (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
  538                     sc->msi_count != msi_needed) {
  539                         device_printf(dev, "alloc msix failed - "
  540                                       "msi_count=%d, msi_needed=%d, err=%d; "
  541                                       "will try MSI\n", sc->msi_count,
  542                                       msi_needed, error);
  543                         sc->msi_count = 0;
  544                         port_qsets = 1;
  545                         pci_release_msi(dev);
  546                         bus_release_resource(dev, SYS_RES_MEMORY,
  547                             sc->msix_regs_rid, sc->msix_regs_res);
  548                         sc->msix_regs_res = NULL;
  549                 } else {
  550                         sc->flags |= USING_MSIX;
  551                         sc->cxgb_intr = cxgb_async_intr;
  552                         device_printf(dev,
  553                                       "using MSI-X interrupts (%u vectors)\n",
  554                                       sc->msi_count);
  555                 }
  556         }
  557 
  558         if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
  559                 sc->msi_count = 1;
  560                 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
  561                         device_printf(dev, "alloc msi failed - "
  562                                       "err=%d; will try INTx\n", error);
  563                         sc->msi_count = 0;
  564                         port_qsets = 1;
  565                         pci_release_msi(dev);
  566                 } else {
  567                         sc->flags |= USING_MSI;
  568                         sc->cxgb_intr = t3_intr_msi;
  569                         device_printf(dev, "using MSI interrupts\n");
  570                 }
  571         }
  572 #endif
  573         if (sc->msi_count == 0) {
  574                 device_printf(dev, "using line interrupts\n");
  575                 sc->cxgb_intr = t3b_intr;
  576         }
  577 
  578         /* Create a private taskqueue thread for handling driver events */
  579 #ifdef TASKQUEUE_CURRENT        
  580         sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
  581             taskqueue_thread_enqueue, &sc->tq);
  582 #else
  583         sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
  584             taskqueue_thread_enqueue, &sc->tq);
  585 #endif  
  586         if (sc->tq == NULL) {
  587                 device_printf(dev, "failed to allocate controller task queue\n");
  588                 goto out;
  589         }
  590 
  591         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
  592             device_get_nameunit(dev));
  593         TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
  594         TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
  595 
  596         
  597         /* Create a periodic callout for checking adapter status */
  598         callout_init(&sc->cxgb_tick_ch, TRUE);
  599         
  600         if (t3_check_fw_version(sc) < 0 || force_fw_update) {
  601                 /*
  602                  * Warn user that a firmware update will be attempted in init.
  603                  */
  604                 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
  605                     FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  606                 sc->flags &= ~FW_UPTODATE;
  607         } else {
  608                 sc->flags |= FW_UPTODATE;
  609         }
  610 
  611         if (t3_check_tpsram_version(sc) < 0) {
  612                 /*
  613                  * Warn user that a firmware update will be attempted in init.
  614                  */
  615                 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
  616                     t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  617                 sc->flags &= ~TPS_UPTODATE;
  618         } else {
  619                 sc->flags |= TPS_UPTODATE;
  620         }
  621         
  622         /*
  623          * Create a child device for each MAC.  The ethernet attachment
  624          * will be done in these children.
  625          */     
  626         for (i = 0; i < (sc)->params.nports; i++) {
  627                 struct port_info *pi;
  628                 
  629                 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
  630                         device_printf(dev, "failed to add child port\n");
  631                         error = EINVAL;
  632                         goto out;
  633                 }
  634                 pi = &sc->port[i];
  635                 pi->adapter = sc;
  636                 pi->nqsets = port_qsets;
  637                 pi->first_qset = i*port_qsets;
  638                 pi->port_id = i;
  639                 pi->tx_chan = i >= ai->nports0;
  640                 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
  641                 sc->rxpkt_map[pi->txpkt_intf] = i;
  642                 sc->port[i].tx_chan = i >= ai->nports0;
  643                 sc->portdev[i] = child;
  644                 device_set_softc(child, pi);
  645         }
  646         if ((error = bus_generic_attach(dev)) != 0)
  647                 goto out;
  648 
  649         /* initialize sge private state */
  650         t3_sge_init_adapter(sc);
  651 
  652         t3_led_ready(sc);
  653         
  654         cxgb_offload_init();
  655         if (is_offload(sc)) {
  656                 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  657                 cxgb_adapter_ofld(sc);
  658         }
  659         error = t3_get_fw_version(sc, &vers);
  660         if (error)
  661                 goto out;
  662 
  663         snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  664             G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  665             G_FW_VERSION_MICRO(vers));
  666 
  667         snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
  668                  ai->desc, is_offload(sc) ? "R" : "",
  669                  sc->params.vpd.ec, sc->params.vpd.sn);
  670         device_set_desc_copy(dev, buf);
  671 
  672         snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
  673                  sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
  674                  sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
  675 
  676         device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
  677         callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
  678         t3_add_attach_sysctls(sc);
  679 out:
  680         if (error)
  681                 cxgb_free(sc);
  682 
  683         return (error);
  684 }
  685 
  686 /*
  687  * The cxgb_controller_detach routine is called when the device is
  688  * unloaded from the system.
  689  */
  690 
  691 static int
  692 cxgb_controller_detach(device_t dev)
  693 {
  694         struct adapter *sc;
  695 
  696         sc = device_get_softc(dev);
  697 
  698         cxgb_free(sc);
  699 
  700         return (0);
  701 }
  702 
  703 /*
  704  * cxgb_free() is called by the cxgb_controller_detach() routine
  705  * to tear down the structures that were built up in
  706  * cxgb_controller_attach(), and should be the final piece of work
  707  * done when fully unloading the driver.
  708  * 
  709  *
  710  *  1. Shutting down the threads started by the cxgb_controller_attach()
  711  *     routine.
  712  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
  713  *  3. Detaching all of the port devices created during the
  714  *     cxgb_controller_attach() routine.
  715  *  4. Removing the device children created via cxgb_controller_attach().
  716  *  5. Releasing PCI resources associated with the device.
  717  *  6. Turning off the offload support, iff it was turned on.
  718  *  7. Destroying the mutexes created in cxgb_controller_attach().
  719  *
  720  */
  721 static void
  722 cxgb_free(struct adapter *sc)
  723 {
  724         int i;
  725 
  726         ADAPTER_LOCK(sc);
  727         sc->flags |= CXGB_SHUTDOWN;
  728         ADAPTER_UNLOCK(sc);
  729 
  730         cxgb_pcpu_shutdown_threads(sc);
  731 
  732         t3_sge_deinit_sw(sc);
  733         /*
  734          * Make sure all child devices are gone.
  735          */
  736         bus_generic_detach(sc->dev);
  737         for (i = 0; i < (sc)->params.nports; i++) {
  738                 if (sc->portdev[i] &&
  739                     device_delete_child(sc->dev, sc->portdev[i]) != 0)
  740                         device_printf(sc->dev, "failed to delete child port\n");
  741         }
  742 
  743         /*
  744          * At this point, it is as if cxgb_port_detach has run on all ports, and
  745          * cxgb_down has run on the adapter.  All interrupts have been silenced,
  746          * all open devices have been closed.
  747          */
  748         KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
  749                                            __func__, sc->open_device_map));
  750         for (i = 0; i < sc->params.nports; i++) {
  751                 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
  752                                                   __func__, i));
  753         }
  754 
  755         /*
  756          * Finish off the adapter's callouts.
  757          */
  758         callout_drain(&sc->cxgb_tick_ch);
  759         callout_drain(&sc->sge_timer_ch);
  760 
  761         /*
  762          * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
  763          * sysctls are cleaned up by the kernel linker.
  764          */
  765         if (sc->flags & FULL_INIT_DONE) {
  766                 t3_free_sge_resources(sc);
  767                 sc->flags &= ~FULL_INIT_DONE;
  768         }
  769 
  770         /*
  771          * Release all interrupt resources.
  772          */
  773         cxgb_teardown_interrupts(sc);
  774 
  775 #ifdef MSI_SUPPORTED
  776         if (sc->flags & (USING_MSI | USING_MSIX)) {
  777                 device_printf(sc->dev, "releasing msi message(s)\n");
  778                 pci_release_msi(sc->dev);
  779         } else {
  780                 device_printf(sc->dev, "no msi message to release\n");
  781         }
  782 
  783         if (sc->msix_regs_res != NULL) {
  784                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
  785                     sc->msix_regs_res);
  786         }
  787 #endif
  788 
  789         /*
  790          * Free the adapter's taskqueue.
  791          */
  792         if (sc->tq != NULL) {
  793                 taskqueue_free(sc->tq);
  794                 sc->tq = NULL;
  795         }
  796         
  797         if (is_offload(sc)) {
  798                 clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  799                 cxgb_adapter_unofld(sc);
  800         }
  801 
  802 #ifdef notyet
  803         if (sc->flags & CXGB_OFLD_INIT)
  804                 cxgb_offload_deactivate(sc);
  805 #endif
  806         free(sc->filters, M_DEVBUF);
  807         t3_sge_free(sc);
  808 
  809         cxgb_offload_exit();
  810 
  811         if (sc->udbs_res != NULL)
  812                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
  813                     sc->udbs_res);
  814 
  815         if (sc->regs_res != NULL)
  816                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
  817                     sc->regs_res);
  818 
  819         MTX_DESTROY(&sc->mdio_lock);
  820         MTX_DESTROY(&sc->sge.reg_lock);
  821         MTX_DESTROY(&sc->elmer_lock);
  822         ADAPTER_LOCK_DEINIT(sc);
  823 }
  824 
  825 /**
  826  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
  827  *      @sc: the controller softc
  828  *
  829  *      Determines how many sets of SGE queues to use and initializes them.
  830  *      We support multiple queue sets per port if we have MSI-X, otherwise
  831  *      just one queue set per port.
  832  */
  833 static int
  834 setup_sge_qsets(adapter_t *sc)
  835 {
  836         int i, j, err, irq_idx = 0, qset_idx = 0;
  837         u_int ntxq = SGE_TXQ_PER_SET;
  838 
  839         if ((err = t3_sge_alloc(sc)) != 0) {
  840                 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
  841                 return (err);
  842         }
  843 
  844         if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
  845                 irq_idx = -1;
  846 
  847         for (i = 0; i < (sc)->params.nports; i++) {
  848                 struct port_info *pi = &sc->port[i];
  849 
  850                 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
  851                         err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
  852                             (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
  853                             &sc->params.sge.qset[qset_idx], ntxq, pi);
  854                         if (err) {
  855                                 t3_free_sge_resources(sc);
  856                                 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
  857                                     err);
  858                                 return (err);
  859                         }
  860                 }
  861         }
  862 
  863         return (0);
  864 }
  865 
  866 static void
  867 cxgb_teardown_interrupts(adapter_t *sc)
  868 {
  869         int i;
  870 
  871         for (i = 0; i < SGE_QSETS; i++) {
  872                 if (sc->msix_intr_tag[i] == NULL) {
  873 
  874                         /* Should have been setup fully or not at all */
  875                         KASSERT(sc->msix_irq_res[i] == NULL &&
  876                                 sc->msix_irq_rid[i] == 0,
  877                                 ("%s: half-done interrupt (%d).", __func__, i));
  878 
  879                         continue;
  880                 }
  881 
  882                 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
  883                                   sc->msix_intr_tag[i]);
  884                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
  885                                      sc->msix_irq_res[i]);
  886 
  887                 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
  888                 sc->msix_irq_rid[i] = 0;
  889         }
  890 
  891         if (sc->intr_tag) {
  892                 KASSERT(sc->irq_res != NULL,
  893                         ("%s: half-done interrupt.", __func__));
  894 
  895                 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
  896                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  897                                      sc->irq_res);
  898 
  899                 sc->irq_res = sc->intr_tag = NULL;
  900                 sc->irq_rid = 0;
  901         }
  902 }
  903 
  904 static int
  905 cxgb_setup_interrupts(adapter_t *sc)
  906 {
  907         struct resource *res;
  908         void *tag;
  909         int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
  910 
  911         sc->irq_rid = intr_flag ? 1 : 0;
  912         sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
  913                                              RF_SHAREABLE | RF_ACTIVE);
  914         if (sc->irq_res == NULL) {
  915                 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
  916                               intr_flag, sc->irq_rid);
  917                 err = EINVAL;
  918                 sc->irq_rid = 0;
  919         } else {
  920                 err = bus_setup_intr(sc->dev, sc->irq_res,
  921                                      INTR_MPSAFE | INTR_TYPE_NET,
  922 #ifdef INTR_FILTERS
  923                                      NULL,
  924 #endif
  925                                      sc->cxgb_intr, sc, &sc->intr_tag);
  926 
  927                 if (err) {
  928                         device_printf(sc->dev,
  929                                       "Cannot set up interrupt (%x, %u, %d)\n",
  930                                       intr_flag, sc->irq_rid, err);
  931                         bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  932                                              sc->irq_res);
  933                         sc->irq_res = sc->intr_tag = NULL;
  934                         sc->irq_rid = 0;
  935                 }
  936         }
  937 
  938         /* That's all for INTx or MSI */
  939         if (!(intr_flag & USING_MSIX) || err)
  940                 return (err);
  941 
  942         for (i = 0; i < sc->msi_count - 1; i++) {
  943                 rid = i + 2;
  944                 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
  945                                              RF_SHAREABLE | RF_ACTIVE);
  946                 if (res == NULL) {
  947                         device_printf(sc->dev, "Cannot allocate interrupt "
  948                                       "for message %d\n", rid);
  949                         err = EINVAL;
  950                         break;
  951                 }
  952 
  953                 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
  954 #ifdef INTR_FILTERS
  955                                      NULL,
  956 #endif
  957                                      t3_intr_msix, &sc->sge.qs[i], &tag);
  958                 if (err) {
  959                         device_printf(sc->dev, "Cannot set up interrupt "
  960                                       "for message %d (%d)\n", rid, err);
  961                         bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
  962                         break;
  963                 }
  964 
  965                 sc->msix_irq_rid[i] = rid;
  966                 sc->msix_irq_res[i] = res;
  967                 sc->msix_intr_tag[i] = tag;
  968         }
  969 
  970         if (err)
  971                 cxgb_teardown_interrupts(sc);
  972 
  973         return (err);
  974 }
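The resource IDs above follow the standard FreeBSD PCI convention rather than
anything defined in this file: SYS_RES_IRQ rid 0 is the legacy INTx line, and
MSI/MSI-X messages occupy rids 1..n.  That is why the main handler uses rid 1
whenever USING_MSI or USING_MSIX is set, and why the per-queue-set MSI-X
vectors start at rid 2 (rid i + 2 for queue set i, the first vector having
been taken by the slow path).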
  975 
  976 
  977 static int
  978 cxgb_port_probe(device_t dev)
  979 {
  980         struct port_info *p;
  981         char buf[80];
  982         const char *desc;
  983         
  984         p = device_get_softc(dev);
  985         desc = p->phy.desc;
  986         snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
  987         device_set_desc_copy(dev, buf);
  988         return (0);
  989 }
  990 
  991 
  992 static int
  993 cxgb_makedev(struct port_info *pi)
  994 {
  995         
  996         pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
  997             UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
  998         
  999         if (pi->port_cdev == NULL)
 1000                 return (ENOMEM);
 1001 
 1002         pi->port_cdev->si_drv1 = (void *)pi;
 1003         
 1004         return (0);
 1005 }
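make_dev() above exposes each port as a character device named after its
interface (cxgb0, cxgb1, ...), with si_drv1 pointing back at the port_info so
cxgb_extension_ioctl() can recover it.  A hypothetical userland sketch of
talking to that node (the actual ioctl requests live in the driver's ioctl
header and are not shown in this file):

    #include <sys/ioctl.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/dev/cxgb0", O_RDWR);    /* node from make_dev() */

            if (fd == -1)
                    err(1, "open");
            /* management requests would be issued here via ioctl(fd, ...) */
            close(fd);
            return (0);
    }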
 1006 
 1007 #ifdef TSO_SUPPORTED
 1008 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
 1009 /* Don't enable TSO6 yet */
 1010 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
 1011 #else
 1012 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
 1013 /* Don't enable TSO6 yet */
 1014 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
 1015 #define IFCAP_TSO4 0x0
 1016 #define IFCAP_TSO6 0x0
 1017 #define CSUM_TSO   0x0
 1018 #endif
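The CXGB_CAP/CXGB_CAP_ENABLE split means every capability is advertised to
the stack, but only the ENABLE set starts out turned on (note TSO4 without
TSO6).  Individual capabilities can later be toggled per interface; a sketch,
assuming the standard ifconfig(8) capability flag names:

    ifconfig cxgb0 -tso     # disable TCP segmentation offload
    ifconfig cxgb0 lro      # enable large receive offload

Such requests reach the driver as SIOCSIFCAP commands through cxgb_ioctl().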
 1019 
 1020 
 1021 static int
 1022 cxgb_port_attach(device_t dev)
 1023 {
 1024         struct port_info *p;
 1025         struct ifnet *ifp;
 1026         int err;
 1027         struct adapter *sc;
 1028         
 1029         
 1030         p = device_get_softc(dev);
 1031         sc = p->adapter;
 1032         snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
 1033             device_get_unit(device_get_parent(dev)), p->port_id);
 1034         PORT_LOCK_INIT(p, p->lockbuf);
 1035 
 1036         /* Allocate an ifnet object and set it up */
 1037         ifp = p->ifp = if_alloc(IFT_ETHER);
 1038         if (ifp == NULL) {
 1039                 device_printf(dev, "Cannot allocate ifnet\n");
 1040                 return (ENOMEM);
 1041         }
 1042         
 1043         /*
 1044          * Note that there is currently no watchdog timer.
 1045          */
 1046         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1047         ifp->if_init = cxgb_init;
 1048         ifp->if_softc = p;
 1049         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1050         ifp->if_ioctl = cxgb_ioctl;
 1051         ifp->if_start = cxgb_start;
 1052 
 1053 
 1054         ifp->if_timer = 0;      /* Disable ifnet watchdog */
 1055         ifp->if_watchdog = NULL;
 1056 
 1057         ifp->if_snd.ifq_drv_maxlen = cxgb_snd_queue_len;
 1058         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 1059         IFQ_SET_READY(&ifp->if_snd);
 1060 
 1061         ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
 1062         ifp->if_capabilities |= CXGB_CAP;
 1063         ifp->if_capenable |= CXGB_CAP_ENABLE;
 1064         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
 1065         /*
 1066          * disable TSO on 4-port - it isn't supported by the firmware yet
 1067          */     
 1068         if (p->adapter->params.nports > 2) {
 1069                 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
 1070                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
 1071                 ifp->if_hwassist &= ~CSUM_TSO;
 1072         }
 1073 
 1074         ether_ifattach(ifp, p->hw_addr);
 1075 
 1076 #ifdef IFNET_MULTIQUEUE
 1077         ifp->if_transmit = cxgb_pcpu_transmit;
 1078 #endif
 1079         /*
 1080          * Only default to jumbo frames on 10GigE
 1081          */
 1082         if (p->adapter->params.nports <= 2)
 1083                 ifp->if_mtu = ETHERMTU_JUMBO;
 1084         if ((err = cxgb_makedev(p)) != 0) {
 1085                 printf("makedev failed %d\n", err);
 1086                 return (err);
 1087         }
 1088 
 1089         /* Create a list of media supported by this port */
 1090         ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
 1091             cxgb_media_status);
 1092         cxgb_build_medialist(p);
 1093       
 1094         t3_sge_init_port(p);
 1095 
 1096         return (err);
 1097 }
 1098 
 1099 /*
 1100  * cxgb_port_detach() is called via the device_detach method when
 1101  * cxgb_free() calls bus_generic_detach().  It is responsible for
 1102  * removing the device from the view of the kernel, i.e. from all
 1103  * interface lists, etc.  This routine is only called when the driver is
 1104  * being unloaded, not when the link goes down.
 1105  */
 1106 static int
 1107 cxgb_port_detach(device_t dev)
 1108 {
 1109         struct port_info *p;
 1110         struct adapter *sc;
 1111 
 1112         p = device_get_softc(dev);
 1113         sc = p->adapter;
 1114 
 1115         /* Tell cxgb_ioctl and if_init that the port is going away */
 1116         ADAPTER_LOCK(sc);
 1117         SET_DOOMED(p);
 1118         wakeup(&sc->flags);
 1119         while (IS_BUSY(sc))
 1120                 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
 1121         SET_BUSY(sc);
 1122         ADAPTER_UNLOCK(sc);
 1123 
 1124         if (p->port_cdev != NULL)
 1125                 destroy_dev(p->port_cdev);
 1126 
 1127         cxgb_uninit_synchronized(p);
 1128         ether_ifdetach(p->ifp);
 1129 
 1130         PORT_LOCK_DEINIT(p);
 1131         if_free(p->ifp);
 1132         p->ifp = NULL;
 1133 
 1134         ADAPTER_LOCK(sc);
 1135         CLR_BUSY(sc);
 1136         wakeup_one(&sc->flags);
 1137         ADAPTER_UNLOCK(sc);
 1138         return (0);
 1139 }
 1140 
 1141 void
 1142 t3_fatal_err(struct adapter *sc)
 1143 {
 1144         u_int fw_status[4];
 1145 
 1146         if (sc->flags & FULL_INIT_DONE) {
 1147                 t3_sge_stop(sc);
 1148                 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
 1149                 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
 1150                 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 1151                 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 1152                 t3_intr_disable(sc);
 1153         }
 1154         device_printf(sc->dev,"encountered fatal error, operation suspended\n");
 1155         if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
 1156                 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1157                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1158 }
 1159 
 1160 int
 1161 t3_os_find_pci_capability(adapter_t *sc, int cap)
 1162 {
 1163         device_t dev;
 1164         struct pci_devinfo *dinfo;
 1165         pcicfgregs *cfg;
 1166         uint32_t status;
 1167         uint8_t ptr;
 1168 
 1169         dev = sc->dev;
 1170         dinfo = device_get_ivars(dev);
 1171         cfg = &dinfo->cfg;
 1172 
 1173         status = pci_read_config(dev, PCIR_STATUS, 2);
 1174         if (!(status & PCIM_STATUS_CAPPRESENT))
 1175                 return (0);
 1176 
 1177         switch (cfg->hdrtype & PCIM_HDRTYPE) {
 1178         case 0:
 1179         case 1:
 1180                 ptr = PCIR_CAP_PTR;
 1181                 break;
 1182         case 2:
 1183                 ptr = PCIR_CAP_PTR_2;
 1184                 break;
 1185         default:
 1186                 return (0);
 1187                 break;
 1188         }
 1189         ptr = pci_read_config(dev, ptr, 1);
 1190 
 1191         while (ptr != 0) {
 1192                 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
 1193                         return (ptr);
 1194                 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
 1195         }
 1196 
 1197         return (0);
 1198 }
 1199 
 1200 int
 1201 t3_os_pci_save_state(struct adapter *sc)
 1202 {
 1203         device_t dev;
 1204         struct pci_devinfo *dinfo;
 1205 
 1206         dev = sc->dev;
 1207         dinfo = device_get_ivars(dev);
 1208 
 1209         pci_cfg_save(dev, dinfo, 0);
 1210         return (0);
 1211 }
 1212 
 1213 int
 1214 t3_os_pci_restore_state(struct adapter *sc)
 1215 {
 1216         device_t dev;
 1217         struct pci_devinfo *dinfo;
 1218 
 1219         dev = sc->dev;
 1220         dinfo = device_get_ivars(dev);
 1221 
 1222         pci_cfg_restore(dev, dinfo);
 1223         return (0);
 1224 }
 1225 
 1226 /**
 1227  *      t3_os_link_changed - handle link status changes
 1228  *      @adapter: the adapter associated with the link change
 1229  *      @port_id: the port index whose link status has changed
 1230  *      @link_status: the new status of the link
 1231  *      @speed: the new speed setting
 1232  *      @duplex: the new duplex setting
 1233  *      @fc: the new flow-control setting
 1234  *
 1235  *      This is the OS-dependent handler for link status changes.  The OS
 1236  *      neutral handler takes care of most of the processing for these events,
 1237  *      then calls this handler for any OS-specific processing.
 1238  */
 1239 void
 1240 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
 1241      int duplex, int fc, int mac_was_reset)
 1242 {
 1243         struct port_info *pi = &adapter->port[port_id];
 1244         struct ifnet *ifp = pi->ifp;
 1245 
 1246         /* no race with detach, so ifp should always be good */
 1247         KASSERT(ifp, ("%s: if detached.", __func__));
 1248 
 1249         /* Reapply mac settings if they were lost due to a reset */
 1250         if (mac_was_reset) {
 1251                 PORT_LOCK(pi);
 1252                 cxgb_update_mac_settings(pi);
 1253                 PORT_UNLOCK(pi);
 1254         }
 1255 
 1256         if (link_status) {
 1257                 ifp->if_baudrate = IF_Mbps(speed);
 1258                 if_link_state_change(ifp, LINK_STATE_UP);
 1259         } else
 1260                 if_link_state_change(ifp, LINK_STATE_DOWN);
 1261 }
 1262 
 1263 /**
 1264  *      t3_os_phymod_changed - handle PHY module changes
 1265  *      @adap: the adapter whose PHY reported the module change
 1266  *      @port_id: the index of the port with the new module
 1267  *
 1268  *      This is the OS-dependent handler for PHY module changes.  It is
 1269  *      invoked when a PHY module is removed or inserted for any OS-specific
 1270  *      processing.
 1271  */
 1272 void t3_os_phymod_changed(struct adapter *adap, int port_id)
 1273 {
 1274         static const char *mod_str[] = {
 1275                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
 1276         };
 1277         struct port_info *pi = &adap->port[port_id];
 1278         int mod = pi->phy.modtype;
 1279 
 1280         if (mod != pi->media.ifm_cur->ifm_data)
 1281                 cxgb_build_medialist(pi);
 1282 
 1283         if (mod == phy_modtype_none)
 1284                 if_printf(pi->ifp, "PHY module unplugged\n");
 1285         else {
 1286                 KASSERT(mod < ARRAY_SIZE(mod_str),
 1287                         ("invalid PHY module type %d", mod));
 1288                 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
 1289         }
 1290 }
 1291 
 1292 /*
 1293  * Interrupt-context handler for external (PHY) interrupts.
 1294  */
 1295 void
 1296 t3_os_ext_intr_handler(adapter_t *sc)
 1297 {
 1298         if (cxgb_debug)
 1299                 printf("t3_os_ext_intr_handler\n");
 1300         /*
 1301          * Schedule a task to handle external interrupts as they may be slow
 1302          * and we use a mutex to protect MDIO registers.  We disable PHY
 1303          * interrupts in the meantime and let the task reenable them when
 1304          * it's done.
 1305          */
 1306         if (sc->slow_intr_mask) {
 1307                 ADAPTER_LOCK(sc);
 1308                 sc->slow_intr_mask &= ~F_T3DBG;
 1309                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 1310                 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
 1311                 ADAPTER_UNLOCK(sc);
 1312         }
 1313 }
 1314 
 1315 void
 1316 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
 1317 {
 1318 
 1319         /*
 1320          * The ifnet might not be allocated before this gets called,
 1321          * as this is called early on in attach by t3_prep_adapter,
 1322          * so save the address off in the port structure.
 1323          */
 1324         if (cxgb_debug)
 1325                 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
 1326         bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
 1327 }
 1328 
 1329 /*
 1330  * Programs the XGMAC based on the settings in the ifnet.  These settings
 1331  * include MTU, MAC address, mcast addresses, etc.
 1332  */
 1333 static void
 1334 cxgb_update_mac_settings(struct port_info *p)
 1335 {
 1336         struct ifnet *ifp = p->ifp;
 1337         struct t3_rx_mode rm;
 1338         struct cmac *mac = &p->mac;
 1339         int mtu, hwtagging;
 1340 
 1341         PORT_LOCK_ASSERT_OWNED(p);
 1342 
 1343         bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
 1344 
 1345         mtu = ifp->if_mtu;
 1346         if (ifp->if_capenable & IFCAP_VLAN_MTU)
 1347                 mtu += ETHER_VLAN_ENCAP_LEN;
 1348 
 1349         hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
 1350 
 1351         t3_mac_set_mtu(mac, mtu);
 1352         t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
 1353         t3_mac_set_address(mac, 0, p->hw_addr);
 1354         t3_init_rx_mode(&rm, p);
 1355         t3_mac_set_rx_mode(mac, &rm);
 1356 }
 1357 
 1358 static int
 1359 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 1360                               unsigned long n)
 1361 {
 1362         int attempts = 5;
 1363 
 1364         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 1365                 if (!--attempts)
 1366                         return (ETIMEDOUT);
 1367                 t3_os_sleep(10);
 1368         }
 1369         return (0);
 1370 }
 1371 
 1372 static int
 1373 init_tp_parity(struct adapter *adap)
 1374 {
 1375         int i;
 1376         struct mbuf *m;
 1377         struct cpl_set_tcb_field *greq;
 1378         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
 1379 
 1380         t3_tp_set_offload_mode(adap, 1);
 1381 
 1382         for (i = 0; i < 16; i++) {
 1383                 struct cpl_smt_write_req *req;
 1384 
 1385                 m = m_gethdr(M_WAITOK, MT_DATA);
 1386                 req = mtod(m, struct cpl_smt_write_req *);
 1387                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1388                 memset(req, 0, sizeof(*req));
 1389                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1390                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 1391                 req->iff = i;
 1392                 t3_mgmt_tx(adap, m);
 1393         }
 1394 
 1395         for (i = 0; i < 2048; i++) {
 1396                 struct cpl_l2t_write_req *req;
 1397 
 1398                 m = m_gethdr(M_WAITOK, MT_DATA);
 1399                 req = mtod(m, struct cpl_l2t_write_req *);
 1400                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1401                 memset(req, 0, sizeof(*req));
 1402                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1403                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 1404                 req->params = htonl(V_L2T_W_IDX(i));
 1405                 t3_mgmt_tx(adap, m);
 1406         }
 1407 
 1408         for (i = 0; i < 2048; i++) {
 1409                 struct cpl_rte_write_req *req;
 1410 
 1411                 m = m_gethdr(M_WAITOK, MT_DATA);
 1412                 req = mtod(m, struct cpl_rte_write_req *);
 1413                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1414                 memset(req, 0, sizeof(*req));
 1415                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1416                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 1417                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
 1418                 t3_mgmt_tx(adap, m);
 1419         }
 1420 
 1421         m = m_gethdr(M_WAITOK, MT_DATA);
 1422         greq = mtod(m, struct cpl_set_tcb_field *);
 1423         m->m_len = m->m_pkthdr.len = sizeof(*greq);
 1424         memset(greq, 0, sizeof(*greq));
 1425         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1426         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
 1427         greq->mask = htobe64(1);
 1428         t3_mgmt_tx(adap, m);
 1429 
 1430         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 1431         t3_tp_set_offload_mode(adap, 0);
 1432         return (i);
 1433 }
 1434 
 1435 /**
 1436  *      setup_rss - configure Receive Side Scaling (per-queue connection demux)
 1437  *      @adap: the adapter
 1438  *
 1439  *      Sets up RSS to distribute packets to multiple receive queues.  We
 1440  *      configure the RSS CPU lookup table to distribute to the number of HW
 1441  *      receive queues, and the response queue lookup table to narrow that
 1442  *      down to the response queues actually configured for each port.
 1443  *      We always configure the RSS mapping for two ports since the mapping
 1444  *      table has plenty of entries.
 1445  */
 1446 static void
 1447 setup_rss(adapter_t *adap)
 1448 {
 1449         int i;
 1450         u_int nq[2]; 
 1451         uint8_t cpus[SGE_QSETS + 1];
 1452         uint16_t rspq_map[RSS_TABLE_SIZE];
 1453         
 1454         for (i = 0; i < SGE_QSETS; ++i)
 1455                 cpus[i] = i;
 1456         cpus[SGE_QSETS] = 0xff;
 1457 
 1458         nq[0] = nq[1] = 0;
 1459         for_each_port(adap, i) {
 1460                 const struct port_info *pi = adap2pinfo(adap, i);
 1461 
 1462                 nq[pi->tx_chan] += pi->nqsets;
 1463         }
 1464         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 1465                 rspq_map[i] = nq[0] ? i % nq[0] : 0;
 1466                 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? (i % nq[1]) + nq[0] : 0;
 1467         }
 1468 
 1469         /* Calculate the reverse RSS map table */
 1470         for (i = 0; i < SGE_QSETS; ++i)
 1471                 adap->rrss_map[i] = 0xff;
 1472         for (i = 0; i < RSS_TABLE_SIZE; ++i)
 1473                 if (adap->rrss_map[rspq_map[i]] == 0xff)
 1474                         adap->rrss_map[rspq_map[i]] = i;
 1475 
 1476         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 1477                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
 1478                       F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
 1479                       cpus, rspq_map);
 1480 
 1481 }
 1482 
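      /*
       * Illustration (hypothetical configuration): with two ports on TX
       * channels 0 and 1 and two qsets each, nq[0] = nq[1] = 2, so the
       * first half of rspq_map cycles 0,1,0,1,... and the second half
       * cycles 2,3,2,3,...; rrss_map[q] then records the first table index
       * that steers to qset q.
       */
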
 1483 /*
 1484  * Sends an mbuf to an offload queue driver.  (No network taps are
 1485  * consulted here; the mbuf goes straight to t3_offload_tx().)
 1486  */
 1487 static inline int
 1488 offload_tx(struct t3cdev *tdev, struct mbuf *m)
 1489 {
 1490         int ret;
 1491 
 1492         ret = t3_offload_tx(tdev, m);
 1493         return (ret);
 1494 }
 1495 
 1496 static int
 1497 write_smt_entry(struct adapter *adapter, int idx)
 1498 {
 1499         struct port_info *pi = &adapter->port[idx];
 1500         struct cpl_smt_write_req *req;
 1501         struct mbuf *m;
 1502 
 1503         if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 1504                 return (ENOMEM);
 1505 
 1506         req = mtod(m, struct cpl_smt_write_req *);
 1507         m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
 1508         
 1509         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1510         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 1511         req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
 1512         req->iff = idx;
 1513         memset(req->src_mac1, 0, sizeof(req->src_mac1));
 1514         memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
 1515 
 1516         m_set_priority(m, 1);
 1517 
 1518         offload_tx(&adapter->tdev, m);
 1519 
 1520         return (0);
 1521 }
 1522 
 1523 static int
 1524 init_smt(struct adapter *adapter)
 1525 {
 1526         int i;
 1527 
 1528         for_each_port(adapter, i)
 1529                 write_smt_entry(adapter, i);
 1530         return (0);
 1531 }
 1532 
 1533 static void
 1534 init_port_mtus(adapter_t *adapter)
 1535 {
 1536         unsigned int mtus = ETHERMTU | (ETHERMTU << 16); /* one 16-bit MTU per channel */
 1537 
 1538         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 1539 }
 1540 
 1541 static void
 1542 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 1543                               int hi, int port)
 1544 {
 1545         struct mbuf *m;
 1546         struct mngt_pktsched_wr *req;
 1547 
 1548         m = m_gethdr(M_DONTWAIT, MT_DATA);
 1549         if (m) {        
 1550                 req = mtod(m, struct mngt_pktsched_wr *);
 1551                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 1552                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 1553                 req->sched = sched;
 1554                 req->idx = qidx;
 1555                 req->min = lo;
 1556                 req->max = hi;
 1557                 req->binding = port;
 1558                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1559                 t3_mgmt_tx(adap, m);
 1560         }
 1561 }
 1562 
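      /*
       * Bind every qset of every port to that port's TX channel with the
       * firmware's PKTSCHED_SET management command; the -1 lo/hi values
       * passed below presumably leave the scheduler's rate limits
       * unchanged.
       */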
 1563 static void
 1564 bind_qsets(adapter_t *sc)
 1565 {
 1566         int i, j;
 1567 
 1568         cxgb_pcpu_startup_threads(sc);
 1569         for (i = 0; i < sc->params.nports; ++i) {
 1570                 const struct port_info *pi = adap2pinfo(sc, i);
 1571 
 1572                 for (j = 0; j < pi->nqsets; ++j) {
 1573                         send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
 1574                                           -1, pi->tx_chan);
 1575 
 1576                 }
 1577         }
 1578 }
 1579 
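      /*
       * Refresh the protocol-SRAM image kept in the adapter's EEPROM when
       * its version does not match the TP_VERSION_{MAJOR,MINOR} the driver
       * was built against; the replacement image is loaded via firmware(9)
       * under the chip-revision-specific TPEEPROM_NAME.
       */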
 1580 static void
 1581 update_tpeeprom(struct adapter *adap)
 1582 {
 1583 #ifdef FIRMWARE_LATEST
 1584         const struct firmware *tpeeprom;
 1585 #else
 1586         struct firmware *tpeeprom;
 1587 #endif
 1588 
 1589         uint32_t version;
 1590         unsigned int major, minor;
 1591         int ret, len;
 1592         char rev, name[32];
 1593 
 1594         t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
 1595 
 1596         major = G_TP_VERSION_MAJOR(version);
 1597         minor = G_TP_VERSION_MINOR(version);
 1598         if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
 1599                 return; 
 1600 
 1601         rev = t3rev2char(adap);
 1602         snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
 1603 
 1604         tpeeprom = firmware_get(name);
 1605         if (tpeeprom == NULL) {
 1606                 device_printf(adap->dev,
 1607                               "could not load TP EEPROM: unable to load %s\n",
 1608                               name);
 1609                 return;
 1610         }
 1611 
 1612         len = tpeeprom->datasize - 4;
 1613         
 1614         ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
 1615         if (ret)
 1616                 goto release_tpeeprom;
 1617 
 1618         if (len != TP_SRAM_LEN) {
 1619                 device_printf(adap->dev,
 1620                               "%s length is wrong len=%d expected=%d\n", name,
 1621                               len, TP_SRAM_LEN);
 1622                 goto release_tpeeprom;
 1623         }
 1624         
 1625         ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
 1626             TP_SRAM_OFFSET);
 1627         
 1628         if (!ret) {
 1629                 device_printf(adap->dev,
 1630                         "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
 1631                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1632         } else 
 1633                 device_printf(adap->dev,
 1634                               "Protocol SRAM image update in EEPROM failed\n");
 1635 
 1636 release_tpeeprom:
 1637         firmware_put(tpeeprom, FIRMWARE_UNLOAD);
 1638         
 1639         return;
 1640 }
 1641 
 1642 static int
 1643 update_tpsram(struct adapter *adap)
 1644 {
 1645 #ifdef FIRMWARE_LATEST
 1646         const struct firmware *tpsram;
 1647 #else
 1648         struct firmware *tpsram;
 1649 #endif  
 1650         int ret;
 1651         char rev, name[32];
 1652 
 1653         rev = t3rev2char(adap);
 1654         snprintf(name, sizeof(name), TPSRAM_NAME, rev);
 1655 
 1656         update_tpeeprom(adap);
 1657 
 1658         tpsram = firmware_get(name);
 1659         if (tpsram == NULL) {
 1660                 device_printf(adap->dev, "could not load TP SRAM\n");
 1661                 return (EINVAL);
 1662         } else
 1663                 device_printf(adap->dev, "updating TP SRAM\n");
 1664         
 1665         ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
 1666         if (ret)
 1667                 goto release_tpsram;    
 1668 
 1669         ret = t3_set_proto_sram(adap, tpsram->data);
 1670         if (ret)
 1671                 device_printf(adap->dev, "loading protocol SRAM failed\n");
 1672 
 1673 release_tpsram:
 1674         firmware_put(tpsram, FIRMWARE_UNLOAD);
 1675         
 1676         return (ret);
 1677 }
 1678 
 1679 /**
 1680  *      cxgb_up - enable the adapter
 1681  *      @adap: adapter being enabled
 1682  *
 1683  *      Called when the first port is enabled, this function performs the
 1684  *      actions necessary to make an adapter operational, such as completing
 1685  *      the initialization of HW modules, and enabling interrupts.
 1686  */
 1687 static int
 1688 cxgb_up(struct adapter *sc)
 1689 {
 1690         int err = 0;
 1691 
 1692         KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
 1693                                            __func__, sc->open_device_map));
 1694 
 1695         if ((sc->flags & FULL_INIT_DONE) == 0) {
 1696 
 1697                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1698 
 1699                 if ((sc->flags & FW_UPTODATE) == 0)
 1700                         if ((err = upgrade_fw(sc)))
 1701                                 goto out;
 1702 
 1703                 if ((sc->flags & TPS_UPTODATE) == 0)
 1704                         if ((err = update_tpsram(sc)))
 1705                                 goto out;
 1706 
 1707                 err = t3_init_hw(sc, 0);
 1708                 if (err)
 1709                         goto out;
 1710 
 1711                 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
 1712                 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
 1713 
 1714                 err = setup_sge_qsets(sc);
 1715                 if (err)
 1716                         goto out;
 1717 
 1718                 setup_rss(sc);
 1719 
 1720                 t3_intr_clear(sc);
 1721                 err = cxgb_setup_interrupts(sc);
 1722                 if (err)
 1723                         goto out;
 1724 
 1725                 t3_add_configured_sysctls(sc);
 1726                 sc->flags |= FULL_INIT_DONE;
 1727         }
 1728 
 1729         t3_intr_clear(sc);
 1730         t3_sge_start(sc);
 1731         t3_intr_enable(sc);
 1732 
 1733         if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
 1734             is_offload(sc) && init_tp_parity(sc) == 0)
 1735                 sc->flags |= TP_PARITY_INIT;
 1736 
 1737         if (sc->flags & TP_PARITY_INIT) {
 1738                 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
 1739                 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
 1740         }
 1741         
 1742         if (!(sc->flags & QUEUES_BOUND)) {
 1743                 bind_qsets(sc);
 1744                 sc->flags |= QUEUES_BOUND;              
 1745         }
 1746 
 1747         t3_sge_reset_adapter(sc);
 1748 out:
 1749         return (err);
 1750 }
 1751 
 1752 /*
 1753  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 1754  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 1755  * during controller_detach, not here.
 1756  */
 1757 static void
 1758 cxgb_down(struct adapter *sc)
 1759 {
 1760         t3_sge_stop(sc);
 1761         t3_intr_disable(sc);
 1762 }
 1763 
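      /*
       * Bring up the offload (TOE) side of the adapter: flip the TP into
       * offload mode, program the port MTU table, prime the SMT, and
       * notify any registered ULP clients via cxgb_add_clients().
       */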
 1764 static int
 1765 offload_open(struct port_info *pi)
 1766 {
 1767         struct adapter *sc = pi->adapter;
 1768         struct t3cdev *tdev = &sc->tdev;
 1769 
 1770         setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);
 1771 
 1772         t3_tp_set_offload_mode(sc, 1);
 1773         tdev->lldev = pi->ifp;
 1774         init_port_mtus(sc);
 1775         t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
 1776                      sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
 1777         init_smt(sc);
 1778         cxgb_add_clients(tdev);
 1779 
 1780         return (0);
 1781 }
 1782 
 1783 static int
 1784 offload_close(struct t3cdev *tdev)
 1785 {
 1786         struct adapter *adapter = tdev2adap(tdev);
 1787 
 1788         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
 1789                 return (0);
 1790 
 1791         /* Call back all registered clients */
 1792         cxgb_remove_clients(tdev);
 1793 
 1794         tdev->lldev = NULL;
 1795         cxgb_set_dummy_ops(tdev);
 1796         t3_tp_set_offload_mode(adapter, 0);
 1797 
 1798         clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1799 
 1800         return (0);
 1801 }
 1802 
 1803 /*
 1804  * if_init for cxgb ports.
 1805  */
 1806 static void
 1807 cxgb_init(void *arg)
 1808 {
 1809         struct port_info *p = arg;
 1810         struct adapter *sc = p->adapter;
 1811 
 1812         ADAPTER_LOCK(sc);
 1813         cxgb_init_locked(p); /* releases adapter lock */
 1814         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1815 }
 1816 
 1817 static int
 1818 cxgb_init_locked(struct port_info *p)
 1819 {
 1820         struct adapter *sc = p->adapter;
 1821         struct ifnet *ifp = p->ifp;
 1822         struct cmac *mac = &p->mac;
 1823         int rc = 0, may_sleep = 0;
 1824 
 1825         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1826 
 1827         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1828                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
 1829                         rc = EINTR;
 1830                         goto done;
 1831                 }
 1832         }
 1833         if (IS_DOOMED(p)) {
 1834                 rc = ENXIO;
 1835                 goto done;
 1836         }
 1837         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1838 
 1839         /*
 1840          * The code that runs during one-time adapter initialization can sleep,
 1841          * so SET_BUSY is used instead of a lock held across it.
 1842          */
 1843         may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
 1844 
 1845         if (may_sleep) {
 1846                 SET_BUSY(sc);
 1847                 ADAPTER_UNLOCK(sc);
 1848         }
 1849 
 1850         if (sc->open_device_map == 0) {
 1851                 if ((rc = cxgb_up(sc)) != 0)
 1852                         goto done;
 1853 
 1854                 if (is_offload(sc) && !ofld_disable && offload_open(p))
 1855                         log(LOG_WARNING,
 1856                             "Could not initialize offload capabilities\n");
 1857         }
 1858 
 1859         PORT_LOCK(p);
 1860         if (isset(&sc->open_device_map, p->port_id) &&
 1861             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1862                 PORT_UNLOCK(p);
 1863                 goto done;
 1864         }
 1865         t3_port_intr_enable(sc, p->port_id);
 1866         if (!mac->multiport) 
 1867                 t3_mac_init(mac);
 1868         cxgb_update_mac_settings(p);
 1869         t3_link_start(&p->phy, mac, &p->link_config);
 1870         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1871         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1872         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1873         PORT_UNLOCK(p);
 1874 
 1875         t3_link_changed(sc, p->port_id);
 1876 
 1877         /* all ok */
 1878         setbit(&sc->open_device_map, p->port_id);
 1879 
 1880 done:
 1881         if (may_sleep) {
 1882                 ADAPTER_LOCK(sc);
 1883                 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1884                 CLR_BUSY(sc);
 1885                 wakeup_one(&sc->flags);
 1886         }
 1887         ADAPTER_UNLOCK(sc);
 1888         return (rc);
 1889 }
 1890 
 1891 static int
 1892 cxgb_uninit_locked(struct port_info *p)
 1893 {
 1894         struct adapter *sc = p->adapter;
 1895         int rc;
 1896 
 1897         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1898 
 1899         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1900                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
 1901                         rc = EINTR;
 1902                         goto done;
 1903                 }
 1904         }
 1905         if (IS_DOOMED(p)) {
 1906                 rc = ENXIO;
 1907                 goto done;
 1908         }
 1909         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1910         SET_BUSY(sc);
 1911         ADAPTER_UNLOCK(sc);
 1912 
 1913         rc = cxgb_uninit_synchronized(p);
 1914 
 1915         ADAPTER_LOCK(sc);
 1916         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1917         CLR_BUSY(sc);
 1918         wakeup_one(&sc->flags);
 1919 done:
 1920         ADAPTER_UNLOCK(sc);
 1921         return (rc);
 1922 }
 1923 
 1924 /*
 1925  * Called on "ifconfig down", and from port_detach
 1926  */
 1927 static int
 1928 cxgb_uninit_synchronized(struct port_info *pi)
 1929 {
 1930         struct adapter *sc = pi->adapter;
 1931         struct ifnet *ifp = pi->ifp;
 1932 
 1933         /*
 1934          * taskqueue_drain may cause a deadlock if the adapter lock is held.
 1935          */
 1936         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1937 
 1938         /*
 1939          * Clear this port's bit from the open device map, and then drain all
 1940          * the tasks that can access/manipulate this port's port_info or ifp.
 1941          * We disable this port's interrupts here, so the slow/ext
 1942          * interrupt tasks won't be enqueued.  The tick task will continue to
 1943          * be enqueued every second but the runs after this drain will not see
 1944          * this port in the open device map.
 1945          *
 1946          * A well behaved task must take open_device_map into account and ignore
 1947          * ports that are not open.
 1948          */
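              /*
               * Illustratively, such a task's per-port loop begins with
               * something like
               *
               *      if (!isset(&sc->open_device_map, pi->port_id))
               *              continue;
               *
               * as check_t3b2_mac() and cxgb_tick_handler() do.
               */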
 1949         clrbit(&sc->open_device_map, pi->port_id);
 1950         t3_port_intr_disable(sc, pi->port_id);
 1951         taskqueue_drain(sc->tq, &sc->slow_intr_task);
 1952         taskqueue_drain(sc->tq, &sc->ext_intr_task);
 1953         taskqueue_drain(sc->tq, &sc->tick_task);
 1954 
 1955         PORT_LOCK(pi);
 1956         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1957 
 1958         /* disable pause frames */
 1959         t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
 1960 
 1961         /* Reset RX FIFO HWM */
 1962         t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
 1963                          V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
 1964 
 1965         DELAY(100 * 1000);
 1966 
 1967         /* Wait for TXFIFO empty */
 1968         t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
 1969                         F_TXFIFO_EMPTY, 1, 20, 5);
 1970 
 1971         DELAY(100 * 1000);
 1972         t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
 1973
 1975         pi->phy.ops->power_down(&pi->phy, 1);
 1976 
 1977         PORT_UNLOCK(pi);
 1978 
 1979         pi->link_config.link_ok = 0;
 1980         t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
 1981 
 1982         if ((sc->open_device_map & PORT_MASK) == 0)
 1983                 offload_close(&sc->tdev);
 1984 
 1985         if (sc->open_device_map == 0)
 1986                 cxgb_down(pi->adapter);
 1987 
 1988         return (0);
 1989 }
 1990 
 1991 /*
 1992  * Mark lro enabled or disabled in all qsets for this port
 1993  */
 1994 static int
 1995 cxgb_set_lro(struct port_info *p, int enabled)
 1996 {
 1997         int i;
 1998         struct adapter *adp = p->adapter;
 1999         struct sge_qset *q;
 2000 
 2001         PORT_LOCK_ASSERT_OWNED(p);
 2002         for (i = 0; i < p->nqsets; i++) {
 2003                 q = &adp->sge.qs[p->first_qset + i];
 2004                 q->lro.enabled = (enabled != 0);
 2005         }
 2006         return (0);
 2007 }
 2008 
 2009 static int
 2010 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
 2011 {
 2012         struct port_info *p = ifp->if_softc;
 2013         struct adapter *sc = p->adapter;
 2014         struct ifreq *ifr = (struct ifreq *)data;
 2015         int flags, error = 0, mtu;
 2016         uint32_t mask;
 2017 
 2018         switch (command) {
 2019         case SIOCSIFMTU:
 2020                 ADAPTER_LOCK(sc);
 2021                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2022                 if (error) {
 2023 fail:
 2024                         ADAPTER_UNLOCK(sc);
 2025                         return (error);
 2026                 }
 2027 
 2028                 mtu = ifr->ifr_mtu;
 2029                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
 2030                         error = EINVAL;
 2031                 } else {
 2032                         ifp->if_mtu = mtu;
 2033                         PORT_LOCK(p);
 2034                         cxgb_update_mac_settings(p);
 2035                         PORT_UNLOCK(p);
 2036                 }
 2037                 ADAPTER_UNLOCK(sc);
 2038                 break;
 2039         case SIOCSIFFLAGS:
 2040                 ADAPTER_LOCK(sc);
 2041                 if (IS_DOOMED(p)) {
 2042                         error = ENXIO;
 2043                         goto fail;
 2044                 }
 2045                 if (ifp->if_flags & IFF_UP) {
 2046                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2047                                 flags = p->if_flags;
 2048                                 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
 2049                                     ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
 2050                                         if (IS_BUSY(sc)) {
 2051                                                 error = EBUSY;
 2052                                                 goto fail;
 2053                                         }
 2054                                         PORT_LOCK(p);
 2055                                         cxgb_update_mac_settings(p);
 2056                                         PORT_UNLOCK(p);
 2057                                 }
 2058                                 ADAPTER_UNLOCK(sc);
 2059                         } else
 2060                                 error = cxgb_init_locked(p);
 2061                         p->if_flags = ifp->if_flags;
 2062                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2063                         error = cxgb_uninit_locked(p);
 2064                 else
 2065                         ADAPTER_UNLOCK(sc);
 2066 
 2067                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 2068                 break;
 2069         case SIOCADDMULTI:
 2070         case SIOCDELMULTI:
 2071                 ADAPTER_LOCK(sc);
 2072                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2073                 if (error)
 2074                         goto fail;
 2075 
 2076                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2077                         PORT_LOCK(p);
 2078                         cxgb_update_mac_settings(p);
 2079                         PORT_UNLOCK(p);
 2080                 }
 2081                 ADAPTER_UNLOCK(sc);
 2082 
 2083                 break;
 2084         case SIOCSIFCAP:
 2085                 ADAPTER_LOCK(sc);
 2086                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2087                 if (error)
 2088                         goto fail;
 2089 
 2090                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
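                      /*
                       * Each bit set in mask marks a capability whose
                       * requested state differs from its current state.
                       * Note that TSO4 depends on TXCSUM below: clearing
                       * TXCSUM also clears TSO4, and TSO4 can be enabled
                       * only while TXCSUM is on.
                       */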
 2091                 if (mask & IFCAP_TXCSUM) {
 2092                         if (IFCAP_TXCSUM & ifp->if_capenable) {
 2093                                 ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
 2094                                 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
 2095                                     | CSUM_IP | CSUM_TSO);
 2096                         } else {
 2097                                 ifp->if_capenable |= IFCAP_TXCSUM;
 2098                                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
 2099                                     | CSUM_IP);
 2100                         }
 2101                 }
 2102                 if (mask & IFCAP_RXCSUM) {
 2103                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2104                 }
 2105                 if (mask & IFCAP_TSO4) {
 2106                         if (IFCAP_TSO4 & ifp->if_capenable) {
 2107                                 ifp->if_capenable &= ~IFCAP_TSO4;
 2108                                 ifp->if_hwassist &= ~CSUM_TSO;
 2109                         } else if (IFCAP_TXCSUM & ifp->if_capenable) {
 2110                                 ifp->if_capenable |= IFCAP_TSO4;
 2111                                 ifp->if_hwassist |= CSUM_TSO;
 2112                         } else
 2113                                 error = EINVAL;
 2114                 }
 2115                 if (mask & IFCAP_LRO) {
 2116                         ifp->if_capenable ^= IFCAP_LRO;
 2117 
 2118                         /* Safe to do this even if cxgb_up not called yet */
 2119                         cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
 2120                 }
 2121                 if (mask & IFCAP_VLAN_HWTAGGING) {
 2122                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2123                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2124                                 PORT_LOCK(p);
 2125                                 cxgb_update_mac_settings(p);
 2126                                 PORT_UNLOCK(p);
 2127                         }
 2128                 }
 2129                 if (mask & IFCAP_VLAN_MTU) {
 2130                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2131                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2132                                 PORT_LOCK(p);
 2133                                 cxgb_update_mac_settings(p);
 2134                                 PORT_UNLOCK(p);
 2135                         }
 2136                 }
 2137                 if (mask & IFCAP_VLAN_HWCSUM)
 2138                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2139 
 2140 #ifdef VLAN_CAPABILITIES
 2141                 VLAN_CAPABILITIES(ifp);
 2142 #endif
 2143                 ADAPTER_UNLOCK(sc);
 2144                 break;
 2145         case SIOCSIFMEDIA:
 2146         case SIOCGIFMEDIA:
 2147                 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 2148                 break;
 2149         default:
 2150                 error = ether_ioctl(ifp, command, data);
 2151         }
 2152 
 2153         return (error);
 2154 }
 2155 
 2156 static int
 2157 cxgb_media_change(struct ifnet *ifp)
 2158 {
 2159         return (EOPNOTSUPP);
 2160 }
 2161 
 2162 /*
 2163  * Translates phy->modtype to the correct Ethernet media subtype.
 2164  */
 2165 static int
 2166 cxgb_ifm_type(int mod)
 2167 {
 2168         switch (mod) {
 2169         case phy_modtype_sr:
 2170                 return (IFM_10G_SR);
 2171         case phy_modtype_lr:
 2172                 return (IFM_10G_LR);
 2173         case phy_modtype_lrm:
 2174                 return (IFM_10G_LRM);
 2175         case phy_modtype_twinax:
 2176                 return (IFM_10G_TWINAX);
 2177         case phy_modtype_twinax_long:
 2178                 return (IFM_10G_TWINAX_LONG);
 2179         case phy_modtype_none:
 2180                 return (IFM_NONE);
 2181         case phy_modtype_unknown:
 2182                 return (IFM_UNKNOWN);
 2183         }
 2184 
 2185         KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
 2186         return (IFM_UNKNOWN);
 2187 }
 2188 
 2189 /*
 2190  * Rebuilds the ifmedia list for this port, and sets the current media.
 2191  */
 2192 static void
 2193 cxgb_build_medialist(struct port_info *p)
 2194 {
 2195         struct cphy *phy = &p->phy;
 2196         struct ifmedia *media = &p->media;
 2197         int mod = phy->modtype;
 2198         int m = IFM_ETHER | IFM_FDX;
 2199 
 2200         PORT_LOCK(p);
 2201 
 2202         ifmedia_removeall(media);
 2203         if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
 2204                 /* Copper (RJ45) */
 2205 
 2206                 if (phy->caps & SUPPORTED_10000baseT_Full)
 2207                         ifmedia_add(media, m | IFM_10G_T, mod, NULL);
 2208 
 2209                 if (phy->caps & SUPPORTED_1000baseT_Full)
 2210                         ifmedia_add(media, m | IFM_1000_T, mod, NULL);
 2211 
 2212                 if (phy->caps & SUPPORTED_100baseT_Full)
 2213                         ifmedia_add(media, m | IFM_100_TX, mod, NULL);
 2214 
 2215                 if (phy->caps & SUPPORTED_10baseT_Full)
 2216                         ifmedia_add(media, m | IFM_10_T, mod, NULL);
 2217 
 2218                 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
 2219                 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
 2220 
 2221         } else if (phy->caps & SUPPORTED_TP) {
 2222                 /* Copper (CX4) */
 2223 
 2224                 KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
 2225                         ("%s: unexpected cap 0x%x", __func__, phy->caps));
 2226 
 2227                 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
 2228                 ifmedia_set(media, m | IFM_10G_CX4);
 2229 
 2230         } else if (phy->caps & SUPPORTED_FIBRE &&
 2231                    phy->caps & SUPPORTED_10000baseT_Full) {
 2232                 /* 10G optical (but includes SFP+ twinax) */
 2233 
 2234                 m |= cxgb_ifm_type(mod);
 2235                 if (IFM_SUBTYPE(m) == IFM_NONE)
 2236                         m &= ~IFM_FDX;
 2237 
 2238                 ifmedia_add(media, m, mod, NULL);
 2239                 ifmedia_set(media, m);
 2240 
 2241         } else if (phy->caps & SUPPORTED_FIBRE &&
 2242                    phy->caps & SUPPORTED_1000baseT_Full) {
 2243                 /* 1G optical */
 2244 
 2245                 /* XXX: Lie and claim to be SX, could actually be any 1G-X */
 2246                 ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
 2247                 ifmedia_set(media, m | IFM_1000_SX);
 2248 
 2249         } else {
 2250                 KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
 2251                             phy->caps));
 2252         }
 2253 
 2254         PORT_UNLOCK(p);
 2255 }
 2256 
 2257 static void
 2258 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 2259 {
 2260         struct port_info *p = ifp->if_softc;
 2261         struct ifmedia_entry *cur = p->media.ifm_cur;
 2262         int speed = p->link_config.speed;
 2263 
 2264         if (cur->ifm_data != p->phy.modtype) {
 2265                 cxgb_build_medialist(p);
 2266                 cur = p->media.ifm_cur;
 2267         }
 2268 
 2269         ifmr->ifm_status = IFM_AVALID;
 2270         if (!p->link_config.link_ok)
 2271                 return;
 2272 
 2273         ifmr->ifm_status |= IFM_ACTIVE;
 2274 
 2275         /*
 2276          * active and current will differ iff current media is autoselect.  That
 2277          * can happen only for copper RJ45.
 2278          */
 2279         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
 2280                 return;
 2281         KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
 2282                 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
 2283 
 2284         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
 2285         if (speed == SPEED_10000)
 2286                 ifmr->ifm_active |= IFM_10G_T;
 2287         else if (speed == SPEED_1000)
 2288                 ifmr->ifm_active |= IFM_1000_T;
 2289         else if (speed == SPEED_100)
 2290                 ifmr->ifm_active |= IFM_100_TX;
 2291         else if (speed == SPEED_10)
 2292                 ifmr->ifm_active |= IFM_10_T;
 2293         else
 2294                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
 2295                             speed));
 2296 }
 2297 
 2298 static void
 2299 cxgb_async_intr(void *data)
 2300 {
 2301         adapter_t *sc = data;
 2302 
 2303         if (cxgb_debug)
 2304                 device_printf(sc->dev, "cxgb_async_intr\n");
 2305         /*
 2306          * May need to sleep - defer to taskqueue
 2307          */
 2308         taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
 2309 }
 2310 
 2311 static void
 2312 cxgb_ext_intr_handler(void *arg, int count)
 2313 {
 2314         adapter_t *sc = (adapter_t *)arg;
 2315 
 2316         if (cxgb_debug)
 2317                 printf("cxgb_ext_intr_handler\n");
 2318 
 2319         t3_phy_intr_handler(sc);
 2320 
 2321         /* Now reenable external interrupts */
 2322         ADAPTER_LOCK(sc);
 2323         if (sc->slow_intr_mask) {
 2324                 sc->slow_intr_mask |= F_T3DBG;
 2325                 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
 2326                 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
 2327         }
 2328         ADAPTER_UNLOCK(sc);
 2329 }
 2330 
 2331 static inline int
 2332 link_poll_needed(struct port_info *p)
 2333 {
 2334         struct cphy *phy = &p->phy;
 2335 
 2336         if (phy->caps & POLL_LINK_1ST_TIME) {
 2337                 p->phy.caps &= ~POLL_LINK_1ST_TIME;
 2338                 return (1);
 2339         }
 2340 
 2341         return (p->link_fault || !(phy->caps & SUPPORTED_LINK_IRQ));
 2342 }
 2343 
 2344 static void
 2345 check_link_status(adapter_t *sc)
 2346 {
 2347         int i;
 2348 
 2349         for (i = 0; i < sc->params.nports; ++i) {
 2350                 struct port_info *p = &sc->port[i];
 2351 
 2352                 if (!isset(&sc->open_device_map, p->port_id))
 2353                         continue;
 2354 
 2355                 if (link_poll_needed(p))
 2356                         t3_link_changed(sc, i);
 2357         }
 2358 }
 2359 
 2360 static void
 2361 check_t3b2_mac(struct adapter *sc)
 2362 {
 2363         int i;
 2364 
 2365         if (sc->flags & CXGB_SHUTDOWN)
 2366                 return;
 2367 
 2368         for_each_port(sc, i) {
 2369                 struct port_info *p = &sc->port[i];
 2370                 int status;
 2371 #ifdef INVARIANTS
 2372                 struct ifnet *ifp = p->ifp;
 2373 #endif          
 2374 
 2375                 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
 2376                     !p->link_config.link_ok)
 2377                         continue;
 2378 
 2379                 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
 2380                         ("%s: state mismatch (drv_flags %x, device_map %x)",
 2381                          __func__, ifp->if_drv_flags, sc->open_device_map));
 2382 
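                      /*
                       * A watchdog status of 1 appears to mean the task
                       * toggled the MAC on its own; 2 means the MAC must be
                       * reinitialized here.
                       */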
 2383                 PORT_LOCK(p);
 2384                 status = t3b2_mac_watchdog_task(&p->mac);
 2385                 if (status == 1)
 2386                         p->mac.stats.num_toggled++;
 2387                 else if (status == 2) {
 2388                         struct cmac *mac = &p->mac;
 2389 
 2390                         cxgb_update_mac_settings(p);
 2391                         t3_link_start(&p->phy, mac, &p->link_config);
 2392                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2393                         t3_port_intr_enable(sc, p->port_id);
 2394                         p->mac.stats.num_resets++;
 2395                 }
 2396                 PORT_UNLOCK(p);
 2397         }
 2398 }
 2399 
 2400 static void
 2401 cxgb_tick(void *arg)
 2402 {
 2403         adapter_t *sc = (adapter_t *)arg;
 2404 
 2405         if (sc->flags & CXGB_SHUTDOWN)
 2406                 return;
 2407 
 2408         taskqueue_enqueue(sc->tq, &sc->tick_task);      
 2409         callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
 2410 }
 2411 
 2412 static void
 2413 cxgb_tick_handler(void *arg, int count)
 2414 {
 2415         adapter_t *sc = (adapter_t *)arg;
 2416         const struct adapter_params *p = &sc->params;
 2417         int i;
 2418         uint32_t cause, reset;
 2419 
 2420         if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
 2421                 return;
 2422 
 2423         check_link_status(sc);
 2424 
 2425         if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) 
 2426                 check_t3b2_mac(sc);
 2427 
 2428         cause = t3_read_reg(sc, A_SG_INT_CAUSE);
 2429         reset = 0;
 2430         if (cause & F_FLEMPTY) {
 2431                 struct sge_qset *qs = &sc->sge.qs[0];
 2432 
 2433                 i = 0;
 2434                 reset |= F_FLEMPTY;
 2435 
 2436                 cause = (t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) >>
 2437                          S_FL0EMPTY) & 0xffff;
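                      /*
                       * A_SG_RSPQ_FL_STATUS has one FL*EMPTY bit per free
                       * list and each qset owns two free lists: even bits
                       * belong to fl[0], odd bits to fl[1], so move to the
                       * next qset after every second bit.
                       */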
 2438                 while (cause) {
 2439                         qs->fl[i].empty += (cause & 1);
 2440                         if (i)
 2441                                 qs++;
 2442                         i ^= 1;
 2443                         cause >>= 1;
 2444                 }
 2445         }
 2446         t3_write_reg(sc, A_SG_INT_CAUSE, reset);
 2447 
 2448         for (i = 0; i < sc->params.nports; i++) {
 2449                 struct port_info *pi = &sc->port[i];
 2450                 struct ifnet *ifp = pi->ifp;
 2451                 struct cmac *mac = &pi->mac;
 2452                 struct mac_stats *mstats = &mac->stats;
 2453 
 2454                 if (!isset(&sc->open_device_map, pi->port_id))
 2455                         continue;
 2456 
 2457                 PORT_LOCK(pi);
 2458                 t3_mac_update_stats(mac);
 2459                 PORT_UNLOCK(pi);
 2460 
 2461                 ifp->if_opackets =
 2462                     mstats->tx_frames_64 +
 2463                     mstats->tx_frames_65_127 +
 2464                     mstats->tx_frames_128_255 +
 2465                     mstats->tx_frames_256_511 +
 2466                     mstats->tx_frames_512_1023 +
 2467                     mstats->tx_frames_1024_1518 +
 2468                     mstats->tx_frames_1519_max;
 2469                 
 2470                 ifp->if_ipackets =
 2471                     mstats->rx_frames_64 +
 2472                     mstats->rx_frames_65_127 +
 2473                     mstats->rx_frames_128_255 +
 2474                     mstats->rx_frames_256_511 +
 2475                     mstats->rx_frames_512_1023 +
 2476                     mstats->rx_frames_1024_1518 +
 2477                     mstats->rx_frames_1519_max;
 2478 
 2479                 ifp->if_obytes = mstats->tx_octets;
 2480                 ifp->if_ibytes = mstats->rx_octets;
 2481                 ifp->if_omcasts = mstats->tx_mcast_frames;
 2482                 ifp->if_imcasts = mstats->rx_mcast_frames;
 2483                 
 2484                 ifp->if_collisions =
 2485                     mstats->tx_total_collisions;
 2486 
 2487                 ifp->if_iqdrops = mstats->rx_cong_drops;
 2488                 
 2489                 ifp->if_oerrors =
 2490                     mstats->tx_excess_collisions +
 2491                     mstats->tx_underrun +
 2492                     mstats->tx_len_errs +
 2493                     mstats->tx_mac_internal_errs +
 2494                     mstats->tx_excess_deferral +
 2495                     mstats->tx_fcs_errs;
 2496                 ifp->if_ierrors =
 2497                     mstats->rx_jabber +
 2498                     mstats->rx_data_errs +
 2499                     mstats->rx_sequence_errs +
 2500                     mstats->rx_runt + 
 2501                     mstats->rx_too_long +
 2502                     mstats->rx_mac_internal_errs +
 2503                     mstats->rx_short +
 2504                     mstats->rx_fcs_errs;
 2505 
 2506                 if (mac->multiport)
 2507                         continue;
 2508 
 2509                 /* Count rx fifo overflows, once per second */
 2510                 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
 2511                 reset = 0;
 2512                 if (cause & F_RXFIFO_OVERFLOW) {
 2513                         mac->stats.rx_fifo_ovfl++;
 2514                         reset |= F_RXFIFO_OVERFLOW;
 2515                 }
 2516                 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
 2517         }
 2518 }
 2519 
 2520 static void
 2521 touch_bars(device_t dev)
 2522 {
 2523         /*
 2524          * Don't enable yet
 2525          */
 2526 #if !defined(__LP64__) && 0
 2527         u32 v;
 2528 
 2529         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
 2530         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
 2531         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
 2532         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
 2533         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
 2534         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
 2535 #endif
 2536 }
 2537 
 2538 static int
 2539 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
 2540 {
 2541         uint8_t *buf;
 2542         int err = 0;
 2543         u32 aligned_offset, aligned_len, *p;
 2544         struct adapter *adapter = pi->adapter;
 2545 
 2546 
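              /*
               * The SEEPROM is accessed in 32-bit words.  Round the
               * caller's window out to word boundaries; if the request was
               * unaligned, the first and last words are read back first so
               * the read-modify-write below preserves bytes outside the
               * range.
               */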
 2547         aligned_offset = offset & ~3;
 2548         aligned_len = (len + (offset & 3) + 3) & ~3;
 2549 
 2550         if (aligned_offset != offset || aligned_len != len) {
 2551                 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);              
 2552                 if (!buf)
 2553                         return (ENOMEM);
 2554                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
 2555                 if (!err && aligned_len > 4)
 2556                         err = t3_seeprom_read(adapter,
 2557                                               aligned_offset + aligned_len - 4,
 2558                                               (u32 *)&buf[aligned_len - 4]);
 2559                 if (err)
 2560                         goto out;
 2561                 memcpy(buf + (offset & 3), data, len);
 2562         } else
 2563                 buf = (uint8_t *)(uintptr_t)data;
 2564 
 2565         err = t3_seeprom_wp(adapter, 0);
 2566         if (err)
 2567                 goto out;
 2568 
 2569         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 2570                 err = t3_seeprom_write(adapter, aligned_offset, *p);
 2571                 aligned_offset += 4;
 2572         }
 2573 
 2574         if (!err)
 2575                 err = t3_seeprom_wp(adapter, 1);
 2576 out:
 2577         if (buf != data)
 2578                 free(buf, M_DEVBUF);
 2579         return (err);
 2580 }
 2581
 2583 static int
 2584 in_range(int val, int lo, int hi)
 2585 {
 2586         return (val < 0 || (val <= hi && val >= lo)); /* val < 0: unspecified */
 2587 }
 2588 
 2589 static int
 2590 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
 2591 {
 2592         return (0);
 2593 }
 2594 
 2595 static int
 2596 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 2597 {
 2598         return (0);
 2599 }
 2600 
 2601 static int
 2602 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
 2603     int fflag, struct thread *td)
 2604 {
 2605         int mmd, error = 0;
 2606         struct port_info *pi = dev->si_drv1;
 2607         adapter_t *sc = pi->adapter;
 2608 
 2609 #ifdef PRIV_SUPPORTED   
 2610         if (priv_check(td, PRIV_DRIVER)) {
 2611                 if (cxgb_debug) 
 2612                         printf("user does not have access to privileged ioctls\n");
 2613                 return (EPERM);
 2614         }
 2615 #else
 2616         if (suser(td)) {
 2617                 if (cxgb_debug)
 2618                         printf("user does not have access to privileged ioctls\n");
 2619                 return (EPERM);
 2620         }
 2621 #endif
 2622         
 2623         switch (cmd) {
 2624         case CHELSIO_GET_MIIREG: {
 2625                 uint32_t val;
 2626                 struct cphy *phy = &pi->phy;
 2627                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2628                 
 2629                 if (!phy->mdio_read)
 2630                         return (EOPNOTSUPP);
 2631                 if (is_10G(sc)) {
 2632                         mmd = mid->phy_id >> 8;
 2633                         if (!mmd)
 2634                                 mmd = MDIO_DEV_PCS;
 2635                         else if (mmd > MDIO_DEV_VEND2)
 2636                                 return (EINVAL);
 2637 
 2638                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
 2639                                              mid->reg_num, &val);
 2640                 } else
 2641                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
 2642                                              mid->reg_num & 0x1f, &val);
 2643                 if (error == 0)
 2644                         mid->val_out = val;
 2645                 break;
 2646         }
 2647         case CHELSIO_SET_MIIREG: {
 2648                 struct cphy *phy = &pi->phy;
 2649                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2650 
 2651                 if (!phy->mdio_write)
 2652                         return (EOPNOTSUPP);
 2653                 if (is_10G(sc)) {
 2654                         mmd = mid->phy_id >> 8;
 2655                         if (!mmd)
 2656                                 mmd = MDIO_DEV_PCS;
 2657                         else if (mmd > MDIO_DEV_VEND2)
 2658                                 return (EINVAL);
 2659                         
 2660                         error = phy->mdio_write(sc, mid->phy_id & 0x1f,
 2661                                               mmd, mid->reg_num, mid->val_in);
 2662                 } else
 2663                         error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
 2664                                               mid->reg_num & 0x1f,
 2665                                               mid->val_in);
 2666                 break;
 2667         }
 2668         case CHELSIO_SETREG: {
 2669                 struct ch_reg *edata = (struct ch_reg *)data;
 2670                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2671                         return (EFAULT);
 2672                 t3_write_reg(sc, edata->addr, edata->val);
 2673                 break;
 2674         }
 2675         case CHELSIO_GETREG: {
 2676                 struct ch_reg *edata = (struct ch_reg *)data;
 2677                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2678                         return (EFAULT);
 2679                 edata->val = t3_read_reg(sc, edata->addr);
 2680                 break;
 2681         }
 2682         case CHELSIO_GET_SGE_CONTEXT: {
 2683                 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
 2684                 mtx_lock_spin(&sc->sge.reg_lock);
 2685                 switch (ecntxt->cntxt_type) {
 2686                 case CNTXT_TYPE_EGRESS:
 2687                         error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
 2688                             ecntxt->data);
 2689                         break;
 2690                 case CNTXT_TYPE_FL:
 2691                         error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
 2692                             ecntxt->data);
 2693                         break;
 2694                 case CNTXT_TYPE_RSP:
 2695                         error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
 2696                             ecntxt->data);
 2697                         break;
 2698                 case CNTXT_TYPE_CQ:
 2699                         error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
 2700                             ecntxt->data);
 2701                         break;
 2702                 default:
 2703                         error = EINVAL;
 2704                         break;
 2705                 }
 2706                 mtx_unlock_spin(&sc->sge.reg_lock);
 2707                 break;
 2708         }
 2709         case CHELSIO_GET_SGE_DESC: {
 2710                 struct ch_desc *edesc = (struct ch_desc *)data;
 2711                 int ret;
 2712                 if (edesc->queue_num >= SGE_QSETS * 6)
 2713                         return (EINVAL);
 2714                 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
 2715                     edesc->queue_num % 6, edesc->idx, edesc->data);
 2716                 if (ret < 0)
 2717                         return (EINVAL);
 2718                 edesc->size = ret;
 2719                 break;
 2720         }
 2721         case CHELSIO_GET_QSET_PARAMS: {
 2722                 struct qset_params *q;
 2723                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2724                 int q1 = pi->first_qset;
 2725                 int nqsets = pi->nqsets;
 2726                 int i;
 2727 
 2728                 if (t->qset_idx >= nqsets)
 2729                         return (EINVAL);
 2730 
 2731                 i = q1 + t->qset_idx;
 2732                 q = &sc->params.sge.qset[i];
 2733                 t->rspq_size   = q->rspq_size;
 2734                 t->txq_size[0] = q->txq_size[0];
 2735                 t->txq_size[1] = q->txq_size[1];
 2736                 t->txq_size[2] = q->txq_size[2];
 2737                 t->fl_size[0]  = q->fl_size;
 2738                 t->fl_size[1]  = q->jumbo_size;
 2739                 t->polling     = q->polling;
 2740                 t->lro         = q->lro;
 2741                 t->intr_lat    = q->coalesce_usecs;
 2742                 t->cong_thres  = q->cong_thres;
 2743                 t->qnum        = i;
 2744 
 2745                 if (sc->flags & USING_MSIX)
 2746                         t->vector = rman_get_start(sc->msix_irq_res[i]);
 2747                 else
 2748                         t->vector = rman_get_start(sc->irq_res);
 2749 
 2750                 break;
 2751         }
 2752         case CHELSIO_GET_QSET_NUM: {
 2753                 struct ch_reg *edata = (struct ch_reg *)data;
 2754                 edata->val = pi->nqsets;
 2755                 break;
 2756         }
 2757         case CHELSIO_LOAD_FW: {
 2758                 uint8_t *fw_data;
 2759                 uint32_t vers;
 2760                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2761 
 2762                 /*
 2763                  * You're allowed to load a firmware only before FULL_INIT_DONE
 2764                  *
 2765                  * FW_UPTODATE is also set so the rest of the initialization
 2766                  * will not overwrite what was loaded here.  This gives you the
 2767                  * flexibility to load any firmware (and maybe shoot yourself in
 2768                  * the foot).
 2769                  */
 2770 
 2771                 ADAPTER_LOCK(sc);
 2772                 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
 2773                         ADAPTER_UNLOCK(sc);
 2774                         return (EBUSY);
 2775                 }
 2776 
 2777                 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2778                 if (!fw_data)
 2779                         error = ENOMEM;
 2780                 else
 2781                         error = copyin(t->buf, fw_data, t->len);
 2782 
 2783                 if (!error)
 2784                         error = -t3_load_fw(sc, fw_data, t->len);
 2785 
 2786                 if (t3_get_fw_version(sc, &vers) == 0) {
 2787                         snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
 2788                             "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
 2789                             G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
 2790                 }
 2791 
 2792                 if (!error)
 2793                         sc->flags |= FW_UPTODATE;
 2794 
 2795                 free(fw_data, M_DEVBUF);
 2796                 ADAPTER_UNLOCK(sc);
 2797                 break;
 2798         }
 2799         case CHELSIO_LOAD_BOOT: {
 2800                 uint8_t *boot_data;
 2801                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2802 
 2803                 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2804                 if (!boot_data)
 2805                         return (ENOMEM);
 2806 
 2807                 error = copyin(t->buf, boot_data, t->len);
 2808                 if (!error)
 2809                         error = -t3_load_boot(sc, boot_data, t->len);
 2810 
 2811                 free(boot_data, M_DEVBUF);
 2812                 break;
 2813         }
 2814         case CHELSIO_GET_PM: {
 2815                 struct ch_pm *m = (struct ch_pm *)data;
 2816                 struct tp_params *p = &sc->params.tp;
 2817 
 2818                 if (!is_offload(sc))
 2819                         return (EOPNOTSUPP);
 2820 
 2821                 m->tx_pg_sz = p->tx_pg_size;
 2822                 m->tx_num_pg = p->tx_num_pgs;
 2823                 m->rx_pg_sz  = p->rx_pg_size;
 2824                 m->rx_num_pg = p->rx_num_pgs;
 2825                 m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
 2826 
 2827                 break;
 2828         }
 2829         case CHELSIO_SET_PM: {
 2830                 struct ch_pm *m = (struct ch_pm *)data;
 2831                 struct tp_params *p = &sc->params.tp;
 2832 
 2833                 if (!is_offload(sc))
 2834                         return (EOPNOTSUPP);
 2835                 if (sc->flags & FULL_INIT_DONE)
 2836                         return (EBUSY);
 2837 
 2838                 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
 2839                     !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
 2840                         return (EINVAL);        /* not power of 2 */
 2841                 if (!(m->rx_pg_sz & 0x14000))
 2842                         return (EINVAL);        /* not 16KB or 64KB */
 2843                 if (!(m->tx_pg_sz & 0x1554000))
 2844                         return (EINVAL);        /* not a power of 4 in 16KB..16MB */
 2845                 if (m->tx_num_pg == -1)
 2846                         m->tx_num_pg = p->tx_num_pgs;
 2847                 if (m->rx_num_pg == -1)
 2848                         m->rx_num_pg = p->rx_num_pgs;
 2849                 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
 2850                         return (EINVAL);
 2851                 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
 2852                     m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
 2853                         return (EINVAL);
 2854 
 2855                 p->rx_pg_size = m->rx_pg_sz;
 2856                 p->tx_pg_size = m->tx_pg_sz;
 2857                 p->rx_num_pgs = m->rx_num_pg;
 2858                 p->tx_num_pgs = m->tx_num_pg;
 2859                 break;
 2860         }
 2861         case CHELSIO_SETMTUTAB: {
 2862                 struct ch_mtus *m = (struct ch_mtus *)data;
 2863                 int i;
 2864                 
 2865                 if (!is_offload(sc))
 2866                         return (EOPNOTSUPP);
 2867                 if (offload_running(sc))
 2868                         return (EBUSY);
 2869                 if (m->nmtus != NMTUS)
 2870                         return (EINVAL);
 2871                 if (m->mtus[0] < 81)         /* accommodate SACK */
 2872                         return (EINVAL);
 2873                 
 2874                 /*
 2875                  * MTUs must be in ascending order
 2876                  */
 2877                 for (i = 1; i < NMTUS; ++i)
 2878                         if (m->mtus[i] < m->mtus[i - 1])
 2879                                 return (EINVAL);
 2880 
 2881                 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
 2882                 break;
 2883         }
 2884         case CHELSIO_GETMTUTAB: {
 2885                 struct ch_mtus *m = (struct ch_mtus *)data;
 2886 
 2887                 if (!is_offload(sc))
 2888                         return (EOPNOTSUPP);
 2889 
 2890                 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
 2891                 m->nmtus = NMTUS;
 2892                 break;
 2893         }
 2894         case CHELSIO_GET_MEM: {
 2895                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2896                 struct mc7 *mem;
 2897                 uint8_t *useraddr;
 2898                 u64 buf[32];
 2899 
 2900                 /*
 2901                  * Use these to avoid modifying len/addr in the return
 2902                  * struct
 2903                  */
 2904                 uint32_t len = t->len, addr = t->addr;
 2905 
 2906                 if (!is_offload(sc))
 2907                         return (EOPNOTSUPP);
 2908                 if (!(sc->flags & FULL_INIT_DONE))
 2909                         return (EIO);         /* need the memory controllers */
 2910                 if ((addr & 0x7) || (len & 0x7))
 2911                         return (EINVAL);
 2912                 if (t->mem_id == MEM_CM)
 2913                         mem = &sc->cm;
 2914                 else if (t->mem_id == MEM_PMRX)
 2915                         mem = &sc->pmrx;
 2916                 else if (t->mem_id == MEM_PMTX)
 2917                         mem = &sc->pmtx;
 2918                 else
 2919                         return (EINVAL);
 2920 
 2921                 /*
 2922                  * Version scheme:
 2923                  * bits 0..9: chip version
 2924                  * bits 10..15: chip revision
 2925                  */
 2926                 t->version = 3 | (sc->params.rev << 10);
 2927                 
 2928                 /*
 2929                  * Read 256 bytes at a time as len can be large and we don't
 2930                  * want to use huge intermediate buffers.
 2931                  */
 2932                 useraddr = (uint8_t *)t->buf; 
 2933                 while (len) {
 2934                         unsigned int chunk = min(len, sizeof(buf));
 2935 
 2936                         error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
 2937                         if (error)
 2938                                 return (-error);
 2939                         if (copyout(buf, useraddr, chunk))
 2940                                 return (EFAULT);
 2941                         useraddr += chunk;
 2942                         addr += chunk;
 2943                         len -= chunk;
 2944                 }
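                      /*
                       * E.g. an aligned 1000-byte request is satisfied in
                       * chunks of 256, 256, 256 and 232 bytes; addr stays
                       * 8-byte aligned throughout, matching the 64-bit
                       * word granularity (addr / 8, chunk / 8) used above.
                       */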
 2945                 break;
 2946         }
 2947         case CHELSIO_READ_TCAM_WORD: {
 2948                 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
 2949 
 2950                 if (!is_offload(sc))
 2951                         return (EOPNOTSUPP);
 2952                 if (!(sc->flags & FULL_INIT_DONE))
 2953                         return (EIO);         /* need MC5 */            
 2954                 return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
 2956         }
 2957         case CHELSIO_SET_TRACE_FILTER: {
 2958                 struct ch_trace *t = (struct ch_trace *)data;
 2959                 const struct trace_params *tp;
 2960 
 2961                 tp = (const struct trace_params *)&t->sip;
 2962                 if (t->config_tx)
 2963                         t3_config_trace_filter(sc, tp, 0, t->invert_match,
 2964                                                t->trace_tx);
 2965                 if (t->config_rx)
 2966                         t3_config_trace_filter(sc, tp, 1, t->invert_match,
 2967                                                t->trace_rx);
 2968                 break;
 2969         }
 2970         case CHELSIO_SET_PKTSCHED: {
 2971                 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
 2972                 if (sc->open_device_map == 0)
 2973                         return (EAGAIN);
 2974                 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
 2975                     p->binding);
 2976                 break;
 2977         }
 2978         case CHELSIO_IFCONF_GETREGS: {
 2979                 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
 2980                 int reglen = cxgb_get_regs_len();
 2981                 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
 2982                 if (buf == NULL) {
 2983                         return (ENOMEM);
 2984                 }
 2985                 if (regs->len > reglen)
 2986                         regs->len = reglen;
 2987                 else if (regs->len < reglen)
 2988                         error = ENOBUFS;
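                      /*
                       * The dump is all-or-nothing: a user buffer smaller
                       * than the 3KB register map gets ENOBUFS rather than
                       * a truncated copy.
                       */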
 2989 
 2990                 if (!error) {
 2991                         cxgb_get_regs(sc, regs, buf);
 2992                         error = copyout(buf, regs->data, reglen);
 2993                 }
 2994                 free(buf, M_DEVBUF);
 2995 
 2996                 break;
 2997         }
 2998         case CHELSIO_SET_HW_SCHED: {
 2999                 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
 3000                 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
 3001 
 3002                 if ((sc->flags & FULL_INIT_DONE) == 0)
 3003                         return (EAGAIN);       /* need TP to be initialized */
 3004                 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
 3005                     !in_range(t->channel, 0, 1) ||
 3006                     !in_range(t->kbps, 0, 10000000) ||
 3007                     !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
 3008                     !in_range(t->flow_ipg, 0,
 3009                               dack_ticks_to_usec(sc, 0x7ff)))
 3010                         return (EINVAL);
 3011 
 3012                 if (t->kbps >= 0) {
 3013                         error = t3_config_sched(sc, t->kbps, t->sched);
 3014                         if (error < 0)
 3015                                 return (-error);
 3016                 }
 3017                 if (t->class_ipg >= 0)
 3018                         t3_set_sched_ipg(sc, t->sched, t->class_ipg);
 3019                 if (t->flow_ipg >= 0) {
 3020                         t->flow_ipg *= 1000;     /* us -> ns */
 3021                         t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
 3022                 }
 3023                 if (t->mode >= 0) {
 3024                         int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
 3025 
 3026                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3027                                          bit, t->mode ? bit : 0);
 3028                 }
 3029                 if (t->channel >= 0)
 3030                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3031                                          1 << t->sched, t->channel << t->sched);
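                      /*
                       * Both writes above are single-bit field updates of
                       * A_TP_TX_MOD_QUEUE_REQ_MAP: for, say, t->sched == 2
                       * the mode update flips bit (S_TX_MOD_TIMER_MODE + 2)
                       * and the channel update sets bit 2 to the requested
                       * channel (0 or 1).
                       */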
 3032                 break;
 3033         }
 3034         case CHELSIO_GET_EEPROM: {
 3035                 int i;
 3036                 struct ch_eeprom *e = (struct ch_eeprom *)data;
 3037                 uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
 3038 
 3039                 if (buf == NULL) {
 3040                         return (ENOMEM);
 3041                 }
 3042                 e->magic = EEPROM_MAGIC;
 3043                 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
 3044                         error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
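                      /*
                       * The loop rounds e->offset down to a 32-bit boundary
                       * and reads whole words, so e.g. offset 6, len 8
                       * fetches the words at 4, 8 and 12 into buf, and the
                       * copyout below returns exactly bytes 6..13.
                       */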
 3045 
 3046                 if (!error)
 3047                         error = copyout(buf + e->offset, e->data, e->len);
 3048 
 3049                 free(buf, M_DEVBUF);
 3050                 break;
 3051         }
 3052         case CHELSIO_CLEAR_STATS: {
 3053                 if (!(sc->flags & FULL_INIT_DONE))
 3054                         return (EAGAIN);
 3055 
 3056                 PORT_LOCK(pi);
 3057                 t3_mac_update_stats(&pi->mac);
 3058                 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
 3059                 PORT_UNLOCK(pi);
 3060                 break;
 3061         }
 3062         case CHELSIO_GET_UP_LA: {
 3063                 struct ch_up_la *la = (struct ch_up_la *)data;
 3064                 uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3065                 if (buf == NULL) {
 3066                         return (ENOMEM);
 3067                 }
 3068                 if (la->bufsize < LA_BUFSIZE)
 3069                         error = ENOBUFS;
 3070 
 3071                 if (!error)
 3072                         error = -t3_get_up_la(sc, &la->stopped, &la->idx,
 3073                                               &la->bufsize, buf);
 3074                 if (!error)
 3075                         error = copyout(buf, la->data, la->bufsize);
 3076 
 3077                 free(buf, M_DEVBUF);
 3078                 break;
 3079         }
 3080         case CHELSIO_GET_UP_IOQS: {
 3081                 struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
 3082                 uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3083                 uint32_t *v;
 3084 
 3085                 if (buf == NULL) {
 3086                         return (ENOMEM);
 3087                 }
 3088                 if (ioqs->bufsize < IOQS_BUFSIZE)
 3089                         error = ENOBUFS;
 3090 
 3091                 if (!error)
 3092                         error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
 3093 
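                      /*
                       * t3_get_up_ioqs() places four 32-bit status words
                       * ahead of the queue records, so on success they are
                       * peeled off into the reply below and bufsize shrinks
                       * by 16 bytes before the remainder is copied out.
                       */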
 3094                 if (!error) {
 3095                         v = (uint32_t *)buf;
 3096 
 3097                         ioqs->bufsize -= 4 * sizeof(uint32_t);
 3098                         ioqs->ioq_rx_enable = *v++;
 3099                         ioqs->ioq_tx_enable = *v++;
 3100                         ioqs->ioq_rx_status = *v++;
 3101                         ioqs->ioq_tx_status = *v++;
 3102 
 3103                         error = copyout(v, ioqs->data, ioqs->bufsize);
 3104                 }
 3105 
 3106                 free(buf, M_DEVBUF);
 3107                 break;
 3108         }
 3109         default:
 3110                 return (EOPNOTSUPP);
 3112         }
 3113 
 3114         return (error);
 3115 }
 3116 
 3117 static __inline void
 3118 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
 3119     unsigned int end)
 3120 {
 3121         uint32_t *p = (uint32_t *)(buf + start);
 3122 
 3123         for ( ; start <= end; start += sizeof(uint32_t))
 3124                 *p++ = t3_read_reg(ap, start);
 3125 }
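      /*
       * Note that reg_block_dump() starts writing at buf + start, so each
       * value lands at the buffer offset equal to its register address:
       * the dump is positional, and ranges that are never dumped stay
       * zeroed (see the memset() in cxgb_get_regs() below).
       */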
 3126 
 3127 #define T3_REGMAP_SIZE (3 * 1024)
 3128 static int
 3129 cxgb_get_regs_len(void)
 3130 {
 3131         return (T3_REGMAP_SIZE);
 3132 }
 3133 
 3134 static void
 3135 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
 3136 {           
 3137         
 3138         /*
 3139          * Version scheme:
 3140          * bits 0..9: chip version
 3141          * bits 10..15: chip revision
 3142          * bit 31: set for PCIe cards
 3143          */
 3144         regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
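              /*
               * A consumer of the dump can decode the word as, e.g.:
               *   chip_version  = version & 0x3ff;
               *   chip_revision = (version >> 10) & 0x3f;
               *   pcie          = (version >> 31) & 1;
               */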
 3145 
 3146         /*
 3147          * We skip the MAC statistics registers because they are clear-on-read.
 3148          * Also reading multi-register stats would need to synchronize with the
 3149          * periodic mac stats accumulation.  Hard to justify the complexity.
 3150          */
 3151         memset(buf, 0, cxgb_get_regs_len());
 3152         reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
 3153         reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
 3154         reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
 3155         reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
 3156         reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
 3157         reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
 3158                        XGM_REG(A_XGM_SERDES_STAT3, 1));
 3159         reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
 3160                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
 3161 }
 3162 
 3163 
 3164 MODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
