FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_main.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2009, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/7.4/sys/dev/cxgb/cxgb_main.c 215368 2010-11-16 04:40:03Z sobomax $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/bus.h>
   37 #include <sys/module.h>
   38 #include <sys/pciio.h>
   39 #include <sys/conf.h>
   40 #include <machine/bus.h>
   41 #include <machine/resource.h>
   42 #include <sys/bus_dma.h>
   43 #include <sys/ktr.h>
   44 #include <sys/rman.h>
   45 #include <sys/ioccom.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/linker.h>
   48 #include <sys/firmware.h>
   49 #include <sys/socket.h>
   50 #include <sys/sockio.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/syslog.h>
   54 #include <sys/queue.h>
   55 #include <sys/taskqueue.h>
   56 #include <sys/proc.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_arp.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <netinet/in_systm.h>
   68 #include <netinet/in.h>
   69 #include <netinet/if_ether.h>
   70 #include <netinet/ip.h>
   72 #include <netinet/tcp.h>
   73 #include <netinet/udp.h>
   74 
   75 #include <dev/pci/pcireg.h>
   76 #include <dev/pci/pcivar.h>
   77 #include <dev/pci/pci_private.h>
   78 
   79 #ifdef CONFIG_DEFINED
   80 #include <cxgb_include.h>
   81 #else
   82 #include <dev/cxgb/cxgb_include.h>
   83 #endif
   84 
   85 #ifdef PRIV_SUPPORTED
   86 #include <sys/priv.h>
   87 #endif
   88 
   89 static int cxgb_setup_interrupts(adapter_t *);
   90 static void cxgb_teardown_interrupts(adapter_t *);
   91 static void cxgb_init(void *);
   92 static int cxgb_init_locked(struct port_info *);
   93 static int cxgb_uninit_locked(struct port_info *);
   94 static int cxgb_uninit_synchronized(struct port_info *);
   95 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
   96 static int cxgb_media_change(struct ifnet *);
   97 static int cxgb_ifm_type(int);
   98 static void cxgb_build_medialist(struct port_info *);
   99 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
  100 static int setup_sge_qsets(adapter_t *);
  101 static void cxgb_async_intr(void *);
  102 static void cxgb_tick_handler(void *, int);
  103 static void cxgb_tick(void *);
  104 static void link_check_callout(void *);
  105 static void check_link_status(void *, int);
  106 static void setup_rss(adapter_t *sc);
  107 static int alloc_filters(struct adapter *);
  108 static int setup_hw_filters(struct adapter *);
  109 static int set_filter(struct adapter *, int, const struct filter_info *);
  110 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
  111     unsigned int, u64, u64);
  112 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
  113     unsigned int, u64, u64);
  114 
  115 /* Attachment glue for the PCI controller end of the device.  Each port of
  116  * the device is attached separately, as defined later.
  117  */
  118 static int cxgb_controller_probe(device_t);
  119 static int cxgb_controller_attach(device_t);
  120 static int cxgb_controller_detach(device_t);
  121 static void cxgb_free(struct adapter *);
  122 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
  123     unsigned int end);
  124 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
  125 static int cxgb_get_regs_len(void);
  126 static int offload_open(struct port_info *pi);
  127 static void touch_bars(device_t dev);
  128 static int offload_close(struct t3cdev *tdev);
  129 static void cxgb_update_mac_settings(struct port_info *p);
  130 
  131 static device_method_t cxgb_controller_methods[] = {
  132         DEVMETHOD(device_probe,         cxgb_controller_probe),
  133         DEVMETHOD(device_attach,        cxgb_controller_attach),
  134         DEVMETHOD(device_detach,        cxgb_controller_detach),
  135 
  136         /* bus interface */
  137         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  138         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  139 
  140         { 0, 0 }
  141 };
  142 
  143 static driver_t cxgb_controller_driver = {
  144         "cxgbc",
  145         cxgb_controller_methods,
  146         sizeof(struct adapter)
  147 };
  148 
  149 static devclass_t       cxgb_controller_devclass;
  150 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
  151 
  152 /*
  153  * Attachment glue for the ports.  Attachment is done directly to the
  154  * controller device.
  155  */
  156 static int cxgb_port_probe(device_t);
  157 static int cxgb_port_attach(device_t);
  158 static int cxgb_port_detach(device_t);
  159 
  160 static device_method_t cxgb_port_methods[] = {
  161         DEVMETHOD(device_probe,         cxgb_port_probe),
  162         DEVMETHOD(device_attach,        cxgb_port_attach),
  163         DEVMETHOD(device_detach,        cxgb_port_detach),
  164         { 0, 0 }
  165 };
  166 
  167 static driver_t cxgb_port_driver = {
  168         "cxgb",
  169         cxgb_port_methods,
  170         0
  171 };
  172 
  173 static d_ioctl_t cxgb_extension_ioctl;
  174 static d_open_t cxgb_extension_open;
  175 static d_close_t cxgb_extension_close;
  176 
  177 static struct cdevsw cxgb_cdevsw = {
  178        .d_version =    D_VERSION,
  179        .d_flags =      0,
  180        .d_open =       cxgb_extension_open,
  181        .d_close =      cxgb_extension_close,
  182        .d_ioctl =      cxgb_extension_ioctl,
  183        .d_name =       "cxgb",
  184 };
  185 
  186 static devclass_t       cxgb_port_devclass;
  187 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
  188 
  189 /*
  190  * The driver uses the best interrupt scheme available on a platform in the
  191  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
  192  * of these schemes the driver may consider as follows:
  193  *
  194  * msi = 2: choose from among all three options
  195  * msi = 1: only consider MSI and pin interrupts
  196  * msi = 0: force pin interrupts
  197  */
  198 static int msi_allowed = 2;
  199 
  200 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
  201 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
  202 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
  203     "MSI-X, MSI, INTx selector");
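/*
 * Example: since these are CTLFLAG_RDTUN tunables, they can only be set
 * at boot time.  A hypothetical /boot/loader.conf entry restricting the
 * driver to MSI or INTx would be:
 *
 *     hw.cxgb.msi_allowed="1"
 *
 * The same mechanism applies to the other hw.cxgb.* tunables below.
 */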
  204 
  205 /*
  206  * The driver enables offload by default.
  207  * To disable it, use ofld_disable = 1.
  208  */
  209 static int ofld_disable = 0;
  210 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
  211 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
  212     "disable ULP offload");
  213 
  214 /*
  215  * The driver uses an auto-queue algorithm by default.
  216  * To disable it and force a single queue-set per port, use multiq = 0
  217  */
  218 static int multiq = 1;
  219 TUNABLE_INT("hw.cxgb.multiq", &multiq);
  220 SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
  221     "use min(ncpus/ports, 8) queue-sets per port");
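/*
 * Worked example (assuming SGE_QSETS == 8): with multiq left enabled on a
 * 2-port adapter in an 8-CPU system, cxgb_controller_attach() computes
 * min(SGE_QSETS / nports, mp_ncpus) = min(4, 8) = 4 queue-sets per port,
 * so MSI-X needs nports * 4 + 1 = 9 vectors (one per queue-set plus one
 * for adapter-wide events).
 */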
  222 
  223 /*
  224  * By default the driver will not update the firmware unless it was
  225  * compiled against a newer version.
  226  */
  228 static int force_fw_update = 0;
  229 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
  230 SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
  231     "update firmware even if up to date");
  232 
  233 int cxgb_use_16k_clusters = 1;
  234 TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
  235 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
  236     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
  237 
  238 /*
  239  * Tune the size of the output queue.
  240  */
  241 int cxgb_snd_queue_len = IFQ_MAXLEN;
  242 TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
  243 SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
  244     &cxgb_snd_queue_len, 0, "send queue size");
  245 
  246 static int nfilters = -1;
  247 TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
  248 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
  249     &nfilters, 0, "max number of entries in the filter table");
  250 
  251 enum {
  252         MAX_TXQ_ENTRIES      = 16384,
  253         MAX_CTRL_TXQ_ENTRIES = 1024,
  254         MAX_RSPQ_ENTRIES     = 16384,
  255         MAX_RX_BUFFERS       = 16384,
  256         MAX_RX_JUMBO_BUFFERS = 16384,
  257         MIN_TXQ_ENTRIES      = 4,
  258         MIN_CTRL_TXQ_ENTRIES = 4,
  259         MIN_RSPQ_ENTRIES     = 32,
  260         MIN_FL_ENTRIES       = 32,
  261         MIN_FL_JUMBO_ENTRIES = 32
  262 };
  263 
  264 struct filter_info {
  265         u32 sip;
  266         u32 sip_mask;
  267         u32 dip;
  268         u16 sport;
  269         u16 dport;
  270         u32 vlan:12;
  271         u32 vlan_prio:3;
  272         u32 mac_hit:1;
  273         u32 mac_idx:4;
  274         u32 mac_vld:1;
  275         u32 pkt_type:2;
  276         u32 report_filter_id:1;
  277         u32 pass:1;
  278         u32 rss:1;
  279         u32 qset:3;
  280         u32 locked:1;
  281         u32 valid:1;
  282 };
  283 
  284 enum { FILTER_NO_VLAN_PRI = 7 };
  285 
  286 #define EEPROM_MAGIC 0x38E2F10C
  287 
  288 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
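/* For instance, assuming MAX_NPORTS == 4, PORT_MASK evaluates to 0xf. */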
  289 
  290 /* Table for probing the cards.  The desc field isn't actually used */
  291 struct cxgb_ident {
  292         uint16_t        vendor;
  293         uint16_t        device;
  294         int             index;
  295         char            *desc;
  296 } cxgb_identifiers[] = {
  297         {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
  298         {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
  299         {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
  300         {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
  301         {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
  302         {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
  303         {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
  304         {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
  305         {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
  306         {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
  307         {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
  308         {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
  309         {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
  310         {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
  311         {0, 0, 0, NULL}
  312 };
  313 
  314 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
  315 
  316 
  317 static __inline char
  318 t3rev2char(struct adapter *adapter)
  319 {
  320         char rev = 'z';
  321 
  322         switch(adapter->params.rev) {
  323         case T3_REV_A:
  324                 rev = 'a';
  325                 break;
  326         case T3_REV_B:
  327         case T3_REV_B2:
  328                 rev = 'b';
  329                 break;
  330         case T3_REV_C:
  331                 rev = 'c';
  332                 break;
  333         }
  334         return rev;
  335 }
  336 
  337 static struct cxgb_ident *
  338 cxgb_get_ident(device_t dev)
  339 {
  340         struct cxgb_ident *id;
  341 
  342         for (id = cxgb_identifiers; id->desc != NULL; id++) {
  343                 if ((id->vendor == pci_get_vendor(dev)) &&
  344                     (id->device == pci_get_device(dev))) {
  345                         return (id);
  346                 }
  347         }
  348         return (NULL);
  349 }
  350 
  351 static const struct adapter_info *
  352 cxgb_get_adapter_info(device_t dev)
  353 {
  354         struct cxgb_ident *id;
  355         const struct adapter_info *ai;
  356 
  357         id = cxgb_get_ident(dev);
  358         if (id == NULL)
  359                 return (NULL);
  360 
  361         ai = t3_get_adapter_info(id->index);
  362 
  363         return (ai);
  364 }
  365 
  366 static int
  367 cxgb_controller_probe(device_t dev)
  368 {
  369         const struct adapter_info *ai;
  370         char *ports, buf[80];
  371         int nports;
  372 
  373         ai = cxgb_get_adapter_info(dev);
  374         if (ai == NULL)
  375                 return (ENXIO);
  376 
  377         nports = ai->nports0 + ai->nports1;
  378         if (nports == 1)
  379                 ports = "port";
  380         else
  381                 ports = "ports";
  382 
  383         snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
  384         device_set_desc_copy(dev, buf);
  385         return (BUS_PROBE_DEFAULT);
  386 }
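/*
 * A hypothetical two-port adapter whose adapter_info desc is "T302E" would
 * probe with the description "T302E, 2 ports", which dmesg renders roughly
 * as: cxgbc0: <T302E, 2 ports>.
 */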
  387 
  388 #define FW_FNAME "cxgb_t3fw"
  389 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
  390 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
  391 
  392 static int
  393 upgrade_fw(adapter_t *sc)
  394 {
  395 #ifdef FIRMWARE_LATEST
  396         const struct firmware *fw;
  397 #else
  398         struct firmware *fw;
  399 #endif  
  400         int status;
  401         u32 vers;
  402         
  403         if ((fw = firmware_get(FW_FNAME)) == NULL)  {
  404                 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
  405                 return (ENOENT);
  406         } else
  407                 device_printf(sc->dev, "installing firmware on card\n");
  408         status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
  409 
  410         if (status != 0) {
  411                 device_printf(sc->dev, "failed to install firmware: %d\n",
  412                     status);
  413         } else {
  414                 t3_get_fw_version(sc, &vers);
  415                 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  416                     G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  417                     G_FW_VERSION_MICRO(vers));
  418         }
  419 
  420         firmware_put(fw, FIRMWARE_UNLOAD);
  421 
  422         return (status);        
  423 }
  424 
  425 /*
  426  * The cxgb_controller_attach function is responsible for the initial
  427  * bringup of the device.  Its responsibilities include:
  428  *
  429  *  1. Determine if the device supports MSI or MSI-X.
  430  *  2. Allocate bus resources so that we can access the Base Address Register.
  431  *  3. Create and initialize mutexes for the controller and its control
  432  *     logic such as SGE and MDIO.
  433  *  4. Call the hardware-specific setup routine for the adapter as a whole.
  434  *  5. Allocate the BAR for doing MSI-X.
  435  *  6. Set up the line interrupt iff MSI-X is not supported.
  436  *  7. Create the driver's taskq.
  437  *  8. Start one task queue service thread.
  438  *  9. Check if the firmware and SRAM are up-to-date.  They will be
  439  *     auto-updated later (before FULL_INIT_DONE), if required.
  440  * 10. Create a child device for each MAC (port).
  441  * 11. Initialize T3 private state.
  442  * 12. Trigger the LED.
  443  * 13. Set up offload iff supported.
  444  * 14. Reset/restart the tick callout.
  445  * 15. Attach sysctls.
  446  *
  447  * NOTE: Any modification to, or deviation from, this sequence in the code
  448  * MUST be reflected in the list above.  Failure to do so will result in
  449  * problems on various error conditions, including link flapping.
  450  */
  451 static int
  452 cxgb_controller_attach(device_t dev)
  453 {
  454         device_t child;
  455         const struct adapter_info *ai;
  456         struct adapter *sc;
  457         int i, error = 0;
  458         uint32_t vers;
  459         int port_qsets = 1;
  460 #ifdef MSI_SUPPORTED
  461         int msi_needed, reg;
  462 #endif
  463         char buf[80];
  464 
  465         sc = device_get_softc(dev);
  466         sc->dev = dev;
  467         sc->msi_count = 0;
  468         ai = cxgb_get_adapter_info(dev);
  469 
  470         /*
  471          * XXX not really attach-related: check the PCIe link width and raise the max read request size.
  472          */
  473 #ifdef MSI_SUPPORTED    
  474         /* find the PCIe link width and set max read request to 4KB */
  475         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
  476                 uint16_t lnk;
  477 
  478                 lnk = pci_read_config(dev, reg + PCIR_EXPRESS_LINK_STA, 2);
  479                 sc->link_width = (lnk & PCIM_LINK_STA_WIDTH) >> 4;
  480                 if (sc->link_width < 8 &&
  481                     (ai->caps & SUPPORTED_10000baseT_Full)) {
  482                         device_printf(sc->dev,
  483                             "PCIe x%d Link, expect reduced performance\n",
  484                             sc->link_width);
  485                 }
  486 
  487                 pci_set_max_read_req(dev, 4096);
  488         }
  489 #endif
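        /*
         * Worked example (hypothetical register value): a Link Status of
         * 0x1041 gives (0x1041 & PCIM_LINK_STA_WIDTH) >> 4 == 4, i.e. a
         * negotiated PCIe x4 link, which is < 8 and would trigger the
         * reduced-performance warning above on a 10GbE-capable adapter.
         */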
  490         touch_bars(dev);
  491         pci_enable_busmaster(dev);
  492         /*
  493          * Allocate the registers and make them available to the driver.
  494          * The registers that we care about for NIC mode are in BAR 0
  495          */
  496         sc->regs_rid = PCIR_BAR(0);
  497         if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  498             &sc->regs_rid, RF_ACTIVE)) == NULL) {
  499                 device_printf(dev, "Cannot allocate BAR region 0\n");
  500                 return (ENXIO);
  501         }
  502         sc->udbs_rid = PCIR_BAR(2);
  503         sc->udbs_res = NULL;
  504         if (is_offload(sc) &&
  505             ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  506                    &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
  507                 device_printf(dev, "Cannot allocate BAR region 2\n");
  508                 error = ENXIO;
  509                 goto out;
  510         }
  511 
  512         snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
  513             device_get_unit(dev));
  514         ADAPTER_LOCK_INIT(sc, sc->lockbuf);
  515 
  516         snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
  517             device_get_unit(dev));
  518         snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
  519             device_get_unit(dev));
  520         snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
  521             device_get_unit(dev));
  522         
  523         MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
  524         MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
  525         MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
  526         
  527         sc->bt = rman_get_bustag(sc->regs_res);
  528         sc->bh = rman_get_bushandle(sc->regs_res);
  529         sc->mmio_len = rman_get_size(sc->regs_res);
  530 
  531         for (i = 0; i < MAX_NPORTS; i++)
  532                 sc->port[i].adapter = sc;
  533 
  534         if (t3_prep_adapter(sc, ai, 1) < 0) {
  535                 printf("prep adapter failed\n");
  536                 error = ENODEV;
  537                 goto out;
  538         }
  539         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
  540          * enough messages for the queue sets.  If that fails, try falling
  541          * back to MSI.  If that fails, then try falling back to the legacy
  542          * interrupt pin model.
  543          */
  544 #ifdef MSI_SUPPORTED
  545 
  546         sc->msix_regs_rid = 0x20;
  547         if ((msi_allowed >= 2) &&
  548             (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  549             &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
  550 
  551                 if (multiq)
  552                         port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
  553                 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
  554 
  555                 if (pci_msix_count(dev) == 0 ||
  556                     (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
  557                     sc->msi_count != msi_needed) {
  558                         device_printf(dev, "alloc msix failed - "
  559                                       "msi_count=%d, msi_needed=%d, err=%d; "
  560                                       "will try MSI\n", sc->msi_count,
  561                                       msi_needed, error);
  562                         sc->msi_count = 0;
  563                         port_qsets = 1;
  564                         pci_release_msi(dev);
  565                         bus_release_resource(dev, SYS_RES_MEMORY,
  566                             sc->msix_regs_rid, sc->msix_regs_res);
  567                         sc->msix_regs_res = NULL;
  568                 } else {
  569                         sc->flags |= USING_MSIX;
  570                         sc->cxgb_intr = cxgb_async_intr;
  571                         device_printf(dev,
  572                                       "using MSI-X interrupts (%u vectors)\n",
  573                                       sc->msi_count);
  574                 }
  575         }
  576 
  577         if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
  578                 sc->msi_count = 1;
  579                 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
  580                         device_printf(dev, "alloc msi failed - "
  581                                       "err=%d; will try INTx\n", error);
  582                         sc->msi_count = 0;
  583                         port_qsets = 1;
  584                         pci_release_msi(dev);
  585                 } else {
  586                         sc->flags |= USING_MSI;
  587                         sc->cxgb_intr = t3_intr_msi;
  588                         device_printf(dev, "using MSI interrupts\n");
  589                 }
  590         }
  591 #endif
  592         if (sc->msi_count == 0) {
  593                 device_printf(dev, "using line interrupts\n");
  594                 sc->cxgb_intr = t3b_intr;
  595         }
  596 
  597         /* Create a private taskqueue thread for handling driver events */
  598 #ifdef TASKQUEUE_CURRENT        
  599         sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
  600             taskqueue_thread_enqueue, &sc->tq);
  601 #else
  602         sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
  603             taskqueue_thread_enqueue, &sc->tq);
  604 #endif  
  605         if (sc->tq == NULL) {
  606                 device_printf(dev, "failed to allocate controller task queue\n");
  607                 goto out;
  608         }
  609 
  610         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
  611             device_get_nameunit(dev));
  612         TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
  613 
  614         
  615         /* Create a periodic callout for checking adapter status */
  616         callout_init(&sc->cxgb_tick_ch, TRUE);
  617         
  618         if (t3_check_fw_version(sc) < 0 || force_fw_update) {
  619                 /*
  620                  * Warn user that a firmware update will be attempted in init.
  621                  */
  622                 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
  623                     FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  624                 sc->flags &= ~FW_UPTODATE;
  625         } else {
  626                 sc->flags |= FW_UPTODATE;
  627         }
  628 
  629         if (t3_check_tpsram_version(sc) < 0) {
  630                 /*
  631                  * Warn user that an SRAM update will be attempted in init.
  632                  */
  633                 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
  634                     t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  635                 sc->flags &= ~TPS_UPTODATE;
  636         } else {
  637                 sc->flags |= TPS_UPTODATE;
  638         }
  639         
  640         /*
  641          * Create a child device for each MAC.  The ethernet attachment
  642          * will be done in these children.
  643          */     
  644         for (i = 0; i < (sc)->params.nports; i++) {
  645                 struct port_info *pi;
  646                 
  647                 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
  648                         device_printf(dev, "failed to add child port\n");
  649                         error = EINVAL;
  650                         goto out;
  651                 }
  652                 pi = &sc->port[i];
  653                 pi->adapter = sc;
  654                 pi->nqsets = port_qsets;
  655                 pi->first_qset = i*port_qsets;
  656                 pi->port_id = i;
  657                 pi->tx_chan = i >= ai->nports0;
  658                 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
  659                 sc->rxpkt_map[pi->txpkt_intf] = i;
  660                 sc->port[i].tx_chan = i >= ai->nports0;
  661                 sc->portdev[i] = child;
  662                 device_set_softc(child, pi);
  663         }
  664         if ((error = bus_generic_attach(dev)) != 0)
  665                 goto out;
  666 
  667         /* initialize sge private state */
  668         t3_sge_init_adapter(sc);
  669 
  670         t3_led_ready(sc);
  671         
  672         cxgb_offload_init();
  673         if (is_offload(sc)) {
  674                 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  675                 cxgb_adapter_ofld(sc);
  676         }
  677         error = t3_get_fw_version(sc, &vers);
  678         if (error)
  679                 goto out;
  680 
  681         snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  682             G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  683             G_FW_VERSION_MICRO(vers));
  684 
  685         snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
  686                  ai->desc, is_offload(sc) ? "R" : "",
  687                  sc->params.vpd.ec, sc->params.vpd.sn);
  688         device_set_desc_copy(dev, buf);
  689 
  690         snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
  691                  sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
  692                  sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
  693 
  694         device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
  695         callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
  696         t3_add_attach_sysctls(sc);
  697 out:
  698         if (error)
  699                 cxgb_free(sc);
  700 
  701         return (error);
  702 }
  703 
  704 /*
  705  * The cxgb_controller_detach routine is called when the device is
  706  * unloaded from the system.
  707  */
  708 
  709 static int
  710 cxgb_controller_detach(device_t dev)
  711 {
  712         struct adapter *sc;
  713 
  714         sc = device_get_softc(dev);
  715 
  716         cxgb_free(sc);
  717 
  718         return (0);
  719 }
  720 
  721 /*
  722  * cxgb_free() is called by the cxgb_controller_detach() routine
  723  * to tear down the structures that were built up in
  724  * cxgb_controller_attach(), and should be the final piece of work
  725  * done when fully unloading the driver.  Its responsibilities include:
  726  *
  728  *  1. Shutting down the threads started by the cxgb_controller_attach()
  729  *     routine.
  730  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
  731  *  3. Detaching all of the port devices created during the
  732  *     cxgb_controller_attach() routine.
  733  *  4. Removing the device children created via cxgb_controller_attach().
  734  *  5. Releasing PCI resources associated with the device.
  735  *  6. Turning off the offload support, iff it was turned on.
  736  *  7. Destroying the mutexes created in cxgb_controller_attach().
  737  *
  738  */
  739 static void
  740 cxgb_free(struct adapter *sc)
  741 {
  742         int i;
  743 
  744         ADAPTER_LOCK(sc);
  745         sc->flags |= CXGB_SHUTDOWN;
  746         ADAPTER_UNLOCK(sc);
  747 
  748         cxgb_pcpu_shutdown_threads(sc);
  749 
  750         t3_sge_deinit_sw(sc);
  751         /*
  752          * Make sure all child devices are gone.
  753          */
  754         bus_generic_detach(sc->dev);
  755         for (i = 0; i < (sc)->params.nports; i++) {
  756                 if (sc->portdev[i] &&
  757                     device_delete_child(sc->dev, sc->portdev[i]) != 0)
  758                         device_printf(sc->dev, "failed to delete child port\n");
  759         }
  760 
  761         /*
  762          * At this point, it is as if cxgb_port_detach has run on all ports, and
  763          * cxgb_down has run on the adapter.  All interrupts have been silenced,
  764          * all open devices have been closed.
  765          */
  766         KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
  767                                            __func__, sc->open_device_map));
  768         for (i = 0; i < sc->params.nports; i++) {
  769                 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
  770                                                   __func__, i));
  771         }
  772 
  773         /*
  774          * Finish off the adapter's callouts.
  775          */
  776         callout_drain(&sc->cxgb_tick_ch);
  777         callout_drain(&sc->sge_timer_ch);
  778 
  779         /*
  780          * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
  781          * sysctls are cleaned up by the kernel linker.
  782          */
  783         if (sc->flags & FULL_INIT_DONE) {
  784                 t3_free_sge_resources(sc);
  785                 sc->flags &= ~FULL_INIT_DONE;
  786         }
  787 
  788         /*
  789          * Release all interrupt resources.
  790          */
  791         cxgb_teardown_interrupts(sc);
  792 
  793 #ifdef MSI_SUPPORTED
  794         if (sc->flags & (USING_MSI | USING_MSIX)) {
  795                 device_printf(sc->dev, "releasing msi message(s)\n");
  796                 pci_release_msi(sc->dev);
  797         } else {
  798                 device_printf(sc->dev, "no msi message to release\n");
  799         }
  800 
  801         if (sc->msix_regs_res != NULL) {
  802                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
  803                     sc->msix_regs_res);
  804         }
  805 #endif
  806 
  807         /*
  808          * Free the adapter's taskqueue.
  809          */
  810         if (sc->tq != NULL) {
  811                 taskqueue_free(sc->tq);
  812                 sc->tq = NULL;
  813         }
  814         
  815         if (is_offload(sc)) {
  816                 clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  817                 cxgb_adapter_unofld(sc);
  818         }
  819 
  820 #ifdef notyet
  821         if (sc->flags & CXGB_OFLD_INIT)
  822                 cxgb_offload_deactivate(sc);
  823 #endif
  824         free(sc->filters, M_DEVBUF);
  825         t3_sge_free(sc);
  826 
  827         cxgb_offload_exit();
  828 
  829         if (sc->udbs_res != NULL)
  830                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
  831                     sc->udbs_res);
  832 
  833         if (sc->regs_res != NULL)
  834                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
  835                     sc->regs_res);
  836 
  837         MTX_DESTROY(&sc->mdio_lock);
  838         MTX_DESTROY(&sc->sge.reg_lock);
  839         MTX_DESTROY(&sc->elmer_lock);
  840         ADAPTER_LOCK_DEINIT(sc);
  841 }
  842 
  843 /**
  844  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
  845  *      @sc: the controller softc
  846  *
  847  *      Determines how many sets of SGE queues to use and initializes them.
  848  *      We support multiple queue sets per port if we have MSI-X, otherwise
  849  *      just one queue set per port.
  850  */
  851 static int
  852 setup_sge_qsets(adapter_t *sc)
  853 {
  854         int i, j, err, irq_idx = 0, qset_idx = 0;
  855         u_int ntxq = SGE_TXQ_PER_SET;
  856 
  857         if ((err = t3_sge_alloc(sc)) != 0) {
  858                 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
  859                 return (err);
  860         }
  861 
  862         if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
  863                 irq_idx = -1;
  864 
  865         for (i = 0; i < (sc)->params.nports; i++) {
  866                 struct port_info *pi = &sc->port[i];
  867 
  868                 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
  869                         err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
  870                             (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
  871                             &sc->params.sge.qset[qset_idx], ntxq, pi);
  872                         if (err) {
  873                                 t3_free_sge_resources(sc);
  874                                 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
  875                                     err);
  876                                 return (err);
  877                         }
  878                 }
  879         }
  880 
  881         return (0);
  882 }
  883 
  884 static void
  885 cxgb_teardown_interrupts(adapter_t *sc)
  886 {
  887         int i;
  888 
  889         for (i = 0; i < SGE_QSETS; i++) {
  890                 if (sc->msix_intr_tag[i] == NULL) {
  891 
  892                         /* Should have been set up fully or not at all */
  893                         KASSERT(sc->msix_irq_res[i] == NULL &&
  894                                 sc->msix_irq_rid[i] == 0,
  895                                 ("%s: half-done interrupt (%d).", __func__, i));
  896 
  897                         continue;
  898                 }
  899 
  900                 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
  901                                   sc->msix_intr_tag[i]);
  902                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
  903                                      sc->msix_irq_res[i]);
  904 
  905                 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
  906                 sc->msix_irq_rid[i] = 0;
  907         }
  908 
  909         if (sc->intr_tag) {
  910                 KASSERT(sc->irq_res != NULL,
  911                         ("%s: half-done interrupt.", __func__));
  912 
  913                 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
  914                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  915                                      sc->irq_res);
  916 
  917                 sc->irq_res = sc->intr_tag = NULL;
  918                 sc->irq_rid = 0;
  919         }
  920 }
  921 
  922 static int
  923 cxgb_setup_interrupts(adapter_t *sc)
  924 {
  925         struct resource *res;
  926         void *tag;
  927         int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
  928 
  929         sc->irq_rid = intr_flag ? 1 : 0;
  930         sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
  931                                              RF_SHAREABLE | RF_ACTIVE);
  932         if (sc->irq_res == NULL) {
  933                 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
  934                               intr_flag, sc->irq_rid);
  935                 err = EINVAL;
  936                 sc->irq_rid = 0;
  937         } else {
  938                 err = bus_setup_intr(sc->dev, sc->irq_res,
  939                                      INTR_MPSAFE | INTR_TYPE_NET,
  940 #ifdef INTR_FILTERS
  941                                      NULL,
  942 #endif
  943                                      sc->cxgb_intr, sc, &sc->intr_tag);
  944 
  945                 if (err) {
  946                         device_printf(sc->dev,
  947                                       "Cannot set up interrupt (%x, %u, %d)\n",
  948                                       intr_flag, sc->irq_rid, err);
  949                         bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  950                                              sc->irq_res);
  951                         sc->irq_res = sc->intr_tag = NULL;
  952                         sc->irq_rid = 0;
  953                 }
  954         }
  955 
  956         /* That's all for INTx or MSI */
  957         if (!(intr_flag & USING_MSIX) || err)
  958                 return (err);
  959 
  960         for (i = 0; i < sc->msi_count - 1; i++) {
  961                 rid = i + 2;
  962                 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
  963                                              RF_SHAREABLE | RF_ACTIVE);
  964                 if (res == NULL) {
  965                         device_printf(sc->dev, "Cannot allocate interrupt "
  966                                       "for message %d\n", rid);
  967                         err = EINVAL;
  968                         break;
  969                 }
  970 
  971                 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
  972 #ifdef INTR_FILTERS
  973                                      NULL,
  974 #endif
  975                                      t3_intr_msix, &sc->sge.qs[i], &tag);
  976                 if (err) {
  977                         device_printf(sc->dev, "Cannot set up interrupt "
  978                                       "for message %d (%d)\n", rid, err);
  979                         bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
  980                         break;
  981                 }
  982 
  983                 sc->msix_irq_rid[i] = rid;
  984                 sc->msix_irq_res[i] = res;
  985                 sc->msix_intr_tag[i] = tag;
  986         }
  987 
  988         if (err)
  989                 cxgb_teardown_interrupts(sc);
  990 
  991         return (err);
  992 }
  993 
  994 
  995 static int
  996 cxgb_port_probe(device_t dev)
  997 {
  998         struct port_info *p;
  999         char buf[80];
 1000         const char *desc;
 1001         
 1002         p = device_get_softc(dev);
 1003         desc = p->phy.desc;
 1004         snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
 1005         device_set_desc_copy(dev, buf);
 1006         return (0);
 1007 }
 1008 
 1009 
 1010 static int
 1011 cxgb_makedev(struct port_info *pi)
 1012 {
 1013         
 1014         pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
 1015             UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
 1016         
 1017         if (pi->port_cdev == NULL)
 1018                 return (ENOMEM);
 1019 
 1020         pi->port_cdev->si_drv1 = (void *)pi;
 1021         
 1022         return (0);
 1023 }
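/*
 * The node created above appears as /dev/<ifname> (e.g. /dev/cxgb0) and
 * dispatches to the cxgb_extension_* handlers registered in cxgb_cdevsw
 * earlier in this file; si_drv1 lets those handlers recover the port_info.
 */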
 1024 
 1025 #ifdef TSO_SUPPORTED
 1026 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_VLAN_HWTSO)
 1027 /* Don't enable TSO6 yet */
 1028 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_VLAN_HWTSO)
 1029 #else
 1030 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
 1031 /* Don't enable TSO6 yet */
 1032 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
 1033 #define IFCAP_TSO4 0x0
 1034 #define IFCAP_TSO6 0x0
 1035 #define CSUM_TSO   0x0
 1036 #endif
 1037 
 1038 
 1039 static int
 1040 cxgb_port_attach(device_t dev)
 1041 {
 1042         struct port_info *p;
 1043         struct ifnet *ifp;
 1044         int err;
 1045         struct adapter *sc;
 1046         
 1047         
 1048         p = device_get_softc(dev);
 1049         sc = p->adapter;
 1050         snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
 1051             device_get_unit(device_get_parent(dev)), p->port_id);
 1052         PORT_LOCK_INIT(p, p->lockbuf);
 1053 
 1054         callout_init(&p->link_check_ch, CALLOUT_MPSAFE);
 1055         TASK_INIT(&p->link_check_task, 0, check_link_status, p);
 1056 
 1057         /* Allocate an ifnet object and set it up */
 1058         ifp = p->ifp = if_alloc(IFT_ETHER);
 1059         if (ifp == NULL) {
 1060                 device_printf(dev, "Cannot allocate ifnet\n");
 1061                 return (ENOMEM);
 1062         }
 1063         
 1064         /*
 1065          * Note that there is currently no watchdog timer.
 1066          */
 1067         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1068         ifp->if_init = cxgb_init;
 1069         ifp->if_softc = p;
 1070         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1071         ifp->if_ioctl = cxgb_ioctl;
 1072         ifp->if_start = cxgb_start;
 1073 
 1074 
 1075         ifp->if_timer = 0;      /* Disable ifnet watchdog */
 1076         ifp->if_watchdog = NULL;
 1077 
 1078         ifp->if_snd.ifq_drv_maxlen = max(cxgb_snd_queue_len, ifqmaxlen);
 1079         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 1080         IFQ_SET_READY(&ifp->if_snd);
 1081 
 1082         ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
 1083         ifp->if_capabilities |= CXGB_CAP;
 1084         ifp->if_capenable |= CXGB_CAP_ENABLE;
 1085         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
 1086         /*
 1087          * Disable TSO on 4-port - it isn't supported by the firmware yet.
 1088          */
 1089         if (sc->params.nports > 2) {
 1090                 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO);
 1091                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO);
 1092                 ifp->if_hwassist &= ~CSUM_TSO;
 1093         }
 1094 
 1095         ether_ifattach(ifp, p->hw_addr);
 1096 
 1097 #ifdef IFNET_MULTIQUEUE
 1098         ifp->if_transmit = cxgb_pcpu_transmit;
 1099 #endif
 1100         /*
 1101          * Only default to jumbo frames on 10GigE
 1102          */
 1103         if (p->adapter->params.nports <= 2)
 1104                 ifp->if_mtu = ETHERMTU_JUMBO;
 1105         if ((err = cxgb_makedev(p)) != 0) {
 1106                 printf("makedev failed %d\n", err);
 1107                 return (err);
 1108         }
 1109 
 1110         /* Create a list of media supported by this port */
 1111         ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
 1112             cxgb_media_status);
 1113         cxgb_build_medialist(p);
 1114       
 1115         t3_sge_init_port(p);
 1116 
 1117         return (err);
 1118 }
 1119 
 1120 /*
 1121  * cxgb_port_detach() is called via the device_detach method when
 1122  * cxgb_free() calls bus_generic_detach().  It is responsible for
 1123  * removing the device from the view of the kernel, i.e. from all
 1124  * interface lists etc.  This routine is only called when the driver is
 1125  * being unloaded, not when the link goes down.
 1126  */
 1127 static int
 1128 cxgb_port_detach(device_t dev)
 1129 {
 1130         struct port_info *p;
 1131         struct adapter *sc;
 1132 
 1133         p = device_get_softc(dev);
 1134         sc = p->adapter;
 1135 
 1136         /* Tell cxgb_ioctl and if_init that the port is going away */
 1137         ADAPTER_LOCK(sc);
 1138         SET_DOOMED(p);
 1139         wakeup(&sc->flags);
 1140         while (IS_BUSY(sc))
 1141                 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
 1142         SET_BUSY(sc);
 1143         ADAPTER_UNLOCK(sc);
 1144 
 1145         if (p->port_cdev != NULL)
 1146                 destroy_dev(p->port_cdev);
 1147 
 1148         cxgb_uninit_synchronized(p);
 1149         ether_ifdetach(p->ifp);
 1150 
 1151         PORT_LOCK_DEINIT(p);
 1152         if_free(p->ifp);
 1153         p->ifp = NULL;
 1154 
 1155         ADAPTER_LOCK(sc);
 1156         CLR_BUSY(sc);
 1157         wakeup_one(&sc->flags);
 1158         ADAPTER_UNLOCK(sc);
 1159         return (0);
 1160 }
 1161 
 1162 void
 1163 t3_fatal_err(struct adapter *sc)
 1164 {
 1165         u_int fw_status[4];
 1166 
 1167         if (sc->flags & FULL_INIT_DONE) {
 1168                 t3_sge_stop(sc);
 1169                 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
 1170                 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
 1171                 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 1172                 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 1173                 t3_intr_disable(sc);
 1174         }
 1175         device_printf(sc->dev, "encountered fatal error, operation suspended\n");
 1176         if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
 1177                 device_printf(sc->dev, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1178                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1179 }
 1180 
 1181 int
 1182 t3_os_find_pci_capability(adapter_t *sc, int cap)
 1183 {
 1184         device_t dev;
 1185         struct pci_devinfo *dinfo;
 1186         pcicfgregs *cfg;
 1187         uint32_t status;
 1188         uint8_t ptr;
 1189 
 1190         dev = sc->dev;
 1191         dinfo = device_get_ivars(dev);
 1192         cfg = &dinfo->cfg;
 1193 
 1194         status = pci_read_config(dev, PCIR_STATUS, 2);
 1195         if (!(status & PCIM_STATUS_CAPPRESENT))
 1196                 return (0);
 1197 
 1198         switch (cfg->hdrtype & PCIM_HDRTYPE) {
 1199         case 0:
 1200         case 1:
 1201                 ptr = PCIR_CAP_PTR;
 1202                 break;
 1203         case 2:
 1204                 ptr = PCIR_CAP_PTR_2;
 1205                 break;
 1206         default:
 1207                 return (0);
 1208                 break;
 1209         }
 1210         ptr = pci_read_config(dev, ptr, 1);
 1211 
 1212         while (ptr != 0) {
 1213                 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
 1214                         return (ptr);
 1215                 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
 1216         }
 1217 
 1218         return (0);
 1219 }
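/*
 * Usage sketch (hypothetical caller): locating the PCI Express capability
 * so its registers can be read relative to the returned offset:
 *
 *     int ptr = t3_os_find_pci_capability(sc, PCIY_EXPRESS);
 *     if (ptr != 0)
 *             lnk = pci_read_config(sc->dev, ptr + PCIR_EXPRESS_LINK_STA, 2);
 */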
 1220 
 1221 int
 1222 t3_os_pci_save_state(struct adapter *sc)
 1223 {
 1224         device_t dev;
 1225         struct pci_devinfo *dinfo;
 1226 
 1227         dev = sc->dev;
 1228         dinfo = device_get_ivars(dev);
 1229 
 1230         pci_cfg_save(dev, dinfo, 0);
 1231         return (0);
 1232 }
 1233 
 1234 int
 1235 t3_os_pci_restore_state(struct adapter *sc)
 1236 {
 1237         device_t dev;
 1238         struct pci_devinfo *dinfo;
 1239 
 1240         dev = sc->dev;
 1241         dinfo = device_get_ivars(dev);
 1242 
 1243         pci_cfg_restore(dev, dinfo);
 1244         return (0);
 1245 }
 1246 
 1247 /**
 1248  *      t3_os_link_changed - handle link status changes
 1249  *      @adapter: the adapter associated with the link change
 1250  *      @port_id: the port index whose link status has changed
 1251  *      @link_status: the new status of the link
 1252  *      @speed: the new speed setting
 1253  *      @duplex: the new duplex setting
 1254  *      @fc: the new flow-control setting
 1255  *
 1256  *      This is the OS-dependent handler for link status changes.  The OS
 1257  *      neutral handler takes care of most of the processing for these events,
 1258  *      then calls this handler for any OS-specific processing.
 1259  */
 1260 void
 1261 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
 1262      int duplex, int fc, int mac_was_reset)
 1263 {
 1264         struct port_info *pi = &adapter->port[port_id];
 1265         struct ifnet *ifp = pi->ifp;
 1266 
 1267         /* no race with detach, so ifp should always be good */
 1268         KASSERT(ifp, ("%s: if detached.", __func__));
 1269 
 1270         /* Reapply mac settings if they were lost due to a reset */
 1271         if (mac_was_reset) {
 1272                 PORT_LOCK(pi);
 1273                 cxgb_update_mac_settings(pi);
 1274                 PORT_UNLOCK(pi);
 1275         }
 1276 
 1277         if (link_status) {
 1278                 ifp->if_baudrate = IF_Mbps(speed);
 1279                 if_link_state_change(ifp, LINK_STATE_UP);
 1280         } else
 1281                 if_link_state_change(ifp, LINK_STATE_DOWN);
 1282 }
 1283 
 1284 /**
 1285  *      t3_os_phymod_changed - handle PHY module changes
 1286  *      @adap: the adapter whose PHY reported the module change
 1287  *      @port_id: index of the port whose PHY module changed
 1288  *
 1289  *      This is the OS-dependent handler for PHY module changes.  It is
 1290  *      invoked when a PHY module is removed or inserted for any OS-specific
 1291  *      processing.
 1292  */
 1293 void t3_os_phymod_changed(struct adapter *adap, int port_id)
 1294 {
 1295         static const char *mod_str[] = {
 1296                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
 1297         };
 1298         struct port_info *pi = &adap->port[port_id];
 1299         int mod = pi->phy.modtype;
 1300 
 1301         if (mod != pi->media.ifm_cur->ifm_data)
 1302                 cxgb_build_medialist(pi);
 1303 
 1304         if (mod == phy_modtype_none)
 1305                 if_printf(pi->ifp, "PHY module unplugged\n");
 1306         else {
 1307                 KASSERT(mod < ARRAY_SIZE(mod_str),
 1308                         ("invalid PHY module type %d", mod));
 1309                 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
 1310         }
 1311 }
 1312 
 1313 void
 1314 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
 1315 {
 1316 
 1317         /*
 1318          * The ifnet might not be allocated before this gets called,
 1319          * as this is called early on in attach by t3_prep_adapter;
 1320          * save the address off in the port structure.
 1321          */
 1322         if (cxgb_debug)
 1323                 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
 1324         bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
 1325 }
 1326 
 1327 /*
 1328  * Programs the XGMAC based on the settings in the ifnet.  These settings
 1329  * include MTU, MAC address, mcast addresses, etc.
 1330  */
 1331 static void
 1332 cxgb_update_mac_settings(struct port_info *p)
 1333 {
 1334         struct ifnet *ifp = p->ifp;
 1335         struct t3_rx_mode rm;
 1336         struct cmac *mac = &p->mac;
 1337         int mtu, hwtagging;
 1338 
 1339         PORT_LOCK_ASSERT_OWNED(p);
 1340 
 1341         bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
 1342 
 1343         mtu = ifp->if_mtu;
 1344         if (ifp->if_capenable & IFCAP_VLAN_MTU)
 1345                 mtu += ETHER_VLAN_ENCAP_LEN;
 1346 
 1347         hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
 1348 
 1349         t3_mac_set_mtu(mac, mtu);
 1350         t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
 1351         t3_mac_set_address(mac, 0, p->hw_addr);
 1352         t3_init_rx_mode(&rm, p);
 1353         t3_mac_set_rx_mode(mac, &rm);
 1354 }
 1355 
 1356 static int
 1357 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 1358                               unsigned long n)
 1359 {
 1360         int attempts = 5;
 1361 
 1362         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 1363                 if (!--attempts)
 1364                         return (ETIMEDOUT);
 1365                 t3_os_sleep(10);
 1366         }
 1367         return (0);
 1368 }
 1369 
 1370 static int
 1371 init_tp_parity(struct adapter *adap)
 1372 {
 1373         int i;
 1374         struct mbuf *m;
 1375         struct cpl_set_tcb_field *greq;
 1376         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
 1377 
 1378         t3_tp_set_offload_mode(adap, 1);
 1379 
 1380         for (i = 0; i < 16; i++) {
 1381                 struct cpl_smt_write_req *req;
 1382 
 1383                 m = m_gethdr(M_WAITOK, MT_DATA);
 1384                 req = mtod(m, struct cpl_smt_write_req *);
 1385                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1386                 memset(req, 0, sizeof(*req));
 1387                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1388                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 1389                 req->iff = i;
 1390                 t3_mgmt_tx(adap, m);
 1391         }
 1392 
 1393         for (i = 0; i < 2048; i++) {
 1394                 struct cpl_l2t_write_req *req;
 1395 
 1396                 m = m_gethdr(M_WAITOK, MT_DATA);
 1397                 req = mtod(m, struct cpl_l2t_write_req *);
 1398                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1399                 memset(req, 0, sizeof(*req));
 1400                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1401                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 1402                 req->params = htonl(V_L2T_W_IDX(i));
 1403                 t3_mgmt_tx(adap, m);
 1404         }
 1405 
 1406         for (i = 0; i < 2048; i++) {
 1407                 struct cpl_rte_write_req *req;
 1408 
 1409                 m = m_gethdr(M_WAITOK, MT_DATA);
 1410                 req = mtod(m, struct cpl_rte_write_req *);
 1411                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1412                 memset(req, 0, sizeof(*req));
 1413                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1414                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 1415                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
 1416                 t3_mgmt_tx(adap, m);
 1417         }
 1418 
 1419         m = m_gethdr(M_WAITOK, MT_DATA);
 1420         greq = mtod(m, struct cpl_set_tcb_field *);
 1421         m->m_len = m->m_pkthdr.len = sizeof(*greq);
 1422         memset(greq, 0, sizeof(*greq));
 1423         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1424         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
 1425         greq->mask = htobe64(1);
 1426         t3_mgmt_tx(adap, m);
 1427 
 1428         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 1429         t3_tp_set_offload_mode(adap, 0);
 1430         return (i);
 1431 }
 1432 
 1433 /**
 1434  *      setup_rss - configure Receive Side Steering (per-queue connection demux) 
 1435  *      @adap: the adapter
 1436  *
 1437  *      Sets up RSS to distribute packets to multiple receive queues.  We
 1438  *      configure the RSS CPU lookup table to distribute to the number of HW
 1439  *      receive queues, and the response queue lookup table to narrow that
 1440  *      down to the response queues actually configured for each port.
 1441  *      We always configure the RSS mapping for two ports since the mapping
 1442  *      table has plenty of entries.
 1443  */
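/*
 * Worked example: with two ports of two queue sets each (nq[0] = nq[1] = 2),
 * the first half of rspq_map cycles 0,1,0,1,... and the second half cycles
 * 2,3,2,3,..., so each port's traffic lands only on its own queue sets.
 */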
 1444 static void
 1445 setup_rss(adapter_t *adap)
 1446 {
 1447         int i;
 1448         u_int nq[2]; 
 1449         uint8_t cpus[SGE_QSETS + 1];
 1450         uint16_t rspq_map[RSS_TABLE_SIZE];
 1451         
 1452         for (i = 0; i < SGE_QSETS; ++i)
 1453                 cpus[i] = i;
 1454         cpus[SGE_QSETS] = 0xff;
 1455 
 1456         nq[0] = nq[1] = 0;
 1457         for_each_port(adap, i) {
 1458                 const struct port_info *pi = adap2pinfo(adap, i);
 1459 
 1460                 nq[pi->tx_chan] += pi->nqsets;
 1461         }
 1462         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 1463                 rspq_map[i] = nq[0] ? i % nq[0] : 0;
 1464                 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
 1465         }
 1466 
 1467         /* Calculate the reverse RSS map table */
 1468         for (i = 0; i < SGE_QSETS; ++i)
 1469                 adap->rrss_map[i] = 0xff;
 1470         for (i = 0; i < RSS_TABLE_SIZE; ++i)
 1471                 if (adap->rrss_map[rspq_map[i]] == 0xff)
 1472                         adap->rrss_map[rspq_map[i]] = i;
 1473 
 1474         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 1475                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
 1476                       F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
 1477                       cpus, rspq_map);
 1479 }
 1480 
 1481 /*
 1482  * Sends an mbuf to an offload queue driver.  t3_offload_tx does the
 1483  * actual work; no tap handling is performed at this layer.
 1484  */
 1485 static inline int
 1486 offload_tx(struct t3cdev *tdev, struct mbuf *m)
 1487 {
 1488         int ret;
 1489 
 1490         ret = t3_offload_tx(tdev, m);
 1491         return (ret);
 1492 }
 1493 
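/*
 * Program one source MAC table (SMT) entry with the port's hardware
 * address, delivered as a work request on the offload queue.
 */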
 1494 static int
 1495 write_smt_entry(struct adapter *adapter, int idx)
 1496 {
 1497         struct port_info *pi = &adapter->port[idx];
 1498         struct cpl_smt_write_req *req;
 1499         struct mbuf *m;
 1500 
 1501         if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 1502                 return (ENOMEM);
 1503 
 1504         req = mtod(m, struct cpl_smt_write_req *);
 1505         m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
 1506         
 1507         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1508         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 1509         req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
 1510         req->iff = idx;
 1511         memset(req->src_mac1, 0, sizeof(req->src_mac1));
 1512         memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
 1513 
 1514         m_set_priority(m, 1);
 1515 
 1516         offload_tx(&adapter->tdev, m);
 1517 
 1518         return (0);
 1519 }
 1520 
 1521 static int
 1522 init_smt(struct adapter *adapter)
 1523 {
 1524         int i;
 1525 
 1526         for_each_port(adapter, i)
 1527                 write_smt_entry(adapter, i);
 1528         return (0);
 1529 }
 1530 
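/*
 * Seed the per-port MTU table: two 16-bit MTUs (one per channel) packed
 * into a single 32-bit register, both set to the standard Ethernet MTU.
 */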
 1531 static void
 1532 init_port_mtus(adapter_t *adapter)
 1533 {
 1534         unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
 1535 
 1536         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 1537 }
 1538 
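/*
 * Build and send a FW_WROPCODE_MNGT work request that configures packet
 * scheduler "sched" for queue "qidx" with the given min/max parameters and
 * port binding.  Allocation is best-effort: if no mbuf is available the
 * command is silently dropped.
 */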
 1539 static void
 1540 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 1541                               int hi, int port)
 1542 {
 1543         struct mbuf *m;
 1544         struct mngt_pktsched_wr *req;
 1545 
 1546         m = m_gethdr(M_DONTWAIT, MT_DATA);
 1547         if (m) {        
 1548                 req = mtod(m, struct mngt_pktsched_wr *);
 1549                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 1550                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 1551                 req->sched = sched;
 1552                 req->idx = qidx;
 1553                 req->min = lo;
 1554                 req->max = hi;
 1555                 req->binding = port;
 1556                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1557                 t3_mgmt_tx(adap, m);
 1558         }
 1559 }
 1560 
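/*
 * Start the per-CPU transmit threads and bind every queue set of every
 * port to its tx channel.  The -1 bounds presumably leave the scheduler's
 * min/max rates at their defaults.
 */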
 1561 static void
 1562 bind_qsets(adapter_t *sc)
 1563 {
 1564         int i, j;
 1565 
 1566         cxgb_pcpu_startup_threads(sc);
 1567         for (i = 0; i < (sc)->params.nports; ++i) {
 1568                 const struct port_info *pi = adap2pinfo(sc, i);
 1569 
 1570                 for (j = 0; j < pi->nqsets; ++j) {
 1571                         send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
 1572                                           -1, pi->tx_chan);
 1573 
 1574                 }
 1575         }
 1576 }
 1577 
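/*
 * If the protocol SRAM image stored in the EEPROM does not match the
 * version the driver expects, fetch the matching tp_eeprom image for this
 * chip revision via firmware(9) and rewrite it.  Failures are logged but
 * are not fatal.
 */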
 1578 static void
 1579 update_tpeeprom(struct adapter *adap)
 1580 {
 1581 #ifdef FIRMWARE_LATEST
 1582         const struct firmware *tpeeprom;
 1583 #else
 1584         struct firmware *tpeeprom;
 1585 #endif
 1586 
 1587         uint32_t version;
 1588         unsigned int major, minor;
 1589         int ret, len;
 1590         char rev, name[32];
 1591 
 1592         t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
 1593 
 1594         major = G_TP_VERSION_MAJOR(version);
 1595         minor = G_TP_VERSION_MINOR(version);
 1596         if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
 1597                 return; 
 1598 
 1599         rev = t3rev2char(adap);
 1600         snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
 1601 
 1602         tpeeprom = firmware_get(name);
 1603         if (tpeeprom == NULL) {
 1604                 device_printf(adap->dev,
 1605                               "could not load TP EEPROM: unable to load %s\n",
 1606                               name);
 1607                 return;
 1608         }
 1609 
 1610         len = tpeeprom->datasize - 4;
 1611         
 1612         ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
 1613         if (ret)
 1614                 goto release_tpeeprom;
 1615 
 1616         if (len != TP_SRAM_LEN) {
 1617                 device_printf(adap->dev,
 1618                               "%s length is wrong len=%d expected=%d\n", name,
 1619                               len, TP_SRAM_LEN);
 1620                 goto release_tpeeprom;  /* don't leak the firmware ref */
 1621         }
 1622         
 1623         ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
 1624             TP_SRAM_OFFSET);
 1625         
 1626         if (!ret) {
 1627                 device_printf(adap->dev,
 1628                         "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
 1629                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1630         } else 
 1631                 device_printf(adap->dev,
 1632                               "Protocol SRAM image update in EEPROM failed\n");
 1633 
 1634 release_tpeeprom:
 1635         firmware_put(tpeeprom, FIRMWARE_UNLOAD);
 1636         
 1637         return;
 1638 }
 1639 
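/*
 * Load the protocol SRAM image that matches this chip revision into the
 * TP, refreshing the EEPROM copy first so the two stay in sync.
 */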
 1640 static int
 1641 update_tpsram(struct adapter *adap)
 1642 {
 1643 #ifdef FIRMWARE_LATEST
 1644         const struct firmware *tpsram;
 1645 #else
 1646         struct firmware *tpsram;
 1647 #endif  
 1648         int ret;
 1649         char rev, name[32];
 1650 
 1651         rev = t3rev2char(adap);
 1652         snprintf(name, sizeof(name), TPSRAM_NAME, rev);
 1653 
 1654         update_tpeeprom(adap);
 1655 
 1656         tpsram = firmware_get(name);
 1657         if (tpsram == NULL) {
 1658                 device_printf(adap->dev, "could not load TP SRAM\n");
 1659                 return (EINVAL);
 1660         } else
 1661                 device_printf(adap->dev, "updating TP SRAM\n");
 1662         
 1663         ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
 1664         if (ret)
 1665                 goto release_tpsram;    
 1666 
 1667         ret = t3_set_proto_sram(adap, tpsram->data);
 1668         if (ret)
 1669                 device_printf(adap->dev, "loading protocol SRAM failed\n");
 1670 
 1671 release_tpsram:
 1672         firmware_put(tpsram, FIRMWARE_UNLOAD);
 1673         
 1674         return (ret);
 1675 }
 1676 
 1677 /**
 1678  *      cxgb_up - enable the adapter
 1679  *      @adap: adapter being enabled
 1680  *
 1681  *      Called when the first port is enabled, this function performs the
 1682  *      actions necessary to make an adapter operational, such as completing
 1683  *      the initialization of HW modules, and enabling interrupts.
 1684  */
 1685 static int
 1686 cxgb_up(struct adapter *sc)
 1687 {
 1688         int err = 0;
 1689         unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
 1690 
 1691         KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
 1692                                            __func__, sc->open_device_map));
 1693 
 1694         if ((sc->flags & FULL_INIT_DONE) == 0) {
 1695 
 1696                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1697 
 1698                 if ((sc->flags & FW_UPTODATE) == 0)
 1699                         if ((err = upgrade_fw(sc)))
 1700                                 goto out;
 1701 
 1702                 if ((sc->flags & TPS_UPTODATE) == 0)
 1703                         if ((err = update_tpsram(sc)))
 1704                                 goto out;
 1705 
 1706                 if (is_offload(sc) && nfilters != 0) {
 1707                         sc->params.mc5.nservers = 0;
 1708 
 1709                         if (nfilters < 0)
 1710                                 sc->params.mc5.nfilters = mxf;
 1711                         else
 1712                                 sc->params.mc5.nfilters = min(nfilters, mxf);
 1713                 }
 1714 
 1715                 err = t3_init_hw(sc, 0);
 1716                 if (err)
 1717                         goto out;
 1718 
 1719                 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
 1720                 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
 1721 
 1722                 err = setup_sge_qsets(sc);
 1723                 if (err)
 1724                         goto out;
 1725 
 1726                 alloc_filters(sc);
 1727                 setup_rss(sc);
 1728 
 1729                 t3_intr_clear(sc);
 1730                 err = cxgb_setup_interrupts(sc);
 1731                 if (err)
 1732                         goto out;
 1733 
 1734                 t3_add_configured_sysctls(sc);
 1735                 sc->flags |= FULL_INIT_DONE;
 1736         }
 1737 
 1738         t3_intr_clear(sc);
 1739         t3_sge_start(sc);
 1740         t3_intr_enable(sc);
 1741 
 1742         if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
 1743             is_offload(sc) && init_tp_parity(sc) == 0)
 1744                 sc->flags |= TP_PARITY_INIT;
 1745 
 1746         if (sc->flags & TP_PARITY_INIT) {
 1747                 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
 1748                 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
 1749         }
 1750         
 1751         if (!(sc->flags & QUEUES_BOUND)) {
 1752                 bind_qsets(sc);
 1753                 setup_hw_filters(sc);
 1754                 sc->flags |= QUEUES_BOUND;              
 1755         }
 1756 
 1757         t3_sge_reset_adapter(sc);
 1758 out:
 1759         return (err);
 1760 }
 1761 
 1762 /*
 1763  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 1764  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 1765  * during controller_detach, not here.
 1766  */
 1767 static void
 1768 cxgb_down(struct adapter *sc)
 1769 {
 1770         t3_sge_stop(sc);
 1771         t3_intr_disable(sc);
 1772 }
 1773 
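/*
 * Mark the offload device open, attach it to this port's ifnet, program
 * the MTU tables and SMT, and notify registered offload clients (e.g. the
 * TOE module) that the device is now usable.
 */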
 1774 static int
 1775 offload_open(struct port_info *pi)
 1776 {
 1777         struct adapter *sc = pi->adapter;
 1778         struct t3cdev *tdev = &sc->tdev;
 1779 
 1780         setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);
 1781 
 1782         t3_tp_set_offload_mode(sc, 1);
 1783         tdev->lldev = pi->ifp;
 1784         init_port_mtus(sc);
 1785         t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
 1786                      sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
 1787         init_smt(sc);
 1788         cxgb_add_clients(tdev);
 1789 
 1790         return (0);
 1791 }
 1792 
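/*
 * Undo offload_open: detach the registered clients, clear the lldev
 * pointer, and turn offload mode back off.
 */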
 1793 static int
 1794 offload_close(struct t3cdev *tdev)
 1795 {
 1796         struct adapter *adapter = tdev2adap(tdev);
 1797 
 1798         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
 1799                 return (0);
 1800 
 1801         /* Call back all registered clients */
 1802         cxgb_remove_clients(tdev);
 1803 
 1804         tdev->lldev = NULL;
 1805         cxgb_set_dummy_ops(tdev);
 1806         t3_tp_set_offload_mode(adapter, 0);
 1807 
 1808         clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1809 
 1810         return (0);
 1811 }
 1812 
 1813 /*
 1814  * if_init for cxgb ports.
 1815  */
 1816 static void
 1817 cxgb_init(void *arg)
 1818 {
 1819         struct port_info *p = arg;
 1820         struct adapter *sc = p->adapter;
 1821 
 1822         ADAPTER_LOCK(sc);
 1823         cxgb_init_locked(p); /* releases adapter lock */
 1824         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1825 }
 1826 
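/*
 * Entered with the adapter lock held and always exits with it released.
 * Waits out any concurrent operation (IS_BUSY), brings the adapter up if
 * this is the first port to open, and then enables the MAC and starts the
 * link for this port.
 */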
 1827 static int
 1828 cxgb_init_locked(struct port_info *p)
 1829 {
 1830         struct adapter *sc = p->adapter;
 1831         struct ifnet *ifp = p->ifp;
 1832         struct cmac *mac = &p->mac;
 1833         int rc = 0, may_sleep = 0, gave_up_lock = 0;
 1834 
 1835         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1836 
 1837         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1838                 gave_up_lock = 1;
 1839                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
 1840                         rc = EINTR;
 1841                         goto done;
 1842                 }
 1843         }
 1844         if (IS_DOOMED(p)) {
 1845                 rc = ENXIO;
 1846                 goto done;
 1847         }
 1848         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1849 
 1850         /*
 1851          * The code that runs during one-time adapter initialization can sleep
 1852          * so it's important not to hold any locks across it.
 1853          */
 1854         may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
 1855 
 1856         if (may_sleep) {
 1857                 SET_BUSY(sc);
 1858                 gave_up_lock = 1;
 1859                 ADAPTER_UNLOCK(sc);
 1860         }
 1861 
 1862         if (sc->open_device_map == 0) {
 1863                 if ((rc = cxgb_up(sc)) != 0)
 1864                         goto done;
 1865 
 1866                 if (is_offload(sc) && !ofld_disable && offload_open(p))
 1867                         log(LOG_WARNING,
 1868                             "Could not initialize offload capabilities\n");
 1869         }
 1870 
 1871         PORT_LOCK(p);
 1872         if (isset(&sc->open_device_map, p->port_id) &&
 1873             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1874                 PORT_UNLOCK(p);
 1875                 goto done;
 1876         }
 1877         t3_port_intr_enable(sc, p->port_id);
 1878         if (!mac->multiport) 
 1879                 t3_mac_init(mac);
 1880         cxgb_update_mac_settings(p);
 1881         t3_link_start(&p->phy, mac, &p->link_config);
 1882         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1883         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1884         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1885         PORT_UNLOCK(p);
 1886 
 1887         /* all ok */
 1888         setbit(&sc->open_device_map, p->port_id);
 1889         callout_reset(&p->link_check_ch,
 1890             p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
 1891             link_check_callout, p);
 1892 
 1893 done:
 1894         if (may_sleep) {
 1895                 ADAPTER_LOCK(sc);
 1896                 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1897                 CLR_BUSY(sc);
 1898         }
 1899         if (gave_up_lock)
 1900                 wakeup_one(&sc->flags);
 1901         ADAPTER_UNLOCK(sc);
 1902         return (rc);
 1903 }
 1904 
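/*
 * Counterpart of cxgb_init_locked: entered with the adapter lock held,
 * exits with it released.  Marks the controller busy, drops the lock, and
 * does the actual teardown in cxgb_uninit_synchronized.
 */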
 1905 static int
 1906 cxgb_uninit_locked(struct port_info *p)
 1907 {
 1908         struct adapter *sc = p->adapter;
 1909         int rc;
 1910 
 1911         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1912 
 1913         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1914                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
 1915                         rc = EINTR;
 1916                         goto done;
 1917                 }
 1918         }
 1919         if (IS_DOOMED(p)) {
 1920                 rc = ENXIO;
 1921                 goto done;
 1922         }
 1923         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1924         SET_BUSY(sc);
 1925         ADAPTER_UNLOCK(sc);
 1926 
 1927         rc = cxgb_uninit_synchronized(p);
 1928 
 1929         ADAPTER_LOCK(sc);
 1930         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1931         CLR_BUSY(sc);
 1932         wakeup_one(&sc->flags);
 1933 done:
 1934         ADAPTER_UNLOCK(sc);
 1935         return (rc);
 1936 }
 1937 
 1938 /*
 1939  * Called on "ifconfig down", and from port_detach
 1940  */
 1941 static int
 1942 cxgb_uninit_synchronized(struct port_info *pi)
 1943 {
 1944         struct adapter *sc = pi->adapter;
 1945         struct ifnet *ifp = pi->ifp;
 1946 
 1947         /*
 1948          * taskqueue_drain may cause a deadlock if the adapter lock is held.
 1949          */
 1950         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1951 
 1952         /*
 1953          * Clear this port's bit from the open device map, and then drain all
 1954          * the tasks that can access/manipulate this port's port_info or ifp.
 1955          * We disable this port's interrupts here, so the slow/ext
 1956          * interrupt tasks won't be enqueued.  The tick task will continue to
 1957          * be enqueued every second but the runs after this drain will not see
 1958          * this port in the open device map.
 1959          *
 1960          * A well-behaved task must take open_device_map into account and ignore
 1961          * ports that are not open.
 1962          */
 1963         clrbit(&sc->open_device_map, pi->port_id);
 1964         t3_port_intr_disable(sc, pi->port_id);
 1965         taskqueue_drain(sc->tq, &sc->slow_intr_task);
 1966         taskqueue_drain(sc->tq, &sc->tick_task);
 1967 
 1968         callout_drain(&pi->link_check_ch);
 1969         taskqueue_drain(sc->tq, &pi->link_check_task);
 1970 
 1971         PORT_LOCK(pi);
 1972         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1973 
 1974         /* disable pause frames */
 1975         t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
 1976 
 1977         /* Reset RX FIFO HWM */
 1978         t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
 1979                          V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
 1980 
 1981         DELAY(100 * 1000);
 1982 
 1983         /* Wait for TXFIFO empty */
 1984         t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
 1985                         F_TXFIFO_EMPTY, 1, 20, 5);
 1986 
 1987         DELAY(100 * 1000);
 1988         t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
 1989
 1991         pi->phy.ops->power_down(&pi->phy, 1);
 1992 
 1993         PORT_UNLOCK(pi);
 1994 
 1995         pi->link_config.link_ok = 0;
 1996         t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
 1997 
 1998         if ((sc->open_device_map & PORT_MASK) == 0)
 1999                 offload_close(&sc->tdev);
 2000 
 2001         if (sc->open_device_map == 0)
 2002                 cxgb_down(pi->adapter);
 2003 
 2004         return (0);
 2005 }
 2006 
 2007 /*
 2008  * Mark lro enabled or disabled in all qsets for this port
 2009  */
 2010 static int
 2011 cxgb_set_lro(struct port_info *p, int enabled)
 2012 {
 2013         int i;
 2014         struct adapter *adp = p->adapter;
 2015         struct sge_qset *q;
 2016 
 2017         for (i = 0; i < p->nqsets; i++) {
 2018                 q = &adp->sge.qs[p->first_qset + i];
 2019                 q->lro.enabled = (enabled != 0);
 2020         }
 2021         return (0);
 2022 }
 2023 
 2024 static int
 2025 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
 2026 {
 2027         struct port_info *p = ifp->if_softc;
 2028         struct adapter *sc = p->adapter;
 2029         struct ifreq *ifr = (struct ifreq *)data;
 2030         int flags, error = 0, mtu;
 2031         uint32_t mask;
 2032 
 2033         switch (command) {
 2034         case SIOCSIFMTU:
 2035                 ADAPTER_LOCK(sc);
 2036                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2037                 if (error) {
 2038 fail:
 2039                         ADAPTER_UNLOCK(sc);
 2040                         return (error);
 2041                 }
 2042 
 2043                 mtu = ifr->ifr_mtu;
 2044                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
 2045                         error = EINVAL;
 2046                 } else {
 2047                         ifp->if_mtu = mtu;
 2048                         PORT_LOCK(p);
 2049                         cxgb_update_mac_settings(p);
 2050                         PORT_UNLOCK(p);
 2051                 }
 2052                 ADAPTER_UNLOCK(sc);
 2053                 break;
 2054         case SIOCSIFFLAGS:
 2055                 ADAPTER_LOCK(sc);
 2056                 if (IS_DOOMED(p)) {
 2057                         error = ENXIO;
 2058                         goto fail;
 2059                 }
 2060                 if (ifp->if_flags & IFF_UP) {
 2061                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2062                                 flags = p->if_flags;
 2063                                 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
 2064                                     ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
 2065                                         if (IS_BUSY(sc)) {
 2066                                                 error = EBUSY;
 2067                                                 goto fail;
 2068                                         }
 2069                                         PORT_LOCK(p);
 2070                                         cxgb_update_mac_settings(p);
 2071                                         PORT_UNLOCK(p);
 2072                                 }
 2073                                 ADAPTER_UNLOCK(sc);
 2074                         } else
 2075                                 error = cxgb_init_locked(p);
 2076                         p->if_flags = ifp->if_flags;
 2077                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2078                         error = cxgb_uninit_locked(p);
 2079                 else
 2080                         ADAPTER_UNLOCK(sc);
 2081 
 2082                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 2083                 break;
 2084         case SIOCADDMULTI:
 2085         case SIOCDELMULTI:
 2086                 ADAPTER_LOCK(sc);
 2087                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2088                 if (error)
 2089                         goto fail;
 2090 
 2091                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2092                         PORT_LOCK(p);
 2093                         cxgb_update_mac_settings(p);
 2094                         PORT_UNLOCK(p);
 2095                 }
 2096                 ADAPTER_UNLOCK(sc);
 2097 
 2098                 break;
 2099         case SIOCSIFCAP:
 2100                 ADAPTER_LOCK(sc);
 2101                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2102                 if (error)
 2103                         goto fail;
 2104 
 2105                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 2106                 if (mask & IFCAP_TXCSUM) {
 2107                         ifp->if_capenable ^= IFCAP_TXCSUM;
 2108                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
 2109 
 2110                         if (IFCAP_TSO & ifp->if_capenable &&
 2111                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
 2112                                 ifp->if_capenable &= ~IFCAP_TSO;
 2113                                 ifp->if_hwassist &= ~CSUM_TSO;
 2114                                 if_printf(ifp,
 2115                                     "tso disabled due to -txcsum.\n");
 2116                         }
 2117                 }
 2118                 if (mask & IFCAP_RXCSUM)
 2119                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2120                 if (mask & IFCAP_TSO4) {
 2121                         ifp->if_capenable ^= IFCAP_TSO4;
 2122 
 2123                         if (IFCAP_TSO & ifp->if_capenable) {
 2124                                 if (IFCAP_TXCSUM & ifp->if_capenable)
 2125                                         ifp->if_hwassist |= CSUM_TSO;
 2126                                 else {
 2127                                         ifp->if_capenable &= ~IFCAP_TSO;
 2128                                         ifp->if_hwassist &= ~CSUM_TSO;
 2129                                         if_printf(ifp,
 2130                                             "enable txcsum first.\n");
 2131                                         error = EAGAIN;
 2132                                 }
 2133                         } else
 2134                                 ifp->if_hwassist &= ~CSUM_TSO;
 2135                 }
 2136                 if (mask & IFCAP_LRO) {
 2137                         ifp->if_capenable ^= IFCAP_LRO;
 2138 
 2139                         /* Safe to do this even if cxgb_up not called yet */
 2140                         cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
 2141                 }
 2142                 if (mask & IFCAP_VLAN_HWTAGGING) {
 2143                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2144                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2145                                 PORT_LOCK(p);
 2146                                 cxgb_update_mac_settings(p);
 2147                                 PORT_UNLOCK(p);
 2148                         }
 2149                 }
 2150                 if (mask & IFCAP_VLAN_MTU) {
 2151                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2152                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2153                                 PORT_LOCK(p);
 2154                                 cxgb_update_mac_settings(p);
 2155                                 PORT_UNLOCK(p);
 2156                         }
 2157                 }
 2158                 if (mask & IFCAP_VLAN_HWTSO)
 2159                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 2160                 if (mask & IFCAP_VLAN_HWCSUM)
 2161                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2162 
 2163 #ifdef VLAN_CAPABILITIES
 2164                 VLAN_CAPABILITIES(ifp);
 2165 #endif
 2166                 ADAPTER_UNLOCK(sc);
 2167                 break;
 2168         case SIOCSIFMEDIA:
 2169         case SIOCGIFMEDIA:
 2170                 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 2171                 break;
 2172         default:
 2173                 error = ether_ioctl(ifp, command, data);
 2174         }
 2175 
 2176         return (error);
 2177 }
 2178 
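/*
 * Forcing a media type is not supported; the media list is derived from
 * the transceiver/module type in cxgb_build_medialist.
 */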
 2179 static int
 2180 cxgb_media_change(struct ifnet *ifp)
 2181 {
 2182         return (EOPNOTSUPP);
 2183 }
 2184 
 2185 /*
 2186  * Translates phy->modtype to the correct Ethernet media subtype.
 2187  */
 2188 static int
 2189 cxgb_ifm_type(int mod)
 2190 {
 2191         switch (mod) {
 2192         case phy_modtype_sr:
 2193                 return (IFM_10G_SR);
 2194         case phy_modtype_lr:
 2195                 return (IFM_10G_LR);
 2196         case phy_modtype_lrm:
 2197                 return (IFM_10G_LRM);
 2198         case phy_modtype_twinax:
 2199                 return (IFM_10G_TWINAX);
 2200         case phy_modtype_twinax_long:
 2201                 return (IFM_10G_TWINAX_LONG);
 2202         case phy_modtype_none:
 2203                 return (IFM_NONE);
 2204         case phy_modtype_unknown:
 2205                 return (IFM_UNKNOWN);
 2206         }
 2207 
 2208         KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
 2209         return (IFM_UNKNOWN);
 2210 }
 2211 
 2212 /*
 2213  * Rebuilds the ifmedia list for this port, and sets the current media.
 2214  */
 2215 static void
 2216 cxgb_build_medialist(struct port_info *p)
 2217 {
 2218         struct cphy *phy = &p->phy;
 2219         struct ifmedia *media = &p->media;
 2220         int mod = phy->modtype;
 2221         int m = IFM_ETHER | IFM_FDX;
 2222 
 2223         PORT_LOCK(p);
 2224 
 2225         ifmedia_removeall(media);
 2226         if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
 2227                 /* Copper (RJ45) */
 2228 
 2229                 if (phy->caps & SUPPORTED_10000baseT_Full)
 2230                         ifmedia_add(media, m | IFM_10G_T, mod, NULL);
 2231 
 2232                 if (phy->caps & SUPPORTED_1000baseT_Full)
 2233                         ifmedia_add(media, m | IFM_1000_T, mod, NULL);
 2234 
 2235                 if (phy->caps & SUPPORTED_100baseT_Full)
 2236                         ifmedia_add(media, m | IFM_100_TX, mod, NULL);
 2237 
 2238                 if (phy->caps & SUPPORTED_10baseT_Full)
 2239                         ifmedia_add(media, m | IFM_10_T, mod, NULL);
 2240 
 2241                 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
 2242                 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
 2243 
 2244         } else if (phy->caps & SUPPORTED_TP) {
 2245                 /* Copper (CX4) */
 2246 
 2247                 KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
 2248                         ("%s: unexpected cap 0x%x", __func__, phy->caps));
 2249 
 2250                 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
 2251                 ifmedia_set(media, m | IFM_10G_CX4);
 2252 
 2253         } else if (phy->caps & SUPPORTED_FIBRE &&
 2254                    phy->caps & SUPPORTED_10000baseT_Full) {
 2255                 /* 10G optical (but includes SFP+ twinax) */
 2256 
 2257                 m |= cxgb_ifm_type(mod);
 2258                 if (IFM_SUBTYPE(m) == IFM_NONE)
 2259                         m &= ~IFM_FDX;
 2260 
 2261                 ifmedia_add(media, m, mod, NULL);
 2262                 ifmedia_set(media, m);
 2263 
 2264         } else if (phy->caps & SUPPORTED_FIBRE &&
 2265                    phy->caps & SUPPORTED_1000baseT_Full) {
 2266                 /* 1G optical */
 2267 
 2268                 /* XXX: Lie and claim to be SX, could actually be any 1G-X */
 2269                 ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
 2270                 ifmedia_set(media, m | IFM_1000_SX);
 2271 
 2272         } else {
 2273                 KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
 2274                             phy->caps));
 2275         }
 2276 
 2277         PORT_UNLOCK(p);
 2278 }
 2279 
 2280 static void
 2281 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 2282 {
 2283         struct port_info *p = ifp->if_softc;
 2284         struct ifmedia_entry *cur = p->media.ifm_cur;
 2285         int speed = p->link_config.speed;
 2286 
 2287         if (cur->ifm_data != p->phy.modtype) {
 2288                 cxgb_build_medialist(p);
 2289                 cur = p->media.ifm_cur;
 2290         }
 2291 
 2292         ifmr->ifm_status = IFM_AVALID;
 2293         if (!p->link_config.link_ok)
 2294                 return;
 2295 
 2296         ifmr->ifm_status |= IFM_ACTIVE;
 2297 
 2298         /*
 2299          * active and current will differ iff current media is autoselect.  That
 2300          * can happen only for copper RJ45.
 2301          */
 2302         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
 2303                 return;
 2304         KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
 2305                 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
 2306 
 2307         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
 2308         if (speed == SPEED_10000)
 2309                 ifmr->ifm_active |= IFM_10G_T;
 2310         else if (speed == SPEED_1000)
 2311                 ifmr->ifm_active |= IFM_1000_T;
 2312         else if (speed == SPEED_100)
 2313                 ifmr->ifm_active |= IFM_100_TX;
 2314         else if (speed == SPEED_10)
 2315                 ifmr->ifm_active |= IFM_10_T;
 2316         else
 2317                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
 2318                             speed));
 2319 }
 2320 
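/*
 * Interrupt routine for slow (asynchronous) events: mask further PL
 * interrupts (the read back flushes the posted write) and defer the real
 * work to the slow interrupt task.
 */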
 2321 static void
 2322 cxgb_async_intr(void *data)
 2323 {
 2324         adapter_t *sc = data;
 2325 
 2326         t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
 2327         (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
 2328         taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
 2329 }
 2330 
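/*
 * Callout that schedules the link check task, but only for open ports.
 */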
 2331 static void
 2332 link_check_callout(void *arg)
 2333 {
 2334         struct port_info *pi = arg;
 2335         struct adapter *sc = pi->adapter;
 2336 
 2337         if (!isset(&sc->open_device_map, pi->port_id))
 2338                 return;
 2339 
 2340         taskqueue_enqueue(sc->tq, &pi->link_check_task);
 2341 }
 2342 
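/*
 * Task that asks the hardware whether the link state changed.  Ports with
 * a pending link fault, or whose PHY cannot interrupt on link changes,
 * are polled again a second later.
 */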
 2343 static void
 2344 check_link_status(void *arg, int pending)
 2345 {
 2346         struct port_info *pi = arg;
 2347         struct adapter *sc = pi->adapter;
 2348 
 2349         if (!isset(&sc->open_device_map, pi->port_id))
 2350                 return;
 2351 
 2352         t3_link_changed(sc, pi->port_id);
 2353 
 2354         if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
 2355                 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
 2356 }
 2357 
 2358 void
 2359 t3_os_link_intr(struct port_info *pi)
 2360 {
 2361         /*
 2362          * Schedule a link check in the near future.  If the link is flapping
 2363          * rapidly we'll keep resetting the callout and delaying the check until
 2364          * things stabilize a bit.
 2365          */
 2366         callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
 2367 }
 2368 
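/*
 * Watchdog for a T3 rev B2 MAC problem: run the MAC watchdog on every
 * open port with link and, when it reports a wedged MAC (status 2),
 * reinitialize the MAC and restart the link.
 */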
 2369 static void
 2370 check_t3b2_mac(struct adapter *sc)
 2371 {
 2372         int i;
 2373 
 2374         if (sc->flags & CXGB_SHUTDOWN)
 2375                 return;
 2376 
 2377         for_each_port(sc, i) {
 2378                 struct port_info *p = &sc->port[i];
 2379                 int status;
 2380 #ifdef INVARIANTS
 2381                 struct ifnet *ifp = p->ifp;
 2382 #endif          
 2383 
 2384                 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
 2385                     !p->link_config.link_ok)
 2386                         continue;
 2387 
 2388                 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
 2389                         ("%s: state mismatch (drv_flags %x, device_map %x)",
 2390                          __func__, ifp->if_drv_flags, sc->open_device_map));
 2391 
 2392                 PORT_LOCK(p);
 2393                 status = t3b2_mac_watchdog_task(&p->mac);
 2394                 if (status == 1)
 2395                         p->mac.stats.num_toggled++;
 2396                 else if (status == 2) {
 2397                         struct cmac *mac = &p->mac;
 2398 
 2399                         cxgb_update_mac_settings(p);
 2400                         t3_link_start(&p->phy, mac, &p->link_config);
 2401                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2402                         t3_port_intr_enable(sc, p->port_id);
 2403                         p->mac.stats.num_resets++;
 2404                 }
 2405                 PORT_UNLOCK(p);
 2406         }
 2407 }
 2408 
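/*
 * Once-a-second callout: defers the real work to the tick task and
 * reschedules itself.
 */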
 2409 static void
 2410 cxgb_tick(void *arg)
 2411 {
 2412         adapter_t *sc = (adapter_t *)arg;
 2413 
 2414         if (sc->flags & CXGB_SHUTDOWN)
 2415                 return;
 2416 
 2417         taskqueue_enqueue(sc->tq, &sc->tick_task);      
 2418         callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
 2419 }
 2420 
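/*
 * The tick task: runs the T3B2 MAC watchdog when applicable, accounts for
 * starved response queues and empty free lists, refreshes each open
 * port's ifnet statistics from the MAC counters, and counts rx FIFO
 * overflows.
 */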
 2421 static void
 2422 cxgb_tick_handler(void *arg, int count)
 2423 {
 2424         adapter_t *sc = (adapter_t *)arg;
 2425         const struct adapter_params *p = &sc->params;
 2426         int i;
 2427         uint32_t cause, reset;
 2428 
 2429         if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
 2430                 return;
 2431 
 2432         if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) 
 2433                 check_t3b2_mac(sc);
 2434 
 2435         cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
 2436         if (cause) {
 2437                 struct sge_qset *qs = &sc->sge.qs[0];
 2438                 uint32_t mask, v;
 2439 
 2440                 v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
 2441 
 2442                 mask = 1;
 2443                 for (i = 0; i < SGE_QSETS; i++) {
 2444                         if (v & mask)
 2445                                 qs[i].rspq.starved++;
 2446                         mask <<= 1;
 2447                 }
 2448 
 2449                 mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
 2450 
 2451                 for (i = 0; i < SGE_QSETS * 2; i++) {
 2452                         if (v & mask) {
 2453                                 qs[i / 2].fl[i % 2].empty++;
 2454                         }
 2455                         mask <<= 1;
 2456                 }
 2457 
 2458                 /* clear */
 2459                 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
 2460                 t3_write_reg(sc, A_SG_INT_CAUSE, cause);
 2461         }
 2462 
 2463         for (i = 0; i < sc->params.nports; i++) {
 2464                 struct port_info *pi = &sc->port[i];
 2465                 struct ifnet *ifp = pi->ifp;
 2466                 struct cmac *mac = &pi->mac;
 2467                 struct mac_stats *mstats = &mac->stats;
 2468 
 2469                 if (!isset(&sc->open_device_map, pi->port_id))
 2470                         continue;
 2471 
 2472                 PORT_LOCK(pi);
 2473                 t3_mac_update_stats(mac);
 2474                 PORT_UNLOCK(pi);
 2475 
 2476                 ifp->if_opackets =
 2477                     mstats->tx_frames_64 +
 2478                     mstats->tx_frames_65_127 +
 2479                     mstats->tx_frames_128_255 +
 2480                     mstats->tx_frames_256_511 +
 2481                     mstats->tx_frames_512_1023 +
 2482                     mstats->tx_frames_1024_1518 +
 2483                     mstats->tx_frames_1519_max;
 2484                 
 2485                 ifp->if_ipackets =
 2486                     mstats->rx_frames_64 +
 2487                     mstats->rx_frames_65_127 +
 2488                     mstats->rx_frames_128_255 +
 2489                     mstats->rx_frames_256_511 +
 2490                     mstats->rx_frames_512_1023 +
 2491                     mstats->rx_frames_1024_1518 +
 2492                     mstats->rx_frames_1519_max;
 2493 
 2494                 ifp->if_obytes = mstats->tx_octets;
 2495                 ifp->if_ibytes = mstats->rx_octets;
 2496                 ifp->if_omcasts = mstats->tx_mcast_frames;
 2497                 ifp->if_imcasts = mstats->rx_mcast_frames;
 2498                 
 2499                 ifp->if_collisions =
 2500                     mstats->tx_total_collisions;
 2501 
 2502                 ifp->if_iqdrops = mstats->rx_cong_drops;
 2503                 
 2504                 ifp->if_oerrors =
 2505                     mstats->tx_excess_collisions +
 2506                     mstats->tx_underrun +
 2507                     mstats->tx_len_errs +
 2508                     mstats->tx_mac_internal_errs +
 2509                     mstats->tx_excess_deferral +
 2510                     mstats->tx_fcs_errs;
 2511                 ifp->if_ierrors =
 2512                     mstats->rx_jabber +
 2513                     mstats->rx_data_errs +
 2514                     mstats->rx_sequence_errs +
 2515                     mstats->rx_runt + 
 2516                     mstats->rx_too_long +
 2517                     mstats->rx_mac_internal_errs +
 2518                     mstats->rx_short +
 2519                     mstats->rx_fcs_errs;
 2520 
 2521                 if (mac->multiport)
 2522                         continue;
 2523 
 2524                 /* Count rx fifo overflows, once per second */
 2525                 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
 2526                 reset = 0;
 2527                 if (cause & F_RXFIFO_OVERFLOW) {
 2528                         mac->stats.rx_fifo_ovfl++;
 2529                         reset |= F_RXFIFO_OVERFLOW;
 2530                 }
 2531                 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
 2532         }
 2533 }
 2534 
 2535 static void
 2536 touch_bars(device_t dev)
 2537 {
 2538         /*
 2539          * Don't enable yet
 2540          */
 2541 #if !defined(__LP64__) && 0
 2542         u32 v;
 2543 
 2544         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
 2545         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
 2546         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
 2547         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
 2548         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
 2549         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
 2550 #endif
 2551 }
 2552 
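/*
 * Write a byte range to the serial EEPROM.  The EEPROM is accessed in
 * 32-bit words, so an unaligned request is widened to word boundaries and
 * the words straddling the edges are read back first (e.g. a 3-byte write
 * at offset 6 becomes a read-modify-write of the words at 4 and 8).
 * Write protection is lifted only for the duration of the update.
 */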
 2553 static int
 2554 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
 2555 {
 2556         uint8_t *buf;
 2557         int err = 0;
 2558         u32 aligned_offset, aligned_len, *p;
 2559         struct adapter *adapter = pi->adapter;
 2560
 2562         aligned_offset = offset & ~3;
 2563         aligned_len = (len + (offset & 3) + 3) & ~3;
 2564 
 2565         if (aligned_offset != offset || aligned_len != len) {
 2566                 /* M_WAITOK malloc(9) cannot fail, so no NULL check is needed */
 2567                 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
 2569                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
 2570                 if (!err && aligned_len > 4)
 2571                         err = t3_seeprom_read(adapter,
 2572                                               aligned_offset + aligned_len - 4,
 2573                                               (u32 *)&buf[aligned_len - 4]);
 2574                 if (err)
 2575                         goto out;
 2576                 memcpy(buf + (offset & 3), data, len);
 2577         } else
 2578                 buf = (uint8_t *)(uintptr_t)data;
 2579 
 2580         err = t3_seeprom_wp(adapter, 0);
 2581         if (err)
 2582                 goto out;
 2583 
 2584         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 2585                 err = t3_seeprom_write(adapter, aligned_offset, *p);
 2586                 aligned_offset += 4;
 2587         }
 2588 
 2589         if (!err)
 2590                 err = t3_seeprom_wp(adapter, 1);
 2591 out:
 2592         if (buf != data)
 2593                 free(buf, M_DEVBUF);
 2594         return (err);
 2595 }
 2596
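/*
 * Range check for ioctl arguments: a negative value means the caller left
 * the parameter unspecified, so it always passes.
 */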
 2598 static int
 2599 in_range(int val, int lo, int hi)
 2600 {
 2601         return (val < 0 || (val <= hi && val >= lo));
 2602 }
 2603 
 2604 static int
 2605 cxgb_extension_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 2606 {
 2607         return (0);
 2608 }
 2609 
 2610 static int
 2611 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 2612 {
 2613         return (0);
 2614 }
 2615 
 2616 static int
 2617 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
 2618     int fflag, struct thread *td)
 2619 {
 2620         int mmd, error = 0;
 2621         struct port_info *pi = dev->si_drv1;
 2622         adapter_t *sc = pi->adapter;
 2623 
 2624 #ifdef PRIV_SUPPORTED   
 2625         if (priv_check(td, PRIV_DRIVER)) {
 2626                 if (cxgb_debug) 
 2627                         printf("user does not have access to privileged ioctls\n");
 2628                 return (EPERM);
 2629         }
 2630 #else
 2631         if (suser(td)) {
 2632                 if (cxgb_debug)
 2633                         printf("user does not have access to privileged ioctls\n");
 2634                 return (EPERM);
 2635         }
 2636 #endif
 2637         
 2638         switch (cmd) {
 2639         case CHELSIO_GET_MIIREG: {
 2640                 uint32_t val;
 2641                 struct cphy *phy = &pi->phy;
 2642                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2643                 
 2644                 if (!phy->mdio_read)
 2645                         return (EOPNOTSUPP);
 2646                 if (is_10G(sc)) {
 2647                         mmd = mid->phy_id >> 8;
 2648                         if (!mmd)
 2649                                 mmd = MDIO_DEV_PCS;
 2650                         else if (mmd > MDIO_DEV_VEND2)
 2651                                 return (EINVAL);
 2652 
 2653                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
 2654                                              mid->reg_num, &val);
 2655                 } else
 2656                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
 2657                                              mid->reg_num & 0x1f, &val);
 2658                 if (error == 0)
 2659                         mid->val_out = val;
 2660                 break;
 2661         }
 2662         case CHELSIO_SET_MIIREG: {
 2663                 struct cphy *phy = &pi->phy;
 2664                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2665 
 2666                 if (!phy->mdio_write)
 2667                         return (EOPNOTSUPP);
 2668                 if (is_10G(sc)) {
 2669                         mmd = mid->phy_id >> 8;
 2670                         if (!mmd)
 2671                                 mmd = MDIO_DEV_PCS;
 2672                         else if (mmd > MDIO_DEV_VEND2)
 2673                                 return (EINVAL);
 2674                         
 2675                         error = phy->mdio_write(sc, mid->phy_id & 0x1f,
 2676                                               mmd, mid->reg_num, mid->val_in);
 2677                 } else
 2678                         error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
 2679                                               mid->reg_num & 0x1f,
 2680                                               mid->val_in);
 2681                 break;
 2682         }
 2683         case CHELSIO_SETREG: {
 2684                 struct ch_reg *edata = (struct ch_reg *)data;
 2685                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2686                         return (EFAULT);
 2687                 t3_write_reg(sc, edata->addr, edata->val);
 2688                 break;
 2689         }
 2690         case CHELSIO_GETREG: {
 2691                 struct ch_reg *edata = (struct ch_reg *)data;
 2692                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2693                         return (EFAULT);
 2694                 edata->val = t3_read_reg(sc, edata->addr);
 2695                 break;
 2696         }
 2697         case CHELSIO_GET_SGE_CONTEXT: {
 2698                 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
 2699                 mtx_lock_spin(&sc->sge.reg_lock);
 2700                 switch (ecntxt->cntxt_type) {
 2701                 case CNTXT_TYPE_EGRESS:
 2702                         error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
 2703                             ecntxt->data);
 2704                         break;
 2705                 case CNTXT_TYPE_FL:
 2706                         error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
 2707                             ecntxt->data);
 2708                         break;
 2709                 case CNTXT_TYPE_RSP:
 2710                         error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
 2711                             ecntxt->data);
 2712                         break;
 2713                 case CNTXT_TYPE_CQ:
 2714                         error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
 2715                             ecntxt->data);
 2716                         break;
 2717                 default:
 2718                         error = EINVAL;
 2719                         break;
 2720                 }
 2721                 mtx_unlock_spin(&sc->sge.reg_lock);
 2722                 break;
 2723         }
 2724         case CHELSIO_GET_SGE_DESC: {
 2725                 struct ch_desc *edesc = (struct ch_desc *)data;
 2726                 int ret;
 2727                 if (edesc->queue_num >= SGE_QSETS * 6)
 2728                         return (EINVAL);
 2729                 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
 2730                     edesc->queue_num % 6, edesc->idx, edesc->data);
 2731                 if (ret < 0)
 2732                         return (EINVAL);
 2733                 edesc->size = ret;
 2734                 break;
 2735         }
 2736         case CHELSIO_GET_QSET_PARAMS: {
 2737                 struct qset_params *q;
 2738                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2739                 int q1 = pi->first_qset;
 2740                 int nqsets = pi->nqsets;
 2741                 int i;
 2742 
 2743                 if (t->qset_idx >= nqsets)
 2744                         return EINVAL;
 2745 
 2746                 i = q1 + t->qset_idx;
 2747                 q = &sc->params.sge.qset[i];
 2748                 t->rspq_size   = q->rspq_size;
 2749                 t->txq_size[0] = q->txq_size[0];
 2750                 t->txq_size[1] = q->txq_size[1];
 2751                 t->txq_size[2] = q->txq_size[2];
 2752                 t->fl_size[0]  = q->fl_size;
 2753                 t->fl_size[1]  = q->jumbo_size;
 2754                 t->polling     = q->polling;
 2755                 t->lro         = q->lro;
 2756                 t->intr_lat    = q->coalesce_usecs;
 2757                 t->cong_thres  = q->cong_thres;
 2758                 t->qnum        = i;
 2759 
 2760                 if ((sc->flags & FULL_INIT_DONE) == 0)
 2761                         t->vector = 0;
 2762                 else if (sc->flags & USING_MSIX)
 2763                         t->vector = rman_get_start(sc->msix_irq_res[i]);
 2764                 else
 2765                         t->vector = rman_get_start(sc->irq_res);
 2766 
 2767                 break;
 2768         }
 2769         case CHELSIO_GET_QSET_NUM: {
 2770                 struct ch_reg *edata = (struct ch_reg *)data;
 2771                 edata->val = pi->nqsets;
 2772                 break;
 2773         }
 2774         case CHELSIO_LOAD_FW: {
 2775                 uint8_t *fw_data;
 2776                 uint32_t vers;
 2777                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2778 
 2779                 /*
 2780                  * You're allowed to load a firmware only before FULL_INIT_DONE
 2781                  *
 2782                  * FW_UPTODATE is also set so the rest of the initialization
 2783                  * will not overwrite what was loaded here.  This gives you the
 2784                  * flexibility to load any firmware (and maybe shoot yourself in
 2785                  * the foot).
 2786                  */
 2787 
 2788                 ADAPTER_LOCK(sc);
 2789                 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
 2790                         ADAPTER_UNLOCK(sc);
 2791                         return (EBUSY);
 2792                 }
 2793 
 2794                 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2795                 if (!fw_data)
 2796                         error = ENOMEM;
 2797                 else
 2798                         error = copyin(t->buf, fw_data, t->len);
 2799 
 2800                 if (!error)
 2801                         error = -t3_load_fw(sc, fw_data, t->len);
 2802 
 2803                 if (t3_get_fw_version(sc, &vers) == 0) {
 2804                         snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
 2805                             "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
 2806                             G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
 2807                 }
 2808 
 2809                 if (!error)
 2810                         sc->flags |= FW_UPTODATE;
 2811 
 2812                 free(fw_data, M_DEVBUF);
 2813                 ADAPTER_UNLOCK(sc);
 2814                 break;
 2815         }
 2816         case CHELSIO_LOAD_BOOT: {
 2817                 uint8_t *boot_data;
 2818                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2819 
 2820                 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2821                 if (!boot_data)
 2822                         return ENOMEM;
 2823 
 2824                 error = copyin(t->buf, boot_data, t->len);
 2825                 if (!error)
 2826                         error = -t3_load_boot(sc, boot_data, t->len);
 2827 
 2828                 free(boot_data, M_DEVBUF);
 2829                 break;
 2830         }
 2831         case CHELSIO_GET_PM: {
 2832                 struct ch_pm *m = (struct ch_pm *)data;
 2833                 struct tp_params *p = &sc->params.tp;
 2834 
 2835                 if (!is_offload(sc))
 2836                         return (EOPNOTSUPP);
 2837 
 2838                 m->tx_pg_sz = p->tx_pg_size;
 2839                 m->tx_num_pg = p->tx_num_pgs;
 2840                 m->rx_pg_sz  = p->rx_pg_size;
 2841                 m->rx_num_pg = p->rx_num_pgs;
 2842                 m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
 2843 
 2844                 break;
 2845         }
 2846         case CHELSIO_SET_PM: {
 2847                 struct ch_pm *m = (struct ch_pm *)data;
 2848                 struct tp_params *p = &sc->params.tp;
 2849 
 2850                 if (!is_offload(sc))
 2851                         return (EOPNOTSUPP);
 2852                 if (sc->flags & FULL_INIT_DONE)
 2853                         return (EBUSY);
 2854 
 2855                 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
 2856                     !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
 2857                         return (EINVAL);        /* not power of 2 */
 2858                 if (!(m->rx_pg_sz & 0x14000))
 2859                         return (EINVAL);        /* not 16KB or 64KB */
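                /* tx page size must be one of 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */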
 2860                 if (!(m->tx_pg_sz & 0x1554000))
 2861                         return (EINVAL);
 2862                 if (m->tx_num_pg == -1)
 2863                         m->tx_num_pg = p->tx_num_pgs;
 2864                 if (m->rx_num_pg == -1)
 2865                         m->rx_num_pg = p->rx_num_pgs;
 2866                 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
 2867                         return (EINVAL);
 2868                 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
 2869                     m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
 2870                         return (EINVAL);
 2871 
 2872                 p->rx_pg_size = m->rx_pg_sz;
 2873                 p->tx_pg_size = m->tx_pg_sz;
 2874                 p->rx_num_pgs = m->rx_num_pg;
 2875                 p->tx_num_pgs = m->tx_num_pg;
 2876                 break;
 2877         }
 2878         case CHELSIO_SETMTUTAB: {
 2879                 struct ch_mtus *m = (struct ch_mtus *)data;
 2880                 int i;
 2881                 
 2882                 if (!is_offload(sc))
 2883                         return (EOPNOTSUPP);
 2884                 if (offload_running(sc))
 2885                         return (EBUSY);
 2886                 if (m->nmtus != NMTUS)
 2887                         return (EINVAL);
 2888                 if (m->mtus[0] < 81)         /* accommodate SACK */
 2889                         return (EINVAL);
 2890                 
 2891                 /*
 2892                  * MTUs must be in ascending order
 2893                  */
 2894                 for (i = 1; i < NMTUS; ++i)
 2895                         if (m->mtus[i] < m->mtus[i - 1])
 2896                                 return (EINVAL);
 2897 
 2898                 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
 2899                 break;
 2900         }
 2901         case CHELSIO_GETMTUTAB: {
 2902                 struct ch_mtus *m = (struct ch_mtus *)data;
 2903 
 2904                 if (!is_offload(sc))
 2905                         return (EOPNOTSUPP);
 2906 
 2907                 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
 2908                 m->nmtus = NMTUS;
 2909                 break;
 2910         }
 2911         case CHELSIO_GET_MEM: {
 2912                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2913                 struct mc7 *mem;
 2914                 uint8_t *useraddr;
 2915                 u64 buf[32];
 2916 
 2917                 /*
 2918                  * Use these to avoid modifying len/addr in the return
 2919                  * struct.
 2920                  */
 2921                 uint32_t len = t->len, addr = t->addr;
 2922 
 2923                 if (!is_offload(sc))
 2924                         return (EOPNOTSUPP);
 2925                 if (!(sc->flags & FULL_INIT_DONE))
 2926                         return (EIO);         /* need the memory controllers */
 2927                 if ((addr & 0x7) || (len & 0x7))
 2928                         return (EINVAL);
 2929                 if (t->mem_id == MEM_CM)
 2930                         mem = &sc->cm;
 2931                 else if (t->mem_id == MEM_PMRX)
 2932                         mem = &sc->pmrx;
 2933                 else if (t->mem_id == MEM_PMTX)
 2934                         mem = &sc->pmtx;
 2935                 else
 2936                         return (EINVAL);
 2937 
 2938                 /*
 2939                  * Version scheme:
 2940                  * bits 0..9: chip version
 2941                  * bits 10..15: chip revision
 2942                  */
 2943                 t->version = 3 | (sc->params.rev << 10);
 2944                 
 2945                 /*
 2946                  * Read 256 bytes at a time as len can be large and we don't
 2947                  * want to use huge intermediate buffers.
 2948                  */
 2949                 useraddr = (uint8_t *)t->buf; 
 2950                 while (len) {
 2951                         unsigned int chunk = min(len, sizeof(buf));
 2952 
 2953                         error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
 2954                         if (error)
 2955                                 return (-error);
 2956                         if (copyout(buf, useraddr, chunk))
 2957                                 return (EFAULT);
 2958                         useraddr += chunk;
 2959                         addr += chunk;
 2960                         len -= chunk;
 2961                 }
 2962                 break;
 2963         }
 2964         case CHELSIO_READ_TCAM_WORD: {
 2965                 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
 2966 
 2967                 if (!is_offload(sc))
 2968                         return (EOPNOTSUPP);
 2969                 if (!(sc->flags & FULL_INIT_DONE))
 2970                         return (EIO);         /* need MC5 */            
 2971                 return (-t3_read_mc5_range(&sc->mc5, t->addr, 1,
 2972                     t->buf));
 2973         }
 2974         case CHELSIO_SET_TRACE_FILTER: {
 2975                 struct ch_trace *t = (struct ch_trace *)data;
 2976                 const struct trace_params *tp;
 2977 
 2978                 tp = (const struct trace_params *)&t->sip;
 2979                 if (t->config_tx)
 2980                         t3_config_trace_filter(sc, tp, 0, t->invert_match,
 2981                                                t->trace_tx);
 2982                 if (t->config_rx)
 2983                         t3_config_trace_filter(sc, tp, 1, t->invert_match,
 2984                                                t->trace_rx);
 2985                 break;
 2986         }
 2987         case CHELSIO_SET_PKTSCHED: {
 2988                 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
 2989                 if (sc->open_device_map == 0)
 2990                         return (EAGAIN);
 2991                 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
 2992                     p->binding);
 2993                 break;
 2994         }
 2995         case CHELSIO_IFCONF_GETREGS: {
 2996                 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
 2997                 int reglen = cxgb_get_regs_len();
 2998                 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
 2999                 if (buf == NULL) {
 3000                         return (ENOMEM);
 3001                 }
 3002                 if (regs->len > reglen)
 3003                         regs->len = reglen;
 3004                 else if (regs->len < reglen)
 3005                         error = ENOBUFS;
 3006 
 3007                 if (!error) {
 3008                         cxgb_get_regs(sc, regs, buf);
 3009                         error = copyout(buf, regs->data, reglen);
 3010                 }
 3011                 free(buf, M_DEVBUF);
 3012 
 3013                 break;
 3014         }
 3015         case CHELSIO_SET_HW_SCHED: {
 3016                 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
 3017                 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
 3018 
 3019                 if ((sc->flags & FULL_INIT_DONE) == 0)
 3020                         return (EAGAIN);       /* need TP to be initialized */
 3021                 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
 3022                     !in_range(t->channel, 0, 1) ||
 3023                     !in_range(t->kbps, 0, 10000000) ||
 3024                     !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
 3025                     !in_range(t->flow_ipg, 0,
 3026                               dack_ticks_to_usec(sc, 0x7ff)))
 3027                         return (EINVAL);
 3028 
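                      /*
                       * Negative parameter fields mean "leave this setting
                       * unchanged", so each block below is applied only for
                       * values the caller actually supplied.
                       */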
 3029                 if (t->kbps >= 0) {
 3030                         error = t3_config_sched(sc, t->kbps, t->sched);
 3031                         if (error < 0)
 3032                                 return (-error);
 3033                 }
 3034                 if (t->class_ipg >= 0)
 3035                         t3_set_sched_ipg(sc, t->sched, t->class_ipg);
 3036                 if (t->flow_ipg >= 0) {
 3037                         t->flow_ipg *= 1000;     /* us -> ns */
 3038                         t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
 3039                 }
 3040                 if (t->mode >= 0) {
 3041                         int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
 3042 
 3043                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3044                                          bit, t->mode ? bit : 0);
 3045                 }
 3046                 if (t->channel >= 0)
 3047                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3048                                          1 << t->sched, t->channel << t->sched);
 3049                 break;
 3050         }
 3051         case CHELSIO_GET_EEPROM: {
 3052                 int i;
 3053                 struct ch_eeprom *e = (struct ch_eeprom *)data;
 3054                 uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
 3055 
 3056                 if (buf == NULL) {
 3057                         return (ENOMEM);
 3058                 }
 3059                 e->magic = EEPROM_MAGIC;
                /*
                 * Bounds-check the requested window; without this the reads
                 * below could index past the EEPROMSIZE-byte buffer.
                 */
                if (e->offset > EEPROMSIZE || e->len > EEPROMSIZE - e->offset) {
                        free(buf, M_DEVBUF);
                        return (EINVAL);
                }
 3060                 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
 3061                         error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
 3062 
 3063                 if (!error)
 3064                         error = copyout(buf + e->offset, e->data, e->len);
 3065 
 3066                 free(buf, M_DEVBUF);
 3067                 break;
 3068         }
 3069         case CHELSIO_CLEAR_STATS: {
 3070                 if (!(sc->flags & FULL_INIT_DONE))
 3071                         return (EAGAIN);
 3072 
 3073                 PORT_LOCK(pi);
 3074                 t3_mac_update_stats(&pi->mac);
 3075                 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
 3076                 PORT_UNLOCK(pi);
 3077                 break;
 3078         }
 3079         case CHELSIO_GET_UP_LA: {
 3080                 struct ch_up_la *la = (struct ch_up_la *)data;
 3081                 uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3082                 if (buf == NULL) {
 3083                         return (ENOMEM);
 3084                 }
 3085                 if (la->bufsize < LA_BUFSIZE)
 3086                         error = ENOBUFS;
 3087 
 3088                 if (!error)
 3089                         error = -t3_get_up_la(sc, &la->stopped, &la->idx,
 3090                                               &la->bufsize, buf);
 3091                 if (!error)
 3092                         error = copyout(buf, la->data, la->bufsize);
 3093 
 3094                 free(buf, M_DEVBUF);
 3095                 break;
 3096         }
 3097         case CHELSIO_GET_UP_IOQS: {
 3098                 struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
 3099                 uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3100                 uint32_t *v;
 3101 
 3102                 if (buf == NULL) {
 3103                         return (ENOMEM);
 3104                 }
 3105                 if (ioqs->bufsize < IOQS_BUFSIZE)
 3106                         error = ENOBUFS;
 3107 
 3108                 if (!error)
 3109                         error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
 3110 
 3111                 if (!error) {
 3112                         v = (uint32_t *)buf;
 3113 
 3114                         ioqs->ioq_rx_enable = *v++;
 3115                         ioqs->ioq_tx_enable = *v++;
 3116                         ioqs->ioq_rx_status = *v++;
 3117                         ioqs->ioq_tx_status = *v++;
 3118 
 3119                         error = copyout(v, ioqs->data, ioqs->bufsize);
 3120                 }
 3121 
 3122                 free(buf, M_DEVBUF);
 3123                 break;
 3124         }
 3125         case CHELSIO_SET_FILTER: {
 3126                 struct ch_filter *f = (struct ch_filter *)data;
 3127                 struct filter_info *p;
 3128                 unsigned int nfilters = sc->params.mc5.nfilters;
 3129 
 3130                 if (!is_offload(sc))
 3131                         return (EOPNOTSUPP);    /* No TCAM */
 3132                 if (!(sc->flags & FULL_INIT_DONE))
 3133                         return (EAGAIN);        /* mc5 not set up yet */
 3134                 if (nfilters == 0)
 3135                         return (EBUSY);         /* TOE will use TCAM */
 3136 
 3137                 /* sanity checks */
 3138                 if (f->filter_id >= nfilters ||
 3139                     (f->val.dip && f->mask.dip != 0xffffffff) ||
 3140                     (f->val.sport && f->mask.sport != 0xffff) ||
 3141                     (f->val.dport && f->mask.dport != 0xffff) ||
 3142                     (f->val.vlan && f->mask.vlan != 0xfff) ||
 3143                     (f->val.vlan_prio &&
 3144                         f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
 3145                     (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
 3146                     f->qset >= SGE_QSETS ||
 3147                     sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
 3148                         return (EINVAL);
 3149 
 3150                 /* Was allocated with M_WAITOK */
 3151                 KASSERT(sc->filters, ("filter table NULL\n"));
 3152 
 3153                 p = &sc->filters[f->filter_id];
 3154                 if (p->locked)
 3155                         return (EPERM);
 3156 
 3157                 bzero(p, sizeof(*p));
 3158                 p->sip = f->val.sip;
 3159                 p->sip_mask = f->mask.sip;
 3160                 p->dip = f->val.dip;
 3161                 p->sport = f->val.sport;
 3162                 p->dport = f->val.dport;
 3163                 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
 3164                 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
 3165                     FILTER_NO_VLAN_PRI;
 3166                 p->mac_hit = f->mac_hit;
 3167                 p->mac_vld = f->mac_addr_idx != 0xffff;
 3168                 p->mac_idx = f->mac_addr_idx;
 3169                 p->pkt_type = f->proto;
 3170                 p->report_filter_id = f->want_filter_id;
 3171                 p->pass = f->pass;
 3172                 p->rss = f->rss;
 3173                 p->qset = f->qset;
 3174 
 3175                 error = set_filter(sc, f->filter_id, p);
 3176                 if (error == 0)
 3177                         p->valid = 1;
 3178                 break;
 3179         }
 3180         case CHELSIO_DEL_FILTER: {
 3181                 struct ch_filter *f = (struct ch_filter *)data;
 3182                 struct filter_info *p;
 3183                 unsigned int nfilters = sc->params.mc5.nfilters;
 3184 
 3185                 if (!is_offload(sc))
 3186                         return (EOPNOTSUPP);
 3187                 if (!(sc->flags & FULL_INIT_DONE))
 3188                         return (EAGAIN);
 3189                 if (nfilters == 0 || sc->filters == NULL)
 3190                         return (EINVAL);
 3191                 if (f->filter_id >= nfilters)
 3192                        return (EINVAL);
 3193 
 3194                 p = &sc->filters[f->filter_id];
 3195                 if (p->locked)
 3196                         return (EPERM);
 3197                 if (!p->valid)
 3198                         return (EFAULT); /* Read "Bad address" as "Bad index" */
 3199 
 3200                 bzero(p, sizeof(*p));
 3201                 p->sip = p->sip_mask = 0xffffffff;
 3202                 p->vlan = 0xfff;
 3203                 p->vlan_prio = FILTER_NO_VLAN_PRI;
 3204                 p->pkt_type = 1;
 3205                 error = set_filter(sc, f->filter_id, p);
 3206                 break;
 3207         }
 3208         case CHELSIO_GET_FILTER: {
 3209                 struct ch_filter *f = (struct ch_filter *)data;
 3210                 struct filter_info *p;
 3211                 unsigned int i, nfilters = sc->params.mc5.nfilters;
 3212 
 3213                 if (!is_offload(sc))
 3214                         return (EOPNOTSUPP);
 3215                 if (!(sc->flags & FULL_INIT_DONE))
 3216                         return (EAGAIN);
 3217                 if (nfilters == 0 || sc->filters == NULL)
 3218                         return (EINVAL);
 3219 
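                      /*
                       * Cursor-style iteration: a filter_id of 0xffffffff
                       * starts the scan at entry 0, any other value resumes
                       * just past that entry, and 0xffffffff is handed back
                       * once no valid entries remain.
                       */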
 3220                 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
 3221                 for (; i < nfilters; i++) {
 3222                         p = &sc->filters[i];
 3223                         if (!p->valid)
 3224                                 continue;
 3225 
 3226                         bzero(f, sizeof(*f));
 3227 
 3228                         f->filter_id = i;
 3229                         f->val.sip = p->sip;
 3230                         f->mask.sip = p->sip_mask;
 3231                         f->val.dip = p->dip;
 3232                         f->mask.dip = p->dip ? 0xffffffff : 0;
 3233                         f->val.sport = p->sport;
 3234                         f->mask.sport = p->sport ? 0xffff : 0;
 3235                         f->val.dport = p->dport;
 3236                         f->mask.dport = p->dport ? 0xffff : 0;
 3237                         f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
 3238                         f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
 3239                         f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
 3240                             0 : p->vlan_prio;
 3241                         f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
 3242                             0 : FILTER_NO_VLAN_PRI;
 3243                         f->mac_hit = p->mac_hit;
 3244                         f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
 3245                         f->proto = p->pkt_type;
 3246                         f->want_filter_id = p->report_filter_id;
 3247                         f->pass = p->pass;
 3248                         f->rss = p->rss;
 3249                         f->qset = p->qset;
 3250 
 3251                         break;
 3252                 }
 3253                 
 3254                 if (i == nfilters)
 3255                         f->filter_id = 0xffffffff;
 3256                 break;
 3257         }
 3258         default:
 3259                 /* unrecognized ioctl command */
 3260                 return (EOPNOTSUPP);
 3261         }
 3262 
 3263         return (error);
 3264 }
 3265 
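      /*
       * Copy the 32-bit registers in [start, end] into buf at offset
       * start, preserving each register's offset in the dump; buf must
       * therefore be at least end + 4 bytes long.
       */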
 3266 static __inline void
 3267 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
 3268     unsigned int end)
 3269 {
 3270         uint32_t *p = (uint32_t *)(buf + start);
 3271 
 3272         for ( ; start <= end; start += sizeof(uint32_t))
 3273                 *p++ = t3_read_reg(ap, start);
 3274 }
 3275 
 3276 #define T3_REGMAP_SIZE (3 * 1024)
 3277 static int
 3278 cxgb_get_regs_len(void)
 3279 {
 3280         return T3_REGMAP_SIZE;
 3281 }
 3282 
 3283 static void
 3284 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
 3285 {           
 3286         
 3287         /*
 3288          * Version scheme:
 3289          * bits 0..9: chip version
 3290          * bits 10..15: chip revision
 3291          * bit 31: set for PCIe cards
 3292          */
 3293         regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
 3294 
 3295         /*
 3296          * We skip the MAC statistics registers because they are clear-on-read.
 3297          * Also reading multi-register stats would need to synchronize with the
 3298          * periodic mac stats accumulation.  Hard to justify the complexity.
 3299          */
 3300         memset(buf, 0, cxgb_get_regs_len());
 3301         reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
 3302         reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
 3303         reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
 3304         reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
 3305         reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
 3306         reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
 3307                        XGM_REG(A_XGM_SERDES_STAT3, 1));
 3308         reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
 3309                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
 3310 }
 3311 
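      /*
       * The last entry of the filter table is reserved as a locked,
       * always-valid default that passes packets to RSS; locked entries
       * are the ones setup_hw_filters() programs into the TCAM at
       * initialization time.
       */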
 3312 static int
 3313 alloc_filters(struct adapter *sc)
 3314 {
 3315         struct filter_info *p;
 3316         unsigned int nfilters = sc->params.mc5.nfilters;
 3317 
 3318         if (nfilters == 0)
 3319                 return (0);
 3320 
 3321         p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
 3322         sc->filters = p;
 3323 
 3324         p = &sc->filters[nfilters - 1];
 3325         p->vlan = 0xfff;
 3326         p->vlan_prio = FILTER_NO_VLAN_PRI;
 3327         p->pass = p->rss = p->valid = p->locked = 1;
 3328 
 3329         return (0);
 3330 }
 3331 
 3332 static int
 3333 setup_hw_filters(struct adapter *sc)
 3334 {
 3335         int i, rc;
 3336         unsigned int nfilters = sc->params.mc5.nfilters;
 3337 
 3338         if (!sc->filters)
 3339                 return (0);
 3340 
 3341         t3_enable_filters(sc);
 3342 
 3343         for (i = rc = 0; i < nfilters && !rc; i++) {
 3344                 if (sc->filters[i].locked)
 3345                         rc = set_filter(sc, i, &sc->filters[i]);
 3346         }
 3347 
 3348         return (rc);
 3349 }
 3350 
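      /*
       * Program one TCAM filter entry.  A single atomic management work
       * request carries a CPL_PASS_OPEN_REQ with the match criteria plus
       * two ULP-wrapped CPL_SET_TCB_FIELD commands that configure
       * filter-id reporting and the pass/RSS disposition.  When a filter
       * passes traffic without RSS, a follow-up CPL_SET_TCB_FIELD steers
       * matches to the chosen response queue.
       */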
 3351 static int
 3352 set_filter(struct adapter *sc, int id, const struct filter_info *f)
 3353 {
 3354         int len;
 3355         struct mbuf *m;
 3356         struct ulp_txpkt *txpkt;
 3357         struct work_request_hdr *wr;
 3358         struct cpl_pass_open_req *oreq;
 3359         struct cpl_set_tcb_field *sreq;
 3360 
 3361         len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
 3362         KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
 3363 
 3364         id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
 3365               sc->params.mc5.nfilters;
 3366 
 3367         m = m_gethdr(M_WAITOK, MT_DATA);
 3368         m->m_len = m->m_pkthdr.len = len;
 3369         bzero(mtod(m, char *), len);
 3370 
 3371         wr = mtod(m, struct work_request_hdr *);
 3372         wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
 3373 
 3374         oreq = (struct cpl_pass_open_req *)(wr + 1);
 3375         txpkt = (struct ulp_txpkt *)oreq;
 3376         txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
 3377         txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
 3378         OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
 3379         oreq->local_port = htons(f->dport);
 3380         oreq->peer_port = htons(f->sport);
 3381         oreq->local_ip = htonl(f->dip);
 3382         oreq->peer_ip = htonl(f->sip);
 3383         oreq->peer_netmask = htonl(f->sip_mask);
 3384         oreq->opt0h = 0;
 3385         oreq->opt0l = htonl(F_NO_OFFLOAD);
 3386         oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
 3387                          V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
 3388                          V_VLAN_PRI(f->vlan_prio >> 1) |
 3389                          V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
 3390                          V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
 3391                          V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
 3392 
 3393         sreq = (struct cpl_set_tcb_field *)(oreq + 1);
 3394         set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
 3395                           (f->report_filter_id << 15) | (1 << 23) |
 3396                           ((u64)f->pass << 35) | ((u64)!f->rss << 36));
 3397         set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
 3398         t3_mgmt_tx(sc, m);
 3399 
 3400         if (f->pass && !f->rss) {
 3401                 len = sizeof(*sreq);
 3402                 m = m_gethdr(M_WAITOK, MT_DATA);
 3403                 m->m_len = m->m_pkthdr.len = len;
 3404                 bzero(mtod(m, char *), len);
 3405                 sreq = mtod(m, struct cpl_set_tcb_field *);
 3406                 sreq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 3407                 mk_set_tcb_field(sreq, id, 25, 0x3f80000,
 3408                                  (u64)sc->rrss_map[f->qset] << 19);
 3409                 t3_mgmt_tx(sc, m);
 3410         }
 3411         return (0);
 3412 }
 3413 
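      /*
       * mk_set_tcb_field() fills out a bare CPL_SET_TCB_FIELD request that
       * updates the masked bits of one TCB word; set_tcb_field_ulp()
       * prepends the ULP_TXPKT header needed when the request is embedded
       * in a larger work request, as in set_filter() above.
       */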
 3414 static inline void
 3415 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
 3416     unsigned int word, u64 mask, u64 val)
 3417 {
 3418         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
 3419         req->reply = V_NO_REPLY(1);
 3420         req->cpu_idx = 0;
 3421         req->word = htons(word);
 3422         req->mask = htobe64(mask);
 3423         req->val = htobe64(val);
 3424 }
 3425 
 3426 static inline void
 3427 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
 3428     unsigned int word, u64 mask, u64 val)
 3429 {
 3430         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
 3431 
 3432         txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
 3433         txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
 3434         mk_set_tcb_field(req, tid, word, mask, val);
 3435 }
