FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_main.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2009, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/bus.h>
   37 #include <sys/module.h>
   38 #include <sys/pciio.h>
   39 #include <sys/conf.h>
   40 #include <machine/bus.h>
   41 #include <machine/resource.h>
   42 #include <sys/bus_dma.h>
   43 #include <sys/ktr.h>
   44 #include <sys/rman.h>
   45 #include <sys/ioccom.h>
   46 #include <sys/mbuf.h>
   47 #include <sys/linker.h>
   48 #include <sys/firmware.h>
   49 #include <sys/socket.h>
   50 #include <sys/sockio.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/syslog.h>
   54 #include <sys/queue.h>
   55 #include <sys/taskqueue.h>
   56 #include <sys/proc.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_arp.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <netinet/in_systm.h>
   68 #include <netinet/in.h>
   69 #include <netinet/if_ether.h>
   70 #include <netinet/ip.h>
   72 #include <netinet/tcp.h>
   73 #include <netinet/udp.h>
   74 
   75 #include <dev/pci/pcireg.h>
   76 #include <dev/pci/pcivar.h>
   77 #include <dev/pci/pci_private.h>
   78 
   79 #ifdef CONFIG_DEFINED
   80 #include <cxgb_include.h>
   81 #else
   82 #include <dev/cxgb/cxgb_include.h>
   83 #endif
   84 
   85 #ifdef PRIV_SUPPORTED
   86 #include <sys/priv.h>
   87 #endif
   88 
   89 static int cxgb_setup_interrupts(adapter_t *);
   90 static void cxgb_teardown_interrupts(adapter_t *);
   91 static void cxgb_init(void *);
   92 static int cxgb_init_locked(struct port_info *);
   93 static int cxgb_uninit_locked(struct port_info *);
   94 static int cxgb_uninit_synchronized(struct port_info *);
   95 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
   96 static int cxgb_media_change(struct ifnet *);
   97 static int cxgb_ifm_type(int);
   98 static void cxgb_build_medialist(struct port_info *);
   99 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
  100 static int setup_sge_qsets(adapter_t *);
  101 static void cxgb_async_intr(void *);
  102 static void cxgb_tick_handler(void *, int);
  103 static void cxgb_tick(void *);
  104 static void link_check_callout(void *);
  105 static void check_link_status(void *, int);
  106 static void setup_rss(adapter_t *sc);
  107 static int alloc_filters(struct adapter *);
  108 static int setup_hw_filters(struct adapter *);
  109 static int set_filter(struct adapter *, int, const struct filter_info *);
  110 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
  111     unsigned int, u64, u64);
  112 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
  113     unsigned int, u64, u64);
  114 
  115 /* Attachment glue for the PCI controller end of the device.  Each port of
  116  * the device is attached separately, as defined later.
  117  */
  118 static int cxgb_controller_probe(device_t);
  119 static int cxgb_controller_attach(device_t);
  120 static int cxgb_controller_detach(device_t);
  121 static void cxgb_free(struct adapter *);
  122 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
  123     unsigned int end);
  124 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
  125 static int cxgb_get_regs_len(void);
  126 static int offload_open(struct port_info *pi);
  127 static void touch_bars(device_t dev);
  128 static int offload_close(struct t3cdev *tdev);
  129 static void cxgb_update_mac_settings(struct port_info *p);
  130 
  131 static device_method_t cxgb_controller_methods[] = {
  132         DEVMETHOD(device_probe,         cxgb_controller_probe),
  133         DEVMETHOD(device_attach,        cxgb_controller_attach),
  134         DEVMETHOD(device_detach,        cxgb_controller_detach),
  135 
  136         /* bus interface */
  137         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  138         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  139 
  140         { 0, 0 }
  141 };
  142 
  143 static driver_t cxgb_controller_driver = {
  144         "cxgbc",
  145         cxgb_controller_methods,
  146         sizeof(struct adapter)
  147 };
  148 
  149 static devclass_t       cxgb_controller_devclass;
  150 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
  151 
  152 /*
  153  * Attachment glue for the ports.  Attachment is done directly to the
  154  * controller device.
  155  */
  156 static int cxgb_port_probe(device_t);
  157 static int cxgb_port_attach(device_t);
  158 static int cxgb_port_detach(device_t);
  159 
  160 static device_method_t cxgb_port_methods[] = {
  161         DEVMETHOD(device_probe,         cxgb_port_probe),
  162         DEVMETHOD(device_attach,        cxgb_port_attach),
  163         DEVMETHOD(device_detach,        cxgb_port_detach),
  164         { 0, 0 }
  165 };
  166 
  167 static driver_t cxgb_port_driver = {
  168         "cxgb",
  169         cxgb_port_methods,
  170         0
  171 };
  172 
  173 static d_ioctl_t cxgb_extension_ioctl;
  174 static d_open_t cxgb_extension_open;
  175 static d_close_t cxgb_extension_close;
  176 
  177 static struct cdevsw cxgb_cdevsw = {
  178        .d_version =    D_VERSION,
  179        .d_flags =      0,
  180        .d_open =       cxgb_extension_open,
  181        .d_close =      cxgb_extension_close,
  182        .d_ioctl =      cxgb_extension_ioctl,
  183        .d_name =       "cxgb",
  184 };
  185 
  186 static devclass_t       cxgb_port_devclass;
  187 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
  188 
  189 /*
  190  * The driver uses the best interrupt scheme available on a platform in the
  191  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
  192  * of these schemes the driver may consider as follows:
  193  *
  194  * msi = 2: choose from among all three options
  195  * msi = 1: only consider MSI and pin interrupts
  196  * msi = 0: force pin interrupts
  197  */
  198 static int msi_allowed = 2;
  199 
  200 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
  201 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
  202 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
  203     "MSI-X, MSI, INTx selector");
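/*
 * Example (administrator configuration, not part of the source): to
 * restrict the driver to MSI or INTx, set the tunable at boot time in
 * /boot/loader.conf:
 *
 *     hw.cxgb.msi_allowed="1"
 */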
  204 
  205 /*
  206  * The driver enables offload by default.
  207  * To disable it, set ofld_disable = 1.
  208  */
  209 static int ofld_disable = 0;
  210 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
  211 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
  212     "disable ULP offload");
  213 
  214 /*
  215  * The driver uses an auto-queue algorithm by default.
  216  * To disable it and force a single queue-set per port, set multiq = 0.
  217  */
  218 static int multiq = 1;
  219 TUNABLE_INT("hw.cxgb.multiq", &multiq);
  220 SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
  221     "use min(ncpus/ports, 8) queue-sets per port");
  222 
  223 /*
  224  * By default the driver will not update the firmware unless
  225  * it was compiled against a newer version.
  226  *
  227  */
  228 static int force_fw_update = 0;
  229 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
  230 SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
  231     "update firmware even if up to date");
  232 
  233 int cxgb_use_16k_clusters = 1;
  234 TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
  235 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
  236     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
  237 
  238 /*
  239  * Tune the size of the output queue.
  240  */
  241 int cxgb_snd_queue_len = IFQ_MAXLEN;
  242 TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
  243 SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
  244     &cxgb_snd_queue_len, 0, "send queue size");
  245 
  246 static int nfilters = -1;
  247 TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
  248 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
  249     &nfilters, 0, "max number of entries in the filter table");
  250 
  251 enum {
  252         MAX_TXQ_ENTRIES      = 16384,
  253         MAX_CTRL_TXQ_ENTRIES = 1024,
  254         MAX_RSPQ_ENTRIES     = 16384,
  255         MAX_RX_BUFFERS       = 16384,
  256         MAX_RX_JUMBO_BUFFERS = 16384,
  257         MIN_TXQ_ENTRIES      = 4,
  258         MIN_CTRL_TXQ_ENTRIES = 4,
  259         MIN_RSPQ_ENTRIES     = 32,
  260         MIN_FL_ENTRIES       = 32,
  261         MIN_FL_JUMBO_ENTRIES = 32
  262 };
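/*
 * A minimal sketch (not part of the original driver) of how the bounds
 * above would be applied: an externally supplied ring size is clamped
 * into [MIN_TXQ_ENTRIES, MAX_TXQ_ENTRIES].  The driver's ioctl path
 * performs equivalent range checks when queue parameters are changed.
 */
static __inline int
clamp_txq_entries(int requested)
{
        /* Round up to the smallest supported ring size. */
        if (requested < MIN_TXQ_ENTRIES)
                return (MIN_TXQ_ENTRIES);
        /* Cap at the largest supported ring size. */
        if (requested > MAX_TXQ_ENTRIES)
                return (MAX_TXQ_ENTRIES);
        return (requested);
}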
  263 
  264 struct filter_info {
  265         u32 sip;
  266         u32 sip_mask;
  267         u32 dip;
  268         u16 sport;
  269         u16 dport;
  270         u32 vlan:12;
  271         u32 vlan_prio:3;
  272         u32 mac_hit:1;
  273         u32 mac_idx:4;
  274         u32 mac_vld:1;
  275         u32 pkt_type:2;
  276         u32 report_filter_id:1;
  277         u32 pass:1;
  278         u32 rss:1;
  279         u32 qset:3;
  280         u32 locked:1;
  281         u32 valid:1;
  282 };
  283 
  284 enum { FILTER_NO_VLAN_PRI = 7 };
  285 
  286 #define EEPROM_MAGIC 0x38E2F10C
  287 
  288 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  289 
  290 /* Table for probing the cards.  The desc field isn't actually used */
  291 struct cxgb_ident {
  292         uint16_t        vendor;
  293         uint16_t        device;
  294         int             index;
  295         char            *desc;
  296 } cxgb_identifiers[] = {
  297         {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
  298         {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
  299         {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
  300         {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
  301         {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
  302         {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
  303         {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
  304         {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
  305         {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
  306         {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
  307         {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
  308         {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
  309         {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
  310         {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
  311         {0, 0, 0, NULL}
  312 };
  313 
  314 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
  315 
  316 
  317 static __inline char
  318 t3rev2char(struct adapter *adapter)
  319 {
  320         char rev = 'z';
  321 
  322         switch(adapter->params.rev) {
  323         case T3_REV_A:
  324                 rev = 'a';
  325                 break;
  326         case T3_REV_B:
  327         case T3_REV_B2:
  328                 rev = 'b';
  329                 break;
  330         case T3_REV_C:
  331                 rev = 'c';
  332                 break;
  333         }
  334         return rev;
  335 }
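/*
 * Usage sketch (assumed; mirrors the firmware image names defined
 * below): the revision character selects the protocol-SRAM image that
 * matches the chip stepping, e.g.
 *
 *     char name[32];
 *     snprintf(name, sizeof(name), TPSRAM_NAME, t3rev2char(sc));
 *
 * which yields "cxgb_t3c_protocol_sram" on a T3C part.
 */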
  336 
  337 static struct cxgb_ident *
  338 cxgb_get_ident(device_t dev)
  339 {
  340         struct cxgb_ident *id;
  341 
  342         for (id = cxgb_identifiers; id->desc != NULL; id++) {
  343                 if ((id->vendor == pci_get_vendor(dev)) &&
  344                     (id->device == pci_get_device(dev))) {
  345                         return (id);
  346                 }
  347         }
  348         return (NULL);
  349 }
  350 
  351 static const struct adapter_info *
  352 cxgb_get_adapter_info(device_t dev)
  353 {
  354         struct cxgb_ident *id;
  355         const struct adapter_info *ai;
  356 
  357         id = cxgb_get_ident(dev);
  358         if (id == NULL)
  359                 return (NULL);
  360 
  361         ai = t3_get_adapter_info(id->index);
  362 
  363         return (ai);
  364 }
  365 
  366 static int
  367 cxgb_controller_probe(device_t dev)
  368 {
  369         const struct adapter_info *ai;
  370         char *ports, buf[80];
  371         int nports;
  372 
  373         ai = cxgb_get_adapter_info(dev);
  374         if (ai == NULL)
  375                 return (ENXIO);
  376 
  377         nports = ai->nports0 + ai->nports1;
  378         if (nports == 1)
  379                 ports = "port";
  380         else
  381                 ports = "ports";
  382 
  383         snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
  384         device_set_desc_copy(dev, buf);
  385         return (BUS_PROBE_DEFAULT);
  386 }
  387 
  388 #define FW_FNAME "cxgb_t3fw"
  389 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
  390 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
  391 
  392 static int
  393 upgrade_fw(adapter_t *sc)
  394 {
  395 #ifdef FIRMWARE_LATEST
  396         const struct firmware *fw;
  397 #else
  398         struct firmware *fw;
  399 #endif  
  400         int status;
  401         u32 vers;
  402         
  403         if ((fw = firmware_get(FW_FNAME)) == NULL)  {
  404                 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
  405                 return (ENOENT);
  406         } else
  407                 device_printf(sc->dev, "installing firmware on card\n");
  408         status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
  409 
  410         if (status != 0) {
  411                 device_printf(sc->dev, "failed to install firmware: %d\n",
  412                     status);
  413         } else {
  414                 t3_get_fw_version(sc, &vers);
  415                 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  416                     G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  417                     G_FW_VERSION_MICRO(vers));
  418         }
  419 
  420         firmware_put(fw, FIRMWARE_UNLOAD);
  421 
  422         return (status);        
  423 }
  424 
  425 /*
  426  * The cxgb_controller_attach function is responsible for the initial
  427  * bringup of the device.  Its responsibilities include:
  428  *
  429  *  1. Determine if the device supports MSI or MSI-X.
  430  *  2. Allocate bus resources so that we can access the Base Address Register
  431  *  3. Create and initialize mutexes for the controller and its control
  432  *     logic such as SGE and MDIO.
  433  *  4. Call hardware specific setup routine for the adapter as a whole.
  434  *  5. Allocate the BAR for doing MSI-X.
  435  *  6. Setup the line interrupt iff MSI-X is not supported.
  436  *  7. Create the driver's taskq.
  437  *  8. Start one task queue service thread.
  438  *  9. Check if the firmware and SRAM are up-to-date.  They will be
  439  *     auto-updated later (before FULL_INIT_DONE), if required.
  440  * 10. Create a child device for each MAC (port)
  441  * 11. Initialize T3 private state.
  442  * 12. Trigger the LED
  443  * 13. Setup offload iff supported.
  444  * 14. Reset/restart the tick callout.
  445  * 15. Attach sysctls
  446  *
  447  * NOTE: Any modification to this sequence MUST be reflected in the
  448  * list above.  Failure to do so will result in problems on various
  449  * error conditions, including link flapping.
  450  */
  451 static int
  452 cxgb_controller_attach(device_t dev)
  453 {
  454         device_t child;
  455         const struct adapter_info *ai;
  456         struct adapter *sc;
  457         int i, error = 0;
  458         uint32_t vers;
  459         int port_qsets = 1;
  460 #ifdef MSI_SUPPORTED
  461         int msi_needed, reg;
  462 #endif
  463         char buf[80];
  464 
  465         sc = device_get_softc(dev);
  466         sc->dev = dev;
  467         sc->msi_count = 0;
  468         ai = cxgb_get_adapter_info(dev);
  469 
  470         /*
  471          * XXX not really related but a recent addition
  472          */
  473 #ifdef MSI_SUPPORTED    
  474         /* find the PCIe link width and set max read request to 4KB */
  475         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
  476                 uint16_t lnk;
  477 
  478                 lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
  479                 sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
  480                 if (sc->link_width < 8 &&
  481                     (ai->caps & SUPPORTED_10000baseT_Full)) {
  482                         device_printf(sc->dev,
  483                             "PCIe x%d Link, expect reduced performance\n",
  484                             sc->link_width);
  485                 }
  486 
  487                 pci_set_max_read_req(dev, 4096);
  488         }
  489 #endif
  490         touch_bars(dev);
  491         pci_enable_busmaster(dev);
  492         /*
  493          * Allocate the registers and make them available to the driver.
  494          * The registers that we care about for NIC mode are in BAR 0
  495          */
  496         sc->regs_rid = PCIR_BAR(0);
  497         if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  498             &sc->regs_rid, RF_ACTIVE)) == NULL) {
  499                 device_printf(dev, "Cannot allocate BAR region 0\n");
  500                 return (ENXIO);
  501         }
  502         sc->udbs_rid = PCIR_BAR(2);
  503         sc->udbs_res = NULL;
  504         if (is_offload(sc) &&
  505             ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  506                    &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
  507                 device_printf(dev, "Cannot allocate BAR region 2\n");
  508                 error = ENXIO;
  509                 goto out;
  510         }
  511 
  512         snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
  513             device_get_unit(dev));
  514         ADAPTER_LOCK_INIT(sc, sc->lockbuf);
  515 
  516         snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
  517             device_get_unit(dev));
  518         snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
  519             device_get_unit(dev));
  520         snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
  521             device_get_unit(dev));
  522         
  523         MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
  524         MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
  525         MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
  526         
  527         sc->bt = rman_get_bustag(sc->regs_res);
  528         sc->bh = rman_get_bushandle(sc->regs_res);
  529         sc->mmio_len = rman_get_size(sc->regs_res);
  530 
  531         for (i = 0; i < MAX_NPORTS; i++)
  532                 sc->port[i].adapter = sc;
  533 
  534         if (t3_prep_adapter(sc, ai, 1) < 0) {
  535                 printf("prep adapter failed\n");
  536                 error = ENODEV;
  537                 goto out;
  538         }
  539         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
  540          * enough messages for the queue sets.  If that fails, try falling
  541          * back to MSI.  If that fails, then try falling back to the legacy
  542          * interrupt pin model.
  543          */
  544 #ifdef MSI_SUPPORTED
  545 
  546         sc->msix_regs_rid = 0x20;
  547         if ((msi_allowed >= 2) &&
  548             (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  549             &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
  550 
  551                 if (multiq)
  552                         port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
  553                 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
  554 
  555                 if (pci_msix_count(dev) == 0 ||
  556                     (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
  557                     sc->msi_count != msi_needed) {
  558                         device_printf(dev, "alloc msix failed - "
  559                                       "msi_count=%d, msi_needed=%d, err=%d; "
  560                                       "will try MSI\n", sc->msi_count,
  561                                       msi_needed, error);
  562                         sc->msi_count = 0;
  563                         port_qsets = 1;
  564                         pci_release_msi(dev);
  565                         bus_release_resource(dev, SYS_RES_MEMORY,
  566                             sc->msix_regs_rid, sc->msix_regs_res);
  567                         sc->msix_regs_res = NULL;
  568                 } else {
  569                         sc->flags |= USING_MSIX;
  570                         sc->cxgb_intr = cxgb_async_intr;
  571                         device_printf(dev,
  572                                       "using MSI-X interrupts (%u vectors)\n",
  573                                       sc->msi_count);
  574                 }
  575         }
  576 
  577         if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
  578                 sc->msi_count = 1;
  579                 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
  580                         device_printf(dev, "alloc msi failed - "
  581                                       "err=%d; will try INTx\n", error);
  582                         sc->msi_count = 0;
  583                         port_qsets = 1;
  584                         pci_release_msi(dev);
  585                 } else {
  586                         sc->flags |= USING_MSI;
  587                         sc->cxgb_intr = t3_intr_msi;
  588                         device_printf(dev, "using MSI interrupts\n");
  589                 }
  590         }
  591 #endif
  592         if (sc->msi_count == 0) {
  593                 device_printf(dev, "using line interrupts\n");
  594                 sc->cxgb_intr = t3b_intr;
  595         }
  596 
  597         /* Create a private taskqueue thread for handling driver events */
  598 #ifdef TASKQUEUE_CURRENT        
  599         sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
  600             taskqueue_thread_enqueue, &sc->tq);
  601 #else
  602         sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
  603             taskqueue_thread_enqueue, &sc->tq);
  604 #endif  
  605         if (sc->tq == NULL) {
  606                 device_printf(dev, "failed to allocate controller task queue\n");
  607                 goto out;
  608         }
  609 
  610         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
  611             device_get_nameunit(dev));
  612         TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
  613 
  614         
  615         /* Create a periodic callout for checking adapter status */
  616         callout_init(&sc->cxgb_tick_ch, TRUE);
  617         
  618         if (t3_check_fw_version(sc) < 0 || force_fw_update) {
  619                 /*
  620                  * Warn user that a firmware update will be attempted in init.
  621                  */
  622                 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
  623                     FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  624                 sc->flags &= ~FW_UPTODATE;
  625         } else {
  626                 sc->flags |= FW_UPTODATE;
  627         }
  628 
  629         if (t3_check_tpsram_version(sc) < 0) {
  630                 /*
  631                  * Warn user that a firmware update will be attempted in init.
  632                  */
  633                 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
  634                     t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  635                 sc->flags &= ~TPS_UPTODATE;
  636         } else {
  637                 sc->flags |= TPS_UPTODATE;
  638         }
  639         
  640         /*
  641          * Create a child device for each MAC.  The ethernet attachment
  642          * will be done in these children.
  643          */     
  644         for (i = 0; i < (sc)->params.nports; i++) {
  645                 struct port_info *pi;
  646                 
  647                 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
  648                         device_printf(dev, "failed to add child port\n");
  649                         error = EINVAL;
  650                         goto out;
  651                 }
  652                 pi = &sc->port[i];
  653                 pi->adapter = sc;
  654                 pi->nqsets = port_qsets;
  655                 pi->first_qset = i*port_qsets;
  656                 pi->port_id = i;
  657                 pi->tx_chan = i >= ai->nports0;
  658                 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
  659                 sc->rxpkt_map[pi->txpkt_intf] = i;
  660                 sc->port[i].tx_chan = i >= ai->nports0;
  661                 sc->portdev[i] = child;
  662                 device_set_softc(child, pi);
  663         }
  664         if ((error = bus_generic_attach(dev)) != 0)
  665                 goto out;
  666 
  667         /* initialize sge private state */
  668         t3_sge_init_adapter(sc);
  669 
  670         t3_led_ready(sc);
  671         
  672         cxgb_offload_init();
  673         if (is_offload(sc)) {
  674                 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  675                 cxgb_adapter_ofld(sc);
  676         }
  677         error = t3_get_fw_version(sc, &vers);
  678         if (error)
  679                 goto out;
  680 
  681         snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
  682             G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
  683             G_FW_VERSION_MICRO(vers));
  684 
  685         snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
  686                  ai->desc, is_offload(sc) ? "R" : "",
  687                  sc->params.vpd.ec, sc->params.vpd.sn);
  688         device_set_desc_copy(dev, buf);
  689 
  690         snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
  691                  sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
  692                  sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
  693 
  694         device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
  695         callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
  696         t3_add_attach_sysctls(sc);
  697 out:
  698         if (error)
  699                 cxgb_free(sc);
  700 
  701         return (error);
  702 }
  703 
  704 /*
  705  * The cxgb_controller_detach routine is called when the device is
  706  * unloaded from the system.
  707  */
  708 
  709 static int
  710 cxgb_controller_detach(device_t dev)
  711 {
  712         struct adapter *sc;
  713 
  714         sc = device_get_softc(dev);
  715 
  716         cxgb_free(sc);
  717 
  718         return (0);
  719 }
  720 
  721 /*
  722  * cxgb_free() is called by the cxgb_controller_detach() routine
  723  * to tear down the structures that were built up in
  724  * cxgb_controller_attach(), and should be the final piece of work
  725  * done when fully unloading the driver.  Its responsibilities
  726  * include:
  727  *
  728  *  1. Shutting down the threads started by the cxgb_controller_attach()
  729  *     routine.
  730  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
  731  *  3. Detaching all of the port devices created during the
  732  *     cxgb_controller_attach() routine.
  733  *  4. Removing the device children created via cxgb_controller_attach().
  734  *  5. Releasing PCI resources associated with the device.
  735  *  6. Turning off the offload support, iff it was turned on.
  736  *  7. Destroying the mutexes created in cxgb_controller_attach().
  737  *
  738  */
  739 static void
  740 cxgb_free(struct adapter *sc)
  741 {
  742         int i, nqsets = 0;
  743 
  744         ADAPTER_LOCK(sc);
  745         sc->flags |= CXGB_SHUTDOWN;
  746         ADAPTER_UNLOCK(sc);
  747 
  748         cxgb_pcpu_shutdown_threads(sc);
  749 
  750         t3_sge_deinit_sw(sc);
  751         /*
  752          * Make sure all child devices are gone.
  753          */
  754         bus_generic_detach(sc->dev);
  755         for (i = 0; i < (sc)->params.nports; i++) {
  756                 if (sc->portdev[i] &&
  757                     device_delete_child(sc->dev, sc->portdev[i]) != 0)
  758                         device_printf(sc->dev, "failed to delete child port\n");
  759                 nqsets += sc->port[i].nqsets;
  760         }
  761 
  762         /*
  763          * At this point, it is as if cxgb_port_detach has run on all ports, and
  764          * cxgb_down has run on the adapter.  All interrupts have been silenced,
  765          * all open devices have been closed.
  766          */
  767         KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
  768                                            __func__, sc->open_device_map));
  769         for (i = 0; i < sc->params.nports; i++) {
  770                 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
  771                                                   __func__, i));
  772         }
  773 
  774         /*
  775          * Finish off the adapter's callouts.
  776          */
  777         callout_drain(&sc->cxgb_tick_ch);
  778         callout_drain(&sc->sge_timer_ch);
  779 
  780         /*
  781          * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
  782          * sysctls are cleaned up by the kernel linker.
  783          */
  784         if (sc->flags & FULL_INIT_DONE) {
  785                 t3_free_sge_resources(sc, nqsets);
  786                 sc->flags &= ~FULL_INIT_DONE;
  787         }
  788 
  789         /*
  790          * Release all interrupt resources.
  791          */
  792         cxgb_teardown_interrupts(sc);
  793 
  794 #ifdef MSI_SUPPORTED
  795         if (sc->flags & (USING_MSI | USING_MSIX)) {
  796                 device_printf(sc->dev, "releasing msi message(s)\n");
  797                 pci_release_msi(sc->dev);
  798         } else {
  799                 device_printf(sc->dev, "no msi message to release\n");
  800         }
  801 
  802         if (sc->msix_regs_res != NULL) {
  803                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
  804                     sc->msix_regs_res);
  805         }
  806 #endif
  807 
  808         /*
  809          * Free the adapter's taskqueue.
  810          */
  811         if (sc->tq != NULL) {
  812                 taskqueue_free(sc->tq);
  813                 sc->tq = NULL;
  814         }
  815         
  816         if (is_offload(sc)) {
  817                 clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
  818                 cxgb_adapter_unofld(sc);
  819         }
  820 
  821 #ifdef notyet
  822         if (sc->flags & CXGB_OFLD_INIT)
  823                 cxgb_offload_deactivate(sc);
  824 #endif
  825         free(sc->filters, M_DEVBUF);
  826         t3_sge_free(sc);
  827 
  828         cxgb_offload_exit();
  829 
  830         if (sc->udbs_res != NULL)
  831                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
  832                     sc->udbs_res);
  833 
  834         if (sc->regs_res != NULL)
  835                 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
  836                     sc->regs_res);
  837 
  838         MTX_DESTROY(&sc->mdio_lock);
  839         MTX_DESTROY(&sc->sge.reg_lock);
  840         MTX_DESTROY(&sc->elmer_lock);
  841         ADAPTER_LOCK_DEINIT(sc);
  842 }
  843 
  844 /**
  845  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
  846  *      @sc: the controller softc
  847  *
  848  *      Determines how many sets of SGE queues to use and initializes them.
  849  *      We support multiple queue sets per port if we have MSI-X, otherwise
  850  *      just one queue set per port.
  851  */
  852 static int
  853 setup_sge_qsets(adapter_t *sc)
  854 {
  855         int i, j, err, irq_idx = 0, qset_idx = 0;
  856         u_int ntxq = SGE_TXQ_PER_SET;
  857 
  858         if ((err = t3_sge_alloc(sc)) != 0) {
  859                 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
  860                 return (err);
  861         }
  862 
  863         if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
  864                 irq_idx = -1;
  865 
  866         for (i = 0; i < (sc)->params.nports; i++) {
  867                 struct port_info *pi = &sc->port[i];
  868 
  869                 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
  870                         err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
  871                             (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
  872                             &sc->params.sge.qset[qset_idx], ntxq, pi);
  873                         if (err) {
  874                                 t3_free_sge_resources(sc, qset_idx);
  875                                 device_printf(sc->dev,
  876                                     "t3_sge_alloc_qset failed with %d\n", err);
  877                                 return (err);
  878                         }
  879                 }
  880         }
  881 
  882         return (0);
  883 }
  884 
  885 static void
  886 cxgb_teardown_interrupts(adapter_t *sc)
  887 {
  888         int i;
  889 
  890         for (i = 0; i < SGE_QSETS; i++) {
  891                 if (sc->msix_intr_tag[i] == NULL) {
  892 
  893                         /* Should have been set up fully or not at all */
  894                         KASSERT(sc->msix_irq_res[i] == NULL &&
  895                                 sc->msix_irq_rid[i] == 0,
  896                                 ("%s: half-done interrupt (%d).", __func__, i));
  897 
  898                         continue;
  899                 }
  900 
  901                 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
  902                                   sc->msix_intr_tag[i]);
  903                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
  904                                      sc->msix_irq_res[i]);
  905 
  906                 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
  907                 sc->msix_irq_rid[i] = 0;
  908         }
  909 
  910         if (sc->intr_tag) {
  911                 KASSERT(sc->irq_res != NULL,
  912                         ("%s: half-done interrupt.", __func__));
  913 
  914                 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
  915                 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  916                                      sc->irq_res);
  917 
  918                 sc->irq_res = sc->intr_tag = NULL;
  919                 sc->irq_rid = 0;
  920         }
  921 }
  922 
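/*
 * Resource IDs in the setup routine below follow the usual FreeBSD
 * convention: rid 0 is the legacy INTx line, rid 1 the first MSI or
 * MSI-X message, and rids 2 and up carry the per-queue-set MSI-X
 * vectors.
 */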
  923 static int
  924 cxgb_setup_interrupts(adapter_t *sc)
  925 {
  926         struct resource *res;
  927         void *tag;
  928         int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
  929 
  930         sc->irq_rid = intr_flag ? 1 : 0;
  931         sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
  932                                              RF_SHAREABLE | RF_ACTIVE);
  933         if (sc->irq_res == NULL) {
  934                 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
  935                               intr_flag, sc->irq_rid);
  936                 err = EINVAL;
  937                 sc->irq_rid = 0;
  938         } else {
  939                 err = bus_setup_intr(sc->dev, sc->irq_res,
  940                                      INTR_MPSAFE | INTR_TYPE_NET,
  941 #ifdef INTR_FILTERS
  942                                      NULL,
  943 #endif
  944                                      sc->cxgb_intr, sc, &sc->intr_tag);
  945 
  946                 if (err) {
  947                         device_printf(sc->dev,
  948                                       "Cannot set up interrupt (%x, %u, %d)\n",
  949                                       intr_flag, sc->irq_rid, err);
  950                         bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
  951                                              sc->irq_res);
  952                         sc->irq_res = sc->intr_tag = NULL;
  953                         sc->irq_rid = 0;
  954                 }
  955         }
  956 
  957         /* That's all for INTx or MSI */
  958         if (!(intr_flag & USING_MSIX) || err)
  959                 return (err);
  960 
  961         for (i = 0; i < sc->msi_count - 1; i++) {
  962                 rid = i + 2;
  963                 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
  964                                              RF_SHAREABLE | RF_ACTIVE);
  965                 if (res == NULL) {
  966                         device_printf(sc->dev, "Cannot allocate interrupt "
  967                                       "for message %d\n", rid);
  968                         err = EINVAL;
  969                         break;
  970                 }
  971 
  972                 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
  973 #ifdef INTR_FILTERS
  974                                      NULL,
  975 #endif
  976                                      t3_intr_msix, &sc->sge.qs[i], &tag);
  977                 if (err) {
  978                         device_printf(sc->dev, "Cannot set up interrupt "
  979                                       "for message %d (%d)\n", rid, err);
  980                         bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
  981                         break;
  982                 }
  983 
  984                 sc->msix_irq_rid[i] = rid;
  985                 sc->msix_irq_res[i] = res;
  986                 sc->msix_intr_tag[i] = tag;
  987         }
  988 
  989         if (err)
  990                 cxgb_teardown_interrupts(sc);
  991 
  992         return (err);
  993 }
  994 
  995 
  996 static int
  997 cxgb_port_probe(device_t dev)
  998 {
  999         struct port_info *p;
 1000         char buf[80];
 1001         const char *desc;
 1002         
 1003         p = device_get_softc(dev);
 1004         desc = p->phy.desc;
 1005         snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
 1006         device_set_desc_copy(dev, buf);
 1007         return (0);
 1008 }
 1009 
 1010 
 1011 static int
 1012 cxgb_makedev(struct port_info *pi)
 1013 {
 1014         
 1015         pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
 1016             UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
 1017         
 1018         if (pi->port_cdev == NULL)
 1019                 return (ENOMEM);
 1020 
 1021         pi->port_cdev->si_drv1 = (void *)pi;
 1022         
 1023         return (0);
 1024 }
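/*
 * The node created here (/dev/cxgbN, named after the ifnet) exposes the
 * driver's private ioctls through cxgb_extension_ioctl().  Chelsio's
 * cxgbtool utility is the usual consumer (an assumption about userland
 * tooling, not something visible in this file).
 */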
 1025 
 1026 #ifdef TSO_SUPPORTED
 1027 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_VLAN_HWTSO)
 1028 /* Don't enable TSO6 yet */
 1029 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_VLAN_HWTSO)
 1030 #else
 1031 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
 1032 /* Don't enable TSO6 yet */
 1033 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
 1034 #define IFCAP_TSO4 0x0
 1035 #define IFCAP_TSO6 0x0
 1036 #define CSUM_TSO   0x0
 1037 #endif
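/*
 * Example (runtime administration, not part of the source): the
 * capabilities enabled above can be toggled with ifconfig(8), which
 * reaches cxgb_ioctl() via SIOCSIFCAP:
 *
 *     ifconfig cxgb0 -tso     # disable TCP segmentation offload
 */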
 1038 
 1039 
 1040 static int
 1041 cxgb_port_attach(device_t dev)
 1042 {
 1043         struct port_info *p;
 1044         struct ifnet *ifp;
 1045         int err;
 1046         struct adapter *sc;
 1047         
 1048         
 1049         p = device_get_softc(dev);
 1050         sc = p->adapter;
 1051         snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
 1052             device_get_unit(device_get_parent(dev)), p->port_id);
 1053         PORT_LOCK_INIT(p, p->lockbuf);
 1054 
 1055         callout_init(&p->link_check_ch, CALLOUT_MPSAFE);
 1056         TASK_INIT(&p->link_check_task, 0, check_link_status, p);
 1057 
 1058         /* Allocate an ifnet object and set it up */
 1059         ifp = p->ifp = if_alloc(IFT_ETHER);
 1060         if (ifp == NULL) {
 1061                 device_printf(dev, "Cannot allocate ifnet\n");
 1062                 return (ENOMEM);
 1063         }
 1064         
 1065         /*
 1066          * Note that there is currently no watchdog timer.
 1067          */
 1068         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1069         ifp->if_init = cxgb_init;
 1070         ifp->if_softc = p;
 1071         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1072         ifp->if_ioctl = cxgb_ioctl;
 1073         ifp->if_start = cxgb_start;
 1074 
 1075 
 1076         ifp->if_timer = 0;      /* Disable ifnet watchdog */
 1077         ifp->if_watchdog = NULL;
 1078 
 1079         ifp->if_snd.ifq_drv_maxlen = max(cxgb_snd_queue_len, ifqmaxlen);
 1080         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 1081         IFQ_SET_READY(&ifp->if_snd);
 1082 
 1083         ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
 1084         ifp->if_capabilities |= CXGB_CAP;
 1085         ifp->if_capenable |= CXGB_CAP_ENABLE;
 1086         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
 1087         /*
 1088          * disable TSO on 4-port - it isn't supported by the firmware yet
 1089          */     
 1090         if (sc->params.nports > 2) {
 1091                 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO);
 1092                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO);
 1093                 ifp->if_hwassist &= ~CSUM_TSO;
 1094         }
 1095 
 1096         ether_ifattach(ifp, p->hw_addr);
 1097 
 1098 #ifdef IFNET_MULTIQUEUE
 1099         ifp->if_transmit = cxgb_pcpu_transmit;
 1100 #endif
 1101         /*
 1102          * Only default to jumbo frames on 10GigE
 1103          */
 1104         if (p->adapter->params.nports <= 2)
 1105                 ifp->if_mtu = ETHERMTU_JUMBO;
 1106         if ((err = cxgb_makedev(p)) != 0) {
 1107                 printf("makedev failed %d\n", err);
 1108                 return (err);
 1109         }
 1110 
 1111         /* Create a list of media supported by this port */
 1112         ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
 1113             cxgb_media_status);
 1114         cxgb_build_medialist(p);
 1115       
 1116         t3_sge_init_port(p);
 1117 
 1118         return (err);
 1119 }
 1120 
 1121 /*
 1122  * cxgb_port_detach() is called via the device_detach method when
 1123  * cxgb_free() calls bus_generic_detach().  It is responsible for
 1124  * removing the device from the view of the kernel, i.e. from all
 1125  * interface lists, etc.  This routine is only called when the driver
 1126  * is being unloaded, not when the link goes down.
 1127  */
 1128 static int
 1129 cxgb_port_detach(device_t dev)
 1130 {
 1131         struct port_info *p;
 1132         struct adapter *sc;
 1133 
 1134         p = device_get_softc(dev);
 1135         sc = p->adapter;
 1136 
 1137         /* Tell cxgb_ioctl and if_init that the port is going away */
 1138         ADAPTER_LOCK(sc);
 1139         SET_DOOMED(p);
 1140         wakeup(&sc->flags);
 1141         while (IS_BUSY(sc))
 1142                 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
 1143         SET_BUSY(sc);
 1144         ADAPTER_UNLOCK(sc);
 1145 
 1146         if (p->port_cdev != NULL)
 1147                 destroy_dev(p->port_cdev);
 1148 
 1149         cxgb_uninit_synchronized(p);
 1150         ether_ifdetach(p->ifp);
 1151 
 1152         PORT_LOCK_DEINIT(p);
 1153         if_free(p->ifp);
 1154         p->ifp = NULL;
 1155 
 1156         ADAPTER_LOCK(sc);
 1157         CLR_BUSY(sc);
 1158         wakeup_one(&sc->flags);
 1159         ADAPTER_UNLOCK(sc);
 1160         return (0);
 1161 }
 1162 
 1163 void
 1164 t3_fatal_err(struct adapter *sc)
 1165 {
 1166         u_int fw_status[4];
 1167 
 1168         if (sc->flags & FULL_INIT_DONE) {
 1169                 t3_sge_stop(sc);
 1170                 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
 1171                 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
 1172                 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 1173                 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 1174                 t3_intr_disable(sc);
 1175         }
 1176         device_printf(sc->dev,"encountered fatal error, operation suspended\n");
 1177         if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
 1178                 device_printf(sc->dev, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 1179                     fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
 1180 }
 1181 
 1182 int
 1183 t3_os_find_pci_capability(adapter_t *sc, int cap)
 1184 {
 1185         device_t dev;
 1186         struct pci_devinfo *dinfo;
 1187         pcicfgregs *cfg;
 1188         uint32_t status;
 1189         uint8_t ptr;
 1190 
 1191         dev = sc->dev;
 1192         dinfo = device_get_ivars(dev);
 1193         cfg = &dinfo->cfg;
 1194 
 1195         status = pci_read_config(dev, PCIR_STATUS, 2);
 1196         if (!(status & PCIM_STATUS_CAPPRESENT))
 1197                 return (0);
 1198 
 1199         switch (cfg->hdrtype & PCIM_HDRTYPE) {
 1200         case 0:
 1201         case 1:
 1202                 ptr = PCIR_CAP_PTR;
 1203                 break;
 1204         case 2:
 1205                 ptr = PCIR_CAP_PTR_2;
 1206                 break;
 1207         default:
 1208                 return (0);
 1209                 break;
 1210         }
 1211         ptr = pci_read_config(dev, ptr, 1);
 1212 
 1213         while (ptr != 0) {
 1214                 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
 1215                         return (ptr);
 1216                 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
 1217         }
 1218 
 1219         return (0);
 1220 }
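/*
 * Usage sketch (assumed): callers pass a capability ID from pcireg.h
 * and receive the capability's offset in config space, or 0 if it is
 * absent:
 *
 *     int ptr = t3_os_find_pci_capability(sc, PCIY_EXPRESS);
 *     if (ptr != 0)
 *             ...  PCIe capability registers start at offset ptr
 */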
 1221 
 1222 int
 1223 t3_os_pci_save_state(struct adapter *sc)
 1224 {
 1225         device_t dev;
 1226         struct pci_devinfo *dinfo;
 1227 
 1228         dev = sc->dev;
 1229         dinfo = device_get_ivars(dev);
 1230 
 1231         pci_cfg_save(dev, dinfo, 0);
 1232         return (0);
 1233 }
 1234 
 1235 int
 1236 t3_os_pci_restore_state(struct adapter *sc)
 1237 {
 1238         device_t dev;
 1239         struct pci_devinfo *dinfo;
 1240 
 1241         dev = sc->dev;
 1242         dinfo = device_get_ivars(dev);
 1243 
 1244         pci_cfg_restore(dev, dinfo);
 1245         return (0);
 1246 }
 1247 
 1248 /**
 1249  *      t3_os_link_changed - handle link status changes
 1250  *      @sc: the adapter associated with the link change
 1251  *      @port_id: the port index whose link status has changed
 1252  *      @link_status: the new status of the link
 1253  *      @speed: the new speed setting
 1254  *      @duplex: the new duplex setting
 1255  *      @fc: the new flow-control setting
 1256  *
 1257  *      This is the OS-dependent handler for link status changes.  The OS
 1258  *      neutral handler takes care of most of the processing for these events,
 1259  *      then calls this handler for any OS-specific processing.
 1260  */
 1261 void
 1262 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
 1263      int duplex, int fc, int mac_was_reset)
 1264 {
 1265         struct port_info *pi = &adapter->port[port_id];
 1266         struct ifnet *ifp = pi->ifp;
 1267 
 1268         /* no race with detach, so ifp should always be good */
 1269         KASSERT(ifp, ("%s: if detached.", __func__));
 1270 
 1271         /* Reapply mac settings if they were lost due to a reset */
 1272         if (mac_was_reset) {
 1273                 PORT_LOCK(pi);
 1274                 cxgb_update_mac_settings(pi);
 1275                 PORT_UNLOCK(pi);
 1276         }
 1277 
 1278         if (link_status) {
 1279                 ifp->if_baudrate = IF_Mbps(speed);
 1280                 if_link_state_change(ifp, LINK_STATE_UP);
 1281         } else
 1282                 if_link_state_change(ifp, LINK_STATE_DOWN);
 1283 }
 1284 
 1285 /**
 1286  *      t3_os_phymod_changed - handle PHY module changes
 1287  *      @phy: the PHY reporting the module change
 1288  *      @mod_type: new module type
 1289  *
 1290  *      This is the OS-dependent handler for PHY module changes.  It is
 1291  *      invoked when a PHY module is removed or inserted for any OS-specific
 1292  *      processing.
 1293  */
 1294 void t3_os_phymod_changed(struct adapter *adap, int port_id)
 1295 {
 1296         static const char *mod_str[] = {
 1297                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
 1298         };
 1299         struct port_info *pi = &adap->port[port_id];
 1300         int mod = pi->phy.modtype;
 1301 
 1302         if (mod != pi->media.ifm_cur->ifm_data)
 1303                 cxgb_build_medialist(pi);
 1304 
 1305         if (mod == phy_modtype_none)
 1306                 if_printf(pi->ifp, "PHY module unplugged\n");
 1307         else {
 1308                 KASSERT(mod < ARRAY_SIZE(mod_str),
 1309                         ("invalid PHY module type %d", mod));
 1310                 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
 1311         }
 1312 }
 1313 
 1314 void
 1315 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
 1316 {
 1317 
 1318         /*
 1319          * The ifnet might not be allocated when this gets called, as
 1320          * this is called early in attach by t3_prep_adapter, so save
 1321          * the address in the port structure.
 1322          */
 1323         if (cxgb_debug)
 1324                 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
 1325         bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
 1326 }
 1327 
 1328 /*
 1329  * Programs the XGMAC based on the settings in the ifnet.  These settings
 1330  * include MTU, MAC address, mcast addresses, etc.
 1331  */
 1332 static void
 1333 cxgb_update_mac_settings(struct port_info *p)
 1334 {
 1335         struct ifnet *ifp = p->ifp;
 1336         struct t3_rx_mode rm;
 1337         struct cmac *mac = &p->mac;
 1338         int mtu, hwtagging;
 1339 
 1340         PORT_LOCK_ASSERT_OWNED(p);
 1341 
 1342         bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
 1343 
 1344         mtu = ifp->if_mtu;
 1345         if (ifp->if_capenable & IFCAP_VLAN_MTU)
 1346                 mtu += ETHER_VLAN_ENCAP_LEN;
 1347 
 1348         hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
 1349 
 1350         t3_mac_set_mtu(mac, mtu);
 1351         t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
 1352         t3_mac_set_address(mac, 0, p->hw_addr);
 1353         t3_init_rx_mode(&rm, p);
 1354         t3_mac_set_rx_mode(mac, &rm);
 1355 }
 1356 
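/*
 * Poll response queue 0 until at least n offload replies beyond
 * init_cnt have arrived, re-checking every 10ms and returning
 * ETIMEDOUT after five unsuccessful checks.
 */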
 1357 static int
 1358 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 1359                               unsigned long n)
 1360 {
 1361         int attempts = 5;
 1362 
 1363         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 1364                 if (!--attempts)
 1365                         return (ETIMEDOUT);
 1366                 t3_os_sleep(10);
 1367         }
 1368         return 0;
 1369 }
 1370 
 1371 static int
 1372 init_tp_parity(struct adapter *adap)
 1373 {
 1374         int i;
 1375         struct mbuf *m;
 1376         struct cpl_set_tcb_field *greq;
 1377         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
 1378 
 1379         t3_tp_set_offload_mode(adap, 1);
 1380 
 1381         for (i = 0; i < 16; i++) {
 1382                 struct cpl_smt_write_req *req;
 1383 
 1384                 m = m_gethdr(M_WAITOK, MT_DATA);
 1385                 req = mtod(m, struct cpl_smt_write_req *);
 1386                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1387                 memset(req, 0, sizeof(*req));
 1388                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1389                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 1390                 req->iff = i;
 1391                 t3_mgmt_tx(adap, m);
 1392         }
 1393 
 1394         for (i = 0; i < 2048; i++) {
 1395                 struct cpl_l2t_write_req *req;
 1396 
 1397                 m = m_gethdr(M_WAITOK, MT_DATA);
 1398                 req = mtod(m, struct cpl_l2t_write_req *);
 1399                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1400                 memset(req, 0, sizeof(*req));
 1401                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1402                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 1403                 req->params = htonl(V_L2T_W_IDX(i));
 1404                 t3_mgmt_tx(adap, m);
 1405         }
 1406 
 1407         for (i = 0; i < 2048; i++) {
 1408                 struct cpl_rte_write_req *req;
 1409 
 1410                 m = m_gethdr(M_WAITOK, MT_DATA);
 1411                 req = mtod(m, struct cpl_rte_write_req *);
 1412                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1413                 memset(req, 0, sizeof(*req));
 1414                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1415                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 1416                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
 1417                 t3_mgmt_tx(adap, m);
 1418         }
 1419 
 1420         m = m_gethdr(M_WAITOK, MT_DATA);
 1421         greq = mtod(m, struct cpl_set_tcb_field *);
 1422         m->m_len = m->m_pkthdr.len = sizeof(*greq);
 1423         memset(greq, 0, sizeof(*greq));
 1424         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1425         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
 1426         greq->mask = htobe64(1);
 1427         t3_mgmt_tx(adap, m);
 1428 
 1429         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 1430         t3_tp_set_offload_mode(adap, 0);
 1431         return (i);
 1432 }
 1433 
 1434 /**
 1435  *      setup_rss - configure Receive Side Scaling (per-queue connection demux)
 1436  *      @adap: the adapter
 1437  *
 1438  *      Sets up RSS to distribute packets to multiple receive queues.  We
 1439  *      configure the RSS CPU lookup table to distribute to the number of HW
 1440  *      receive queues, and the response queue lookup table to narrow that
 1441  *      down to the response queues actually configured for each port.
 1442  *      We always configure the RSS mapping for two ports since the mapping
 1443  *      table has plenty of entries.
 1444  */
 1445 static void
 1446 setup_rss(adapter_t *adap)
 1447 {
 1448         int i;
 1449         u_int nq[2]; 
 1450         uint8_t cpus[SGE_QSETS + 1];
 1451         uint16_t rspq_map[RSS_TABLE_SIZE];
 1452         
 1453         for (i = 0; i < SGE_QSETS; ++i)
 1454                 cpus[i] = i;
 1455         cpus[SGE_QSETS] = 0xff;
 1456 
 1457         nq[0] = nq[1] = 0;
 1458         for_each_port(adap, i) {
 1459                 const struct port_info *pi = adap2pinfo(adap, i);
 1460 
 1461                 nq[pi->tx_chan] += pi->nqsets;
 1462         }
 1463         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 1464                 rspq_map[i] = nq[0] ? i % nq[0] : 0;
 1465                 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? (i % nq[1]) + nq[0] : 0;
 1466         }
 1467 
 1468         /* Calculate the reverse RSS map table */
 1469         for (i = 0; i < SGE_QSETS; ++i)
 1470                 adap->rrss_map[i] = 0xff;
 1471         for (i = 0; i < RSS_TABLE_SIZE; ++i)
 1472                 if (adap->rrss_map[rspq_map[i]] == 0xff)
 1473                         adap->rrss_map[rspq_map[i]] = i;
 1474 
 1475         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 1476                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
 1477                       F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
 1478                       cpus, rspq_map);
 1479 
 1480 }
 1481 
 1482 /*
 1483  * Sends an mbuf to an offload queue driver.  This is a thin wrapper
 1484  * around t3_offload_tx; no tap handling is done here.
 1485  */
 1486 static inline int
 1487 offload_tx(struct t3cdev *tdev, struct mbuf *m)
 1488 {
 1489         int ret;
 1490 
 1491         ret = t3_offload_tx(tdev, m);
 1492         return (ret);
 1493 }
 1494 
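      /*
       * Program the source MAC table (SMT) entry for port idx with that
       * port's MAC address, via a CPL_SMT_WRITE_REQ on the offload queue.
       */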
 1495 static int
 1496 write_smt_entry(struct adapter *adapter, int idx)
 1497 {
 1498         struct port_info *pi = &adapter->port[idx];
 1499         struct cpl_smt_write_req *req;
 1500         struct mbuf *m;
 1501 
 1502         if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 1503                 return (ENOMEM);
 1504 
 1505         req = mtod(m, struct cpl_smt_write_req *);
 1506         m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
 1507         
 1508         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 1509         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 1510         req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
 1511         req->iff = idx;
 1512         memset(req->src_mac1, 0, sizeof(req->src_mac1));
 1513         memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
 1514 
 1515         m_set_priority(m, 1);
 1516 
 1517         offload_tx(&adapter->tdev, m);
 1518 
 1519         return (0);
 1520 }
 1521 
 1522 static int
 1523 init_smt(struct adapter *adapter)
 1524 {
 1525         int i;
 1526 
 1527         for_each_port(adapter, i)
 1528                 write_smt_entry(adapter, i);
 1529         return (0);
 1530 }
 1531 
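      /*
       * Seed the per-channel TP MTU table with the standard Ethernet MTU
       * for both channels.
       */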
 1532 static void
 1533 init_port_mtus(adapter_t *adapter)
 1534 {
 1535         unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
 1536 
 1537         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 1538 }
 1539 
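      /*
       * Send a management work request binding TX packet-scheduler queue
       * qidx to a port, with the given min/max scheduler parameters.  An
       * mbuf allocation failure is silently ignored.
       */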
 1540 static void
 1541 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 1542                               int hi, int port)
 1543 {
 1544         struct mbuf *m;
 1545         struct mngt_pktsched_wr *req;
 1546 
 1547         m = m_gethdr(M_NOWAIT, MT_DATA);
 1548         if (m) {
 1549                 req = mtod(m, struct mngt_pktsched_wr *);
 1550                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 1551                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 1552                 req->sched = sched;
 1553                 req->idx = qidx;
 1554                 req->min = lo;
 1555                 req->max = hi;
 1556                 req->binding = port;
 1557                 m->m_len = m->m_pkthdr.len = sizeof(*req);
 1558                 t3_mgmt_tx(adap, m);
 1559         }
 1560 }
 1561 
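      /*
       * Bind each port's queue sets to that port's TX channel using
       * pktsched commands.
       */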
 1562 static void
 1563 bind_qsets(adapter_t *sc)
 1564 {
 1565         int i, j;
 1566 
 1567         cxgb_pcpu_startup_threads(sc);
 1568         for (i = 0; i < (sc)->params.nports; ++i) {
 1569                 const struct port_info *pi = adap2pinfo(sc, i);
 1570 
 1571                 for (j = 0; j < pi->nqsets; ++j) {
 1572                         send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
 1573                                           -1, pi->tx_chan);
 1574 
 1575                 }
 1576         }
 1577 }
 1578 
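      /*
       * Rewrite the protocol SRAM image stored in the EEPROM if its version
       * does not match the one this driver was built against.
       */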
 1579 static void
 1580 update_tpeeprom(struct adapter *adap)
 1581 {
 1582 #ifdef FIRMWARE_LATEST
 1583         const struct firmware *tpeeprom;
 1584 #else
 1585         struct firmware *tpeeprom;
 1586 #endif
 1587 
 1588         uint32_t version;
 1589         unsigned int major, minor;
 1590         int ret, len;
 1591         char rev, name[32];
 1592 
 1593         t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
 1594 
 1595         major = G_TP_VERSION_MAJOR(version);
 1596         minor = G_TP_VERSION_MINOR(version);
 1597         if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
 1598                 return;
 1599 
 1600         rev = t3rev2char(adap);
 1601         snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
 1602 
 1603         tpeeprom = firmware_get(name);
 1604         if (tpeeprom == NULL) {
 1605                 device_printf(adap->dev,
 1606                               "could not load TP EEPROM: unable to load %s\n",
 1607                               name);
 1608                 return;
 1609         }
 1610 
 1611         len = tpeeprom->datasize - 4;
 1612         
 1613         ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
 1614         if (ret)
 1615                 goto release_tpeeprom;
 1616 
 1617         if (len != TP_SRAM_LEN) {
 1618                 device_printf(adap->dev,
 1619                               "%s length is wrong, len=%d expected=%d\n", name,
 1620                               len, TP_SRAM_LEN);
 1621                 goto release_tpeeprom;
 1622         }
 1623         
 1624         ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
 1625             TP_SRAM_OFFSET);
 1626         
 1627         if (!ret) {
 1628                 device_printf(adap->dev,
 1629                         "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
 1630                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
 1631         } else 
 1632                 device_printf(adap->dev,
 1633                               "Protocol SRAM image update in EEPROM failed\n");
 1634 
 1635 release_tpeeprom:
 1636         firmware_put(tpeeprom, FIRMWARE_UNLOAD);
 1637         
 1638         return;
 1639 }
 1640 
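      /*
       * Load the protocol SRAM image into the adapter, after making sure
       * the copy kept in the EEPROM is current (see update_tpeeprom).
       */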
 1641 static int
 1642 update_tpsram(struct adapter *adap)
 1643 {
 1644 #ifdef FIRMWARE_LATEST
 1645         const struct firmware *tpsram;
 1646 #else
 1647         struct firmware *tpsram;
 1648 #endif  
 1649         int ret;
 1650         char rev, name[32];
 1651 
 1652         rev = t3rev2char(adap);
 1653         snprintf(name, sizeof(name), TPSRAM_NAME, rev);
 1654 
 1655         update_tpeeprom(adap);
 1656 
 1657         tpsram = firmware_get(name);
 1658         if (tpsram == NULL) {
 1659                 device_printf(adap->dev, "could not load TP SRAM\n");
 1660                 return (EINVAL);
 1661         } else
 1662                 device_printf(adap->dev, "updating TP SRAM\n");
 1663         
 1664         ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
 1665         if (ret)
 1666                 goto release_tpsram;    
 1667 
 1668         ret = t3_set_proto_sram(adap, tpsram->data);
 1669         if (ret)
 1670                 device_printf(adap->dev, "loading protocol SRAM failed\n");
 1671 
 1672 release_tpsram:
 1673         firmware_put(tpsram, FIRMWARE_UNLOAD);
 1674         
 1675         return (ret);
 1676 }
 1677 
 1678 /**
 1679  *      cxgb_up - enable the adapter
 1680  *      @adap: adapter being enabled
 1681  *
 1682  *      Called when the first port is enabled, this function performs the
 1683  *      actions necessary to make an adapter operational, such as completing
 1684  *      the initialization of HW modules, and enabling interrupts.
 1685  */
 1686 static int
 1687 cxgb_up(struct adapter *sc)
 1688 {
 1689         int err = 0;
 1690         unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
 1691 
 1692         KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
 1693                                            __func__, sc->open_device_map));
 1694 
 1695         if ((sc->flags & FULL_INIT_DONE) == 0) {
 1696 
 1697                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1698 
 1699                 if ((sc->flags & FW_UPTODATE) == 0)
 1700                         if ((err = upgrade_fw(sc)))
 1701                                 goto out;
 1702 
 1703                 if ((sc->flags & TPS_UPTODATE) == 0)
 1704                         if ((err = update_tpsram(sc)))
 1705                                 goto out;
 1706 
 1707                 if (is_offload(sc) && nfilters != 0) {
 1708                         sc->params.mc5.nservers = 0;
 1709 
 1710                         if (nfilters < 0)
 1711                                 sc->params.mc5.nfilters = mxf;
 1712                         else
 1713                                 sc->params.mc5.nfilters = min(nfilters, mxf);
 1714                 }
 1715 
 1716                 err = t3_init_hw(sc, 0);
 1717                 if (err)
 1718                         goto out;
 1719 
 1720                 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
 1721                 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
 1722 
 1723                 err = setup_sge_qsets(sc);
 1724                 if (err)
 1725                         goto out;
 1726 
 1727                 alloc_filters(sc);
 1728                 setup_rss(sc);
 1729 
 1730                 t3_intr_clear(sc);
 1731                 err = cxgb_setup_interrupts(sc);
 1732                 if (err)
 1733                         goto out;
 1734 
 1735                 t3_add_configured_sysctls(sc);
 1736                 sc->flags |= FULL_INIT_DONE;
 1737         }
 1738 
 1739         t3_intr_clear(sc);
 1740         t3_sge_start(sc);
 1741         t3_intr_enable(sc);
 1742 
 1743         if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
 1744             is_offload(sc) && init_tp_parity(sc) == 0)
 1745                 sc->flags |= TP_PARITY_INIT;
 1746 
 1747         if (sc->flags & TP_PARITY_INIT) {
 1748                 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
 1749                 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
 1750         }
 1751         
 1752         if (!(sc->flags & QUEUES_BOUND)) {
 1753                 bind_qsets(sc);
 1754                 setup_hw_filters(sc);
 1755                 sc->flags |= QUEUES_BOUND;              
 1756         }
 1757 
 1758         t3_sge_reset_adapter(sc);
 1759 out:
 1760         return (err);
 1761 }
 1762 
 1763 /*
 1764  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 1765  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 1766  * during controller_detach, not here.
 1767  */
 1768 static void
 1769 cxgb_down(struct adapter *sc)
 1770 {
 1771         t3_sge_stop(sc);
 1772         t3_intr_disable(sc);
 1773 }
 1774 
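      /*
       * Mark the offload device open and bring up offload support: enable
       * offload mode, load the MTU and congestion-control tables, program
       * the SMT, and notify registered ULP clients.
       */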
 1775 static int
 1776 offload_open(struct port_info *pi)
 1777 {
 1778         struct adapter *sc = pi->adapter;
 1779         struct t3cdev *tdev = &sc->tdev;
 1780 
 1781         setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);
 1782 
 1783         t3_tp_set_offload_mode(sc, 1);
 1784         tdev->lldev = pi->ifp;
 1785         init_port_mtus(sc);
 1786         t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
 1787                      sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
 1788         init_smt(sc);
 1789         cxgb_add_clients(tdev);
 1790 
 1791         return (0);
 1792 }
 1793 
 1794 static int
 1795 offload_close(struct t3cdev *tdev)
 1796 {
 1797         struct adapter *adapter = tdev2adap(tdev);
 1798 
 1799         if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
 1800                 return (0);
 1801 
 1802         /* Call back all registered clients */
 1803         cxgb_remove_clients(tdev);
 1804 
 1805         tdev->lldev = NULL;
 1806         cxgb_set_dummy_ops(tdev);
 1807         t3_tp_set_offload_mode(adapter, 0);
 1808 
 1809         clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
 1810 
 1811         return (0);
 1812 }
 1813 
 1814 /*
 1815  * if_init for cxgb ports.
 1816  */
 1817 static void
 1818 cxgb_init(void *arg)
 1819 {
 1820         struct port_info *p = arg;
 1821         struct adapter *sc = p->adapter;
 1822 
 1823         ADAPTER_LOCK(sc);
 1824         cxgb_init_locked(p); /* releases adapter lock */
 1825         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1826 }
 1827 
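      /*
       * Bring up a port.  Called with the adapter lock held; always returns
       * with it released.  The lock is dropped (with the controller marked
       * busy) around any one-time initialization that may sleep.
       */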
 1828 static int
 1829 cxgb_init_locked(struct port_info *p)
 1830 {
 1831         struct adapter *sc = p->adapter;
 1832         struct ifnet *ifp = p->ifp;
 1833         struct cmac *mac = &p->mac;
 1834         int rc = 0, may_sleep = 0, gave_up_lock = 0;
 1835 
 1836         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1837 
 1838         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1839                 gave_up_lock = 1;
 1840                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
 1841                         rc = EINTR;
 1842                         goto done;
 1843                 }
 1844         }
 1845         if (IS_DOOMED(p)) {
 1846                 rc = ENXIO;
 1847                 goto done;
 1848         }
 1849         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1850 
 1851         /*
 1852          * The code that runs during one-time adapter initialization can sleep
 1853          * so it's important not to hold any locks across it.
 1854          */
 1855         may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
 1856 
 1857         if (may_sleep) {
 1858                 SET_BUSY(sc);
 1859                 gave_up_lock = 1;
 1860                 ADAPTER_UNLOCK(sc);
 1861         }
 1862 
 1863         if (sc->open_device_map == 0) {
 1864                 if ((rc = cxgb_up(sc)) != 0)
 1865                         goto done;
 1866 
 1867                 if (is_offload(sc) && !ofld_disable && offload_open(p))
 1868                         log(LOG_WARNING,
 1869                             "Could not initialize offload capabilities\n");
 1870         }
 1871 
 1872         PORT_LOCK(p);
 1873         if (isset(&sc->open_device_map, p->port_id) &&
 1874             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1875                 PORT_UNLOCK(p);
 1876                 goto done;
 1877         }
 1878         t3_port_intr_enable(sc, p->port_id);
 1879         if (!mac->multiport) 
 1880                 t3_mac_init(mac);
 1881         cxgb_update_mac_settings(p);
 1882         t3_link_start(&p->phy, mac, &p->link_config);
 1883         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 1884         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1885         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1886         PORT_UNLOCK(p);
 1887 
 1888         /* all ok */
 1889         setbit(&sc->open_device_map, p->port_id);
 1890         callout_reset(&p->link_check_ch,
 1891             p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
 1892             link_check_callout, p);
 1893 
 1894 done:
 1895         if (may_sleep) {
 1896                 ADAPTER_LOCK(sc);
 1897                 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1898                 CLR_BUSY(sc);
 1899         }
 1900         if (gave_up_lock)
 1901                 wakeup_one(&sc->flags);
 1902         ADAPTER_UNLOCK(sc);
 1903         return (rc);
 1904 }
 1905 
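      /*
       * Counterpart of cxgb_init_locked: waits until the controller is not
       * busy, marks it busy, and drops the adapter lock around the actual
       * uninit.  Returns with the lock released.
       */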
 1906 static int
 1907 cxgb_uninit_locked(struct port_info *p)
 1908 {
 1909         struct adapter *sc = p->adapter;
 1910         int rc;
 1911 
 1912         ADAPTER_LOCK_ASSERT_OWNED(sc);
 1913 
 1914         while (!IS_DOOMED(p) && IS_BUSY(sc)) {
 1915                 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
 1916                         rc = EINTR;
 1917                         goto done;
 1918                 }
 1919         }
 1920         if (IS_DOOMED(p)) {
 1921                 rc = ENXIO;
 1922                 goto done;
 1923         }
 1924         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 1925         SET_BUSY(sc);
 1926         ADAPTER_UNLOCK(sc);
 1927 
 1928         rc = cxgb_uninit_synchronized(p);
 1929 
 1930         ADAPTER_LOCK(sc);
 1931         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 1932         CLR_BUSY(sc);
 1933         wakeup_one(&sc->flags);
 1934 done:
 1935         ADAPTER_UNLOCK(sc);
 1936         return (rc);
 1937 }
 1938 
 1939 /*
 1940  * Called on "ifconfig down", and from port_detach
 1941  */
 1942 static int
 1943 cxgb_uninit_synchronized(struct port_info *pi)
 1944 {
 1945         struct adapter *sc = pi->adapter;
 1946         struct ifnet *ifp = pi->ifp;
 1947 
 1948         /*
 1949          * taskqueue_drain may cause a deadlock if the adapter lock is held.
 1950          */
 1951         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 1952 
 1953         /*
 1954          * Clear this port's bit from the open device map, and then drain all
 1955          * the tasks that can access/manipulate this port's port_info or ifp.
 1956          * We disable this port's interrupts here and so the slow/ext
 1957          * interrupt tasks won't be enqueued.  The tick task will continue to
 1958          * be enqueued every second but the runs after this drain will not see
 1959          * this port in the open device map.
 1960          *
 1961          * A well behaved task must take open_device_map into account and ignore
 1962          * ports that are not open.
 1963          */
 1964         clrbit(&sc->open_device_map, pi->port_id);
 1965         t3_port_intr_disable(sc, pi->port_id);
 1966         taskqueue_drain(sc->tq, &sc->slow_intr_task);
 1967         taskqueue_drain(sc->tq, &sc->tick_task);
 1968 
 1969         callout_drain(&pi->link_check_ch);
 1970         taskqueue_drain(sc->tq, &pi->link_check_task);
 1971 
 1972         PORT_LOCK(pi);
 1973         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1974 
 1975         /* disable pause frames */
 1976         t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
 1977 
 1978         /* Reset RX FIFO HWM */
 1979         t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
 1980                          V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
 1981 
 1982         DELAY(100 * 1000);
 1983 
 1984         /* Wait for TXFIFO empty */
 1985         t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
 1986                         F_TXFIFO_EMPTY, 1, 20, 5);
 1987 
 1988         DELAY(100 * 1000);
 1989         t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
 1990 
 1991 
 1992         pi->phy.ops->power_down(&pi->phy, 1);
 1993 
 1994         PORT_UNLOCK(pi);
 1995 
 1996         pi->link_config.link_ok = 0;
 1997         t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
 1998 
 1999         if ((sc->open_device_map & PORT_MASK) == 0)
 2000                 offload_close(&sc->tdev);
 2001 
 2002         if (sc->open_device_map == 0)
 2003                 cxgb_down(pi->adapter);
 2004 
 2005         return (0);
 2006 }
 2007 
 2008 /*
 2009  * Mark lro enabled or disabled in all qsets for this port
 2010  */
 2011 static int
 2012 cxgb_set_lro(struct port_info *p, int enabled)
 2013 {
 2014         int i;
 2015         struct adapter *adp = p->adapter;
 2016         struct sge_qset *q;
 2017 
 2018         for (i = 0; i < p->nqsets; i++) {
 2019                 q = &adp->sge.qs[p->first_qset + i];
 2020                 q->lro.enabled = (enabled != 0);
 2021         }
 2022         return (0);
 2023 }
 2024 
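      /*
       * if_ioctl for cxgb ports.
       */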
 2025 static int
 2026 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
 2027 {
 2028         struct port_info *p = ifp->if_softc;
 2029         struct adapter *sc = p->adapter;
 2030         struct ifreq *ifr = (struct ifreq *)data;
 2031         int flags, error = 0, mtu;
 2032         uint32_t mask;
 2033 
 2034         switch (command) {
 2035         case SIOCSIFMTU:
 2036                 ADAPTER_LOCK(sc);
 2037                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2038                 if (error) {
 2039 fail:
 2040                         ADAPTER_UNLOCK(sc);
 2041                         return (error);
 2042                 }
 2043 
 2044                 mtu = ifr->ifr_mtu;
 2045                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
 2046                         error = EINVAL;
 2047                 } else {
 2048                         ifp->if_mtu = mtu;
 2049                         PORT_LOCK(p);
 2050                         cxgb_update_mac_settings(p);
 2051                         PORT_UNLOCK(p);
 2052                 }
 2053                 ADAPTER_UNLOCK(sc);
 2054                 break;
 2055         case SIOCSIFFLAGS:
 2056                 ADAPTER_LOCK(sc);
 2057                 if (IS_DOOMED(p)) {
 2058                         error = ENXIO;
 2059                         goto fail;
 2060                 }
 2061                 if (ifp->if_flags & IFF_UP) {
 2062                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2063                                 flags = p->if_flags;
 2064                                 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
 2065                                     ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
 2066                                         if (IS_BUSY(sc)) {
 2067                                                 error = EBUSY;
 2068                                                 goto fail;
 2069                                         }
 2070                                         PORT_LOCK(p);
 2071                                         cxgb_update_mac_settings(p);
 2072                                         PORT_UNLOCK(p);
 2073                                 }
 2074                                 ADAPTER_UNLOCK(sc);
 2075                         } else
 2076                                 error = cxgb_init_locked(p);
 2077                         p->if_flags = ifp->if_flags;
 2078                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2079                         error = cxgb_uninit_locked(p);
 2080                 else
 2081                         ADAPTER_UNLOCK(sc);
 2082 
 2083                 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 2084                 break;
 2085         case SIOCADDMULTI:
 2086         case SIOCDELMULTI:
 2087                 ADAPTER_LOCK(sc);
 2088                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2089                 if (error)
 2090                         goto fail;
 2091 
 2092                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2093                         PORT_LOCK(p);
 2094                         cxgb_update_mac_settings(p);
 2095                         PORT_UNLOCK(p);
 2096                 }
 2097                 ADAPTER_UNLOCK(sc);
 2098 
 2099                 break;
 2100         case SIOCSIFCAP:
 2101                 ADAPTER_LOCK(sc);
 2102                 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
 2103                 if (error)
 2104                         goto fail;
 2105 
 2106                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 2107                 if (mask & IFCAP_TXCSUM) {
 2108                         ifp->if_capenable ^= IFCAP_TXCSUM;
 2109                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
 2110 
 2111                         if (IFCAP_TSO & ifp->if_capenable &&
 2112                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
 2113                                 ifp->if_capenable &= ~IFCAP_TSO;
 2114                                 ifp->if_hwassist &= ~CSUM_TSO;
 2115                                 if_printf(ifp,
 2116                                     "tso disabled due to -txcsum.\n");
 2117                         }
 2118                 }
 2119                 if (mask & IFCAP_RXCSUM)
 2120                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2121                 if (mask & IFCAP_TSO4) {
 2122                         ifp->if_capenable ^= IFCAP_TSO4;
 2123 
 2124                         if (IFCAP_TSO & ifp->if_capenable) {
 2125                                 if (IFCAP_TXCSUM & ifp->if_capenable)
 2126                                         ifp->if_hwassist |= CSUM_TSO;
 2127                                 else {
 2128                                         ifp->if_capenable &= ~IFCAP_TSO;
 2129                                         ifp->if_hwassist &= ~CSUM_TSO;
 2130                                         if_printf(ifp,
 2131                                             "enable txcsum first.\n");
 2132                                         error = EAGAIN;
 2133                                 }
 2134                         } else
 2135                                 ifp->if_hwassist &= ~CSUM_TSO;
 2136                 }
 2137                 if (mask & IFCAP_LRO) {
 2138                         ifp->if_capenable ^= IFCAP_LRO;
 2139 
 2140                         /* Safe to do this even if cxgb_up not called yet */
 2141                         cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
 2142                 }
 2143                 if (mask & IFCAP_VLAN_HWTAGGING) {
 2144                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2145                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2146                                 PORT_LOCK(p);
 2147                                 cxgb_update_mac_settings(p);
 2148                                 PORT_UNLOCK(p);
 2149                         }
 2150                 }
 2151                 if (mask & IFCAP_VLAN_MTU) {
 2152                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
 2153                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2154                                 PORT_LOCK(p);
 2155                                 cxgb_update_mac_settings(p);
 2156                                 PORT_UNLOCK(p);
 2157                         }
 2158                 }
 2159                 if (mask & IFCAP_VLAN_HWTSO)
 2160                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 2161                 if (mask & IFCAP_VLAN_HWCSUM)
 2162                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2163 
 2164 #ifdef VLAN_CAPABILITIES
 2165                 VLAN_CAPABILITIES(ifp);
 2166 #endif
 2167                 ADAPTER_UNLOCK(sc);
 2168                 break;
 2169         case SIOCSIFMEDIA:
 2170         case SIOCGIFMEDIA:
 2171                 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 2172                 break;
 2173         default:
 2174                 error = ether_ioctl(ifp, command, data);
 2175         }
 2176 
 2177         return (error);
 2178 }
 2179 
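      /*
       * Media cannot be changed with ifconfig; the media list is rebuilt
       * from the detected module type instead (see cxgb_build_medialist).
       */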
 2180 static int
 2181 cxgb_media_change(struct ifnet *ifp)
 2182 {
 2183         return (EOPNOTSUPP);
 2184 }
 2185 
 2186 /*
 2187  * Translates phy->modtype to the correct Ethernet media subtype.
 2188  */
 2189 static int
 2190 cxgb_ifm_type(int mod)
 2191 {
 2192         switch (mod) {
 2193         case phy_modtype_sr:
 2194                 return (IFM_10G_SR);
 2195         case phy_modtype_lr:
 2196                 return (IFM_10G_LR);
 2197         case phy_modtype_lrm:
 2198                 return (IFM_10G_LRM);
 2199         case phy_modtype_twinax:
 2200                 return (IFM_10G_TWINAX);
 2201         case phy_modtype_twinax_long:
 2202                 return (IFM_10G_TWINAX_LONG);
 2203         case phy_modtype_none:
 2204                 return (IFM_NONE);
 2205         case phy_modtype_unknown:
 2206                 return (IFM_UNKNOWN);
 2207         }
 2208 
 2209         KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
 2210         return (IFM_UNKNOWN);
 2211 }
 2212 
 2213 /*
 2214  * Rebuilds the ifmedia list for this port, and sets the current media.
 2215  */
 2216 static void
 2217 cxgb_build_medialist(struct port_info *p)
 2218 {
 2219         struct cphy *phy = &p->phy;
 2220         struct ifmedia *media = &p->media;
 2221         int mod = phy->modtype;
 2222         int m = IFM_ETHER | IFM_FDX;
 2223 
 2224         PORT_LOCK(p);
 2225 
 2226         ifmedia_removeall(media);
 2227         if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
 2228                 /* Copper (RJ45) */
 2229 
 2230                 if (phy->caps & SUPPORTED_10000baseT_Full)
 2231                         ifmedia_add(media, m | IFM_10G_T, mod, NULL);
 2232 
 2233                 if (phy->caps & SUPPORTED_1000baseT_Full)
 2234                         ifmedia_add(media, m | IFM_1000_T, mod, NULL);
 2235 
 2236                 if (phy->caps & SUPPORTED_100baseT_Full)
 2237                         ifmedia_add(media, m | IFM_100_TX, mod, NULL);
 2238 
 2239                 if (phy->caps & SUPPORTED_10baseT_Full)
 2240                         ifmedia_add(media, m | IFM_10_T, mod, NULL);
 2241 
 2242                 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
 2243                 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
 2244 
 2245         } else if (phy->caps & SUPPORTED_TP) {
 2246                 /* Copper (CX4) */
 2247 
 2248                 KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
 2249                         ("%s: unexpected cap 0x%x", __func__, phy->caps));
 2250 
 2251                 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
 2252                 ifmedia_set(media, m | IFM_10G_CX4);
 2253 
 2254         } else if (phy->caps & SUPPORTED_FIBRE &&
 2255                    phy->caps & SUPPORTED_10000baseT_Full) {
 2256                 /* 10G optical (but includes SFP+ twinax) */
 2257 
 2258                 m |= cxgb_ifm_type(mod);
 2259                 if (IFM_SUBTYPE(m) == IFM_NONE)
 2260                         m &= ~IFM_FDX;
 2261 
 2262                 ifmedia_add(media, m, mod, NULL);
 2263                 ifmedia_set(media, m);
 2264 
 2265         } else if (phy->caps & SUPPORTED_FIBRE &&
 2266                    phy->caps & SUPPORTED_1000baseT_Full) {
 2267                 /* 1G optical */
 2268 
 2269                 /* XXX: Lie and claim to be SX, could actually be any 1G-X */
 2270                 ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
 2271                 ifmedia_set(media, m | IFM_1000_SX);
 2272 
 2273         } else {
 2274                 KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
 2275                             phy->caps));
 2276         }
 2277 
 2278         PORT_UNLOCK(p);
 2279 }
 2280 
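      /*
       * ifmedia status callback.  Rebuilds the media list first if the
       * module type has changed since the list was last built.
       */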
 2281 static void
 2282 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 2283 {
 2284         struct port_info *p = ifp->if_softc;
 2285         struct ifmedia_entry *cur = p->media.ifm_cur;
 2286         int speed = p->link_config.speed;
 2287 
 2288         if (cur->ifm_data != p->phy.modtype) {
 2289                 cxgb_build_medialist(p);
 2290                 cur = p->media.ifm_cur;
 2291         }
 2292 
 2293         ifmr->ifm_status = IFM_AVALID;
 2294         if (!p->link_config.link_ok)
 2295                 return;
 2296 
 2297         ifmr->ifm_status |= IFM_ACTIVE;
 2298 
 2299         /*
 2300          * active and current will differ iff current media is autoselect.  That
 2301          * can happen only for copper RJ45.
 2302          */
 2303         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
 2304                 return;
 2305         KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
 2306                 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
 2307 
 2308         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
 2309         if (speed == SPEED_10000)
 2310                 ifmr->ifm_active |= IFM_10G_T;
 2311         else if (speed == SPEED_1000)
 2312                 ifmr->ifm_active |= IFM_1000_T;
 2313         else if (speed == SPEED_100)
 2314                 ifmr->ifm_active |= IFM_100_TX;
 2315         else if (speed == SPEED_10)
 2316                 ifmr->ifm_active |= IFM_10_T;
 2317         else
 2318                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
 2319                             speed));
 2320 }
 2321 
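      /*
       * Interrupt handler for slow-path (async) events.  Masks the PL
       * interrupts and defers the actual work to the slow interrupt task.
       */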
 2322 static void
 2323 cxgb_async_intr(void *data)
 2324 {
 2325         adapter_t *sc = data;
 2326 
 2327         t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
 2328         (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
 2329         taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
 2330 }
 2331 
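      /*
       * Callout handler for the periodic link check; it only enqueues the
       * link check task, which does the real work in task context.
       */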
 2332 static void
 2333 link_check_callout(void *arg)
 2334 {
 2335         struct port_info *pi = arg;
 2336         struct adapter *sc = pi->adapter;
 2337 
 2338         if (!isset(&sc->open_device_map, pi->port_id))
 2339                 return;
 2340 
 2341         taskqueue_enqueue(sc->tq, &pi->link_check_task);
 2342 }
 2343 
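      /*
       * Task that queries and updates the link state of a port.  It
       * reschedules the callout when polling is required, i.e. when the
       * PHY has no link interrupt or a link fault is pending.
       */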
 2344 static void
 2345 check_link_status(void *arg, int pending)
 2346 {
 2347         struct port_info *pi = arg;
 2348         struct adapter *sc = pi->adapter;
 2349 
 2350         if (!isset(&sc->open_device_map, pi->port_id))
 2351                 return;
 2352 
 2353         t3_link_changed(sc, pi->port_id);
 2354 
 2355         if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
 2356                 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
 2357 }
 2358 
 2359 void
 2360 t3_os_link_intr(struct port_info *pi)
 2361 {
 2362         /*
 2363          * Schedule a link check in the near future.  If the link is flapping
 2364          * rapidly we'll keep resetting the callout and delaying the check until
 2365          * things stabilize a bit.
 2366          */
 2367         callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
 2368 }
 2369 
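      /*
       * Watchdog for T3B2 MACs, run once a second from the tick task.
       * Depending on the verdict of t3b2_mac_watchdog_task, a stuck MAC is
       * either toggled or completely reset and restarted.
       */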
 2370 static void
 2371 check_t3b2_mac(struct adapter *sc)
 2372 {
 2373         int i;
 2374 
 2375         if (sc->flags & CXGB_SHUTDOWN)
 2376                 return;
 2377 
 2378         for_each_port(sc, i) {
 2379                 struct port_info *p = &sc->port[i];
 2380                 int status;
 2381 #ifdef INVARIANTS
 2382                 struct ifnet *ifp = p->ifp;
 2383 #endif          
 2384 
 2385                 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
 2386                     !p->link_config.link_ok)
 2387                         continue;
 2388 
 2389                 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
 2390                         ("%s: state mismatch (drv_flags %x, device_map %x)",
 2391                          __func__, ifp->if_drv_flags, sc->open_device_map));
 2392 
 2393                 PORT_LOCK(p);
 2394                 status = t3b2_mac_watchdog_task(&p->mac);
 2395                 if (status == 1)
 2396                         p->mac.stats.num_toggled++;
 2397                 else if (status == 2) {
 2398                         struct cmac *mac = &p->mac;
 2399 
 2400                         cxgb_update_mac_settings(p);
 2401                         t3_link_start(&p->phy, mac, &p->link_config);
 2402                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2403                         t3_port_intr_enable(sc, p->port_id);
 2404                         p->mac.stats.num_resets++;
 2405                 }
 2406                 PORT_UNLOCK(p);
 2407         }
 2408 }
 2409 
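      /*
       * Per-second callout: defers all work to the tick task and re-arms
       * itself.
       */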
 2410 static void
 2411 cxgb_tick(void *arg)
 2412 {
 2413         adapter_t *sc = (adapter_t *)arg;
 2414 
 2415         if (sc->flags & CXGB_SHUTDOWN)
 2416                 return;
 2417 
 2418         taskqueue_enqueue(sc->tq, &sc->tick_task);      
 2419         callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
 2420 }
 2421 
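      /*
       * The tick task.  Runs the T3B2 MAC watchdog when applicable, counts
       * starved response queues and empty free lists, refreshes each open
       * port's MAC statistics into its ifnet counters, and tallies RX FIFO
       * overflows.
       */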
 2422 static void
 2423 cxgb_tick_handler(void *arg, int count)
 2424 {
 2425         adapter_t *sc = (adapter_t *)arg;
 2426         const struct adapter_params *p = &sc->params;
 2427         int i;
 2428         uint32_t cause, reset;
 2429 
 2430         if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
 2431                 return;
 2432 
 2433         if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) 
 2434                 check_t3b2_mac(sc);
 2435 
 2436         cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
 2437         if (cause) {
 2438                 struct sge_qset *qs = &sc->sge.qs[0];
 2439                 uint32_t mask, v;
 2440 
 2441                 v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
 2442 
 2443                 mask = 1;
 2444                 for (i = 0; i < SGE_QSETS; i++) {
 2445                         if (v & mask)
 2446                                 qs[i].rspq.starved++;
 2447                         mask <<= 1;
 2448                 }
 2449 
 2450                 mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
 2451 
 2452                 for (i = 0; i < SGE_QSETS * 2; i++) {
 2453                         if (v & mask) {
 2454                                 qs[i / 2].fl[i % 2].empty++;
 2455                         }
 2456                         mask <<= 1;
 2457                 }
 2458 
 2459                 /* clear */
 2460                 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
 2461                 t3_write_reg(sc, A_SG_INT_CAUSE, cause);
 2462         }
 2463 
 2464         for (i = 0; i < sc->params.nports; i++) {
 2465                 struct port_info *pi = &sc->port[i];
 2466                 struct ifnet *ifp = pi->ifp;
 2467                 struct cmac *mac = &pi->mac;
 2468                 struct mac_stats *mstats = &mac->stats;
 2469 
 2470                 if (!isset(&sc->open_device_map, pi->port_id))
 2471                         continue;
 2472 
 2473                 PORT_LOCK(pi);
 2474                 t3_mac_update_stats(mac);
 2475                 PORT_UNLOCK(pi);
 2476 
 2477                 ifp->if_opackets =
 2478                     mstats->tx_frames_64 +
 2479                     mstats->tx_frames_65_127 +
 2480                     mstats->tx_frames_128_255 +
 2481                     mstats->tx_frames_256_511 +
 2482                     mstats->tx_frames_512_1023 +
 2483                     mstats->tx_frames_1024_1518 +
 2484                     mstats->tx_frames_1519_max;
 2485                 
 2486                 ifp->if_ipackets =
 2487                     mstats->rx_frames_64 +
 2488                     mstats->rx_frames_65_127 +
 2489                     mstats->rx_frames_128_255 +
 2490                     mstats->rx_frames_256_511 +
 2491                     mstats->rx_frames_512_1023 +
 2492                     mstats->rx_frames_1024_1518 +
 2493                     mstats->rx_frames_1519_max;
 2494 
 2495                 ifp->if_obytes = mstats->tx_octets;
 2496                 ifp->if_ibytes = mstats->rx_octets;
 2497                 ifp->if_omcasts = mstats->tx_mcast_frames;
 2498                 ifp->if_imcasts = mstats->rx_mcast_frames;
 2499                 
 2500                 ifp->if_collisions =
 2501                     mstats->tx_total_collisions;
 2502 
 2503                 ifp->if_iqdrops = mstats->rx_cong_drops;
 2504                 
 2505                 ifp->if_oerrors =
 2506                     mstats->tx_excess_collisions +
 2507                     mstats->tx_underrun +
 2508                     mstats->tx_len_errs +
 2509                     mstats->tx_mac_internal_errs +
 2510                     mstats->tx_excess_deferral +
 2511                     mstats->tx_fcs_errs;
 2512                 ifp->if_ierrors =
 2513                     mstats->rx_jabber +
 2514                     mstats->rx_data_errs +
 2515                     mstats->rx_sequence_errs +
 2516                     mstats->rx_runt + 
 2517                     mstats->rx_too_long +
 2518                     mstats->rx_mac_internal_errs +
 2519                     mstats->rx_short +
 2520                     mstats->rx_fcs_errs;
 2521 
 2522                 if (mac->multiport)
 2523                         continue;
 2524 
 2525                 /* Count rx fifo overflows, once per second */
 2526                 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
 2527                 reset = 0;
 2528                 if (cause & F_RXFIFO_OVERFLOW) {
 2529                         mac->stats.rx_fifo_ovfl++;
 2530                         reset |= F_RXFIFO_OVERFLOW;
 2531                 }
 2532                 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
 2533         }
 2534 }
 2535 
 2536 static void
 2537 touch_bars(device_t dev)
 2538 {
 2539         /*
 2540          * Don't enable yet
 2541          */
 2542 #if !defined(__LP64__) && 0
 2543         u32 v;
 2544 
 2545         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
 2546         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
 2547         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
 2548         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
 2549         pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
 2550         pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
 2551 #endif
 2552 }
 2553 
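      /*
       * Write len bytes at offset into the serial EEPROM.  Unaligned edges
       * are handled by reading the affected words into a bounce buffer
       * first (read-modify-write), and write protection is lifted only for
       * the duration of the update.
       */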
 2554 static int
 2555 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
 2556 {
 2557         uint8_t *buf;
 2558         int err = 0;
 2559         u32 aligned_offset, aligned_len, *p;
 2560         struct adapter *adapter = pi->adapter;
 2561 
 2562 
 2563         aligned_offset = offset & ~3;
 2564         aligned_len = (len + (offset & 3) + 3) & ~3;
 2565 
 2566         if (aligned_offset != offset || aligned_len != len) {
 2567                 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);              
 2568                 if (!buf)
 2569                         return (ENOMEM);
 2570                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
 2571                 if (!err && aligned_len > 4)
 2572                         err = t3_seeprom_read(adapter,
 2573                                               aligned_offset + aligned_len - 4,
 2574                                               (u32 *)&buf[aligned_len - 4]);
 2575                 if (err)
 2576                         goto out;
 2577                 memcpy(buf + (offset & 3), data, len);
 2578         } else
 2579                 buf = (uint8_t *)(uintptr_t)data;
 2580 
 2581         err = t3_seeprom_wp(adapter, 0);
 2582         if (err)
 2583                 goto out;
 2584 
 2585         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
 2586                 err = t3_seeprom_write(adapter, aligned_offset, *p);
 2587                 aligned_offset += 4;
 2588         }
 2589 
 2590         if (!err)
 2591                 err = t3_seeprom_wp(adapter, 1);
 2592 out:
 2593         if (buf != data)
 2594                 free(buf, M_DEVBUF);
 2595         return (err);
 2596 }
 2597 
 2598 
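      /*
       * Range check helper: a negative val means "unspecified" and always
       * passes the check.
       */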
 2599 static int
 2600 in_range(int val, int lo, int hi)
 2601 {
 2602         return (val < 0 || (val <= hi && val >= lo));
 2603 }
 2604 
 2605 static int
 2606 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
 2607 {
 2608         return (0);
 2609 }
 2610 
 2611 static int
 2612 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 2613 {
 2614         return (0);
 2615 }
 2616 
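      /*
       * Handler for the private Chelsio ioctls issued through the
       * adapter's character device; all of them require driver privilege.
       */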
 2617 static int
 2618 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
 2619     int fflag, struct thread *td)
 2620 {
 2621         int mmd, error = 0;
 2622         struct port_info *pi = dev->si_drv1;
 2623         adapter_t *sc = pi->adapter;
 2624 
 2625 #ifdef PRIV_SUPPORTED   
 2626         if (priv_check(td, PRIV_DRIVER)) {
 2627                 if (cxgb_debug) 
 2628                         printf("user does not have access to privileged ioctls\n");
 2629                 return (EPERM);
 2630         }
 2631 #else
 2632         if (suser(td)) {
 2633                 if (cxgb_debug)
 2634                         printf("user does not have access to privileged ioctls\n");
 2635                 return (EPERM);
 2636         }
 2637 #endif
 2638         
 2639         switch (cmd) {
 2640         case CHELSIO_GET_MIIREG: {
 2641                 uint32_t val;
 2642                 struct cphy *phy = &pi->phy;
 2643                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2644                 
 2645                 if (!phy->mdio_read)
 2646                         return (EOPNOTSUPP);
 2647                 if (is_10G(sc)) {
 2648                         mmd = mid->phy_id >> 8;
 2649                         if (!mmd)
 2650                                 mmd = MDIO_DEV_PCS;
 2651                         else if (mmd > MDIO_DEV_VEND2)
 2652                                 return (EINVAL);
 2653 
 2654                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
 2655                                              mid->reg_num, &val);
 2656                 } else
 2657                         error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
 2658                                              mid->reg_num & 0x1f, &val);
 2659                 if (error == 0)
 2660                         mid->val_out = val;
 2661                 break;
 2662         }
 2663         case CHELSIO_SET_MIIREG: {
 2664                 struct cphy *phy = &pi->phy;
 2665                 struct ch_mii_data *mid = (struct ch_mii_data *)data;
 2666 
 2667                 if (!phy->mdio_write)
 2668                         return (EOPNOTSUPP);
 2669                 if (is_10G(sc)) {
 2670                         mmd = mid->phy_id >> 8;
 2671                         if (!mmd)
 2672                                 mmd = MDIO_DEV_PCS;
 2673                         else if (mmd > MDIO_DEV_VEND2)
 2674                                 return (EINVAL);
 2675                         
 2676                         error = phy->mdio_write(sc, mid->phy_id & 0x1f,
 2677                                               mmd, mid->reg_num, mid->val_in);
 2678                 } else
 2679                         error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
 2680                                               mid->reg_num & 0x1f,
 2681                                               mid->val_in);
 2682                 break;
 2683         }
 2684         case CHELSIO_SETREG: {
 2685                 struct ch_reg *edata = (struct ch_reg *)data;
 2686                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2687                         return (EFAULT);
 2688                 t3_write_reg(sc, edata->addr, edata->val);
 2689                 break;
 2690         }
 2691         case CHELSIO_GETREG: {
 2692                 struct ch_reg *edata = (struct ch_reg *)data;
 2693                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
 2694                         return (EFAULT);
 2695                 edata->val = t3_read_reg(sc, edata->addr);
 2696                 break;
 2697         }
 2698         case CHELSIO_GET_SGE_CONTEXT: {
 2699                 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
 2700                 mtx_lock_spin(&sc->sge.reg_lock);
 2701                 switch (ecntxt->cntxt_type) {
 2702                 case CNTXT_TYPE_EGRESS:
 2703                         error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
 2704                             ecntxt->data);
 2705                         break;
 2706                 case CNTXT_TYPE_FL:
 2707                         error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
 2708                             ecntxt->data);
 2709                         break;
 2710                 case CNTXT_TYPE_RSP:
 2711                         error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
 2712                             ecntxt->data);
 2713                         break;
 2714                 case CNTXT_TYPE_CQ:
 2715                         error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
 2716                             ecntxt->data);
 2717                         break;
 2718                 default:
 2719                         error = EINVAL;
 2720                         break;
 2721                 }
 2722                 mtx_unlock_spin(&sc->sge.reg_lock);
 2723                 break;
 2724         }
 2725         case CHELSIO_GET_SGE_DESC: {
 2726                 struct ch_desc *edesc = (struct ch_desc *)data;
 2727                 int ret;
 2728                 if (edesc->queue_num >= SGE_QSETS * 6)
 2729                         return (EINVAL);
 2730                 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
 2731                     edesc->queue_num % 6, edesc->idx, edesc->data);
 2732                 if (ret < 0)
 2733                         return (EINVAL);
 2734                 edesc->size = ret;
 2735                 break;
 2736         }
 2737         case CHELSIO_GET_QSET_PARAMS: {
 2738                 struct qset_params *q;
 2739                 struct ch_qset_params *t = (struct ch_qset_params *)data;
 2740                 int q1 = pi->first_qset;
 2741                 int nqsets = pi->nqsets;
 2742                 int i;
 2743 
 2744                 if (t->qset_idx >= nqsets)
 2745                 return (EINVAL);
 2746 
 2747                 i = q1 + t->qset_idx;
 2748                 q = &sc->params.sge.qset[i];
 2749                 t->rspq_size   = q->rspq_size;
 2750                 t->txq_size[0] = q->txq_size[0];
 2751                 t->txq_size[1] = q->txq_size[1];
 2752                 t->txq_size[2] = q->txq_size[2];
 2753                 t->fl_size[0]  = q->fl_size;
 2754                 t->fl_size[1]  = q->jumbo_size;
 2755                 t->polling     = q->polling;
 2756                 t->lro         = q->lro;
 2757                 t->intr_lat    = q->coalesce_usecs;
 2758                 t->cong_thres  = q->cong_thres;
 2759                 t->qnum        = i;
 2760 
 2761                 if ((sc->flags & FULL_INIT_DONE) == 0)
 2762                         t->vector = 0;
 2763                 else if (sc->flags & USING_MSIX)
 2764                         t->vector = rman_get_start(sc->msix_irq_res[i]);
 2765                 else
 2766                         t->vector = rman_get_start(sc->irq_res);
 2767 
 2768                 break;
 2769         }
 2770         case CHELSIO_GET_QSET_NUM: {
 2771                 struct ch_reg *edata = (struct ch_reg *)data;
 2772                 edata->val = pi->nqsets;
 2773                 break;
 2774         }
 2775         case CHELSIO_LOAD_FW: {
 2776                 uint8_t *fw_data;
 2777                 uint32_t vers;
 2778                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2779 
 2780                 /*
 2781                  * You're allowed to load a firmware only before FULL_INIT_DONE
 2782                  *
 2783                  * FW_UPTODATE is also set so the rest of the initialization
 2784                  * will not overwrite what was loaded here.  This gives you the
 2785                  * flexibility to load any firmware (and maybe shoot yourself in
 2786                  * the foot).
 2787                  */
 2788 
 2789                 ADAPTER_LOCK(sc);
 2790                 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
 2791                         ADAPTER_UNLOCK(sc);
 2792                         return (EBUSY);
 2793                 }
 2794 
 2795                 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2796                 if (!fw_data)
 2797                         error = ENOMEM;
 2798                 else
 2799                         error = copyin(t->buf, fw_data, t->len);
 2800 
 2801                 if (!error)
 2802                         error = -t3_load_fw(sc, fw_data, t->len);
 2803 
 2804                 if (t3_get_fw_version(sc, &vers) == 0) {
 2805                         snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
 2806                             "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
 2807                             G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
 2808                 }
 2809 
 2810                 if (!error)
 2811                         sc->flags |= FW_UPTODATE;
 2812 
 2813                 free(fw_data, M_DEVBUF);
 2814                 ADAPTER_UNLOCK(sc);
 2815                 break;
 2816         }
 2817         case CHELSIO_LOAD_BOOT: {
 2818                 uint8_t *boot_data;
 2819                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2820 
 2821                 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
 2822                 if (!boot_data)
 2823                 return (ENOMEM);
 2824 
 2825                 error = copyin(t->buf, boot_data, t->len);
 2826                 if (!error)
 2827                         error = -t3_load_boot(sc, boot_data, t->len);
 2828 
 2829                 free(boot_data, M_DEVBUF);
 2830                 break;
 2831         }
 2832         case CHELSIO_GET_PM: {
 2833                 struct ch_pm *m = (struct ch_pm *)data;
 2834                 struct tp_params *p = &sc->params.tp;
 2835 
 2836                 if (!is_offload(sc))
 2837                         return (EOPNOTSUPP);
 2838 
 2839                 m->tx_pg_sz = p->tx_pg_size;
 2840                 m->tx_num_pg = p->tx_num_pgs;
 2841                 m->rx_pg_sz  = p->rx_pg_size;
 2842                 m->rx_num_pg = p->rx_num_pgs;
 2843                 m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
 2844 
 2845                 break;
 2846         }
 2847         case CHELSIO_SET_PM: {
 2848                 struct ch_pm *m = (struct ch_pm *)data;
 2849                 struct tp_params *p = &sc->params.tp;
 2850 
 2851                 if (!is_offload(sc))
 2852                         return (EOPNOTSUPP);
 2853                 if (sc->flags & FULL_INIT_DONE)
 2854                         return (EBUSY);
 2855 
 2856                 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
 2857                     !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
 2858                         return (EINVAL);        /* not power of 2 */
 2859                 if (!(m->rx_pg_sz & 0x14000))
 2860                         return (EINVAL);        /* not 16KB or 64KB */
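                      /* tx_pg_sz must be 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */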
 2861                 if (!(m->tx_pg_sz & 0x1554000))
 2862                         return (EINVAL);
 2863                 if (m->tx_num_pg == -1)
 2864                         m->tx_num_pg = p->tx_num_pgs;
 2865                 if (m->rx_num_pg == -1)
 2866                         m->rx_num_pg = p->rx_num_pgs;
 2867                 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
 2868                         return (EINVAL);
 2869                 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
 2870                     m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
 2871                         return (EINVAL);
 2872 
 2873                 p->rx_pg_size = m->rx_pg_sz;
 2874                 p->tx_pg_size = m->tx_pg_sz;
 2875                 p->rx_num_pgs = m->rx_num_pg;
 2876                 p->tx_num_pgs = m->tx_num_pg;
 2877                 break;
 2878         }
 2879         case CHELSIO_SETMTUTAB: {
 2880                 struct ch_mtus *m = (struct ch_mtus *)data;
 2881                 int i;
 2882                 
 2883                 if (!is_offload(sc))
 2884                         return (EOPNOTSUPP);
 2885                 if (offload_running(sc))
 2886                         return (EBUSY);
 2887                 if (m->nmtus != NMTUS)
 2888                         return (EINVAL);
 2889                 if (m->mtus[0] < 81)         /* accommodate SACK */
 2890                         return (EINVAL);
 2891                 
 2892                 /*
 2893                  * MTUs must be in ascending order
 2894                  */
 2895                 for (i = 1; i < NMTUS; ++i)
 2896                         if (m->mtus[i] < m->mtus[i - 1])
 2897                                 return (EINVAL);
 2898 
 2899                 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
 2900                 break;
 2901         }
 2902         case CHELSIO_GETMTUTAB: {
 2903                 struct ch_mtus *m = (struct ch_mtus *)data;
 2904 
 2905                 if (!is_offload(sc))
 2906                         return (EOPNOTSUPP);
 2907 
 2908                 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
 2909                 m->nmtus = NMTUS;
 2910                 break;
 2911         }
 2912         case CHELSIO_GET_MEM: {
 2913                 struct ch_mem_range *t = (struct ch_mem_range *)data;
 2914                 struct mc7 *mem;
 2915                 uint8_t *useraddr;
 2916                 u64 buf[32];
 2917 
 2918                 /*
 2919                  * Use these to avoid modifying len/addr in the return
 2920                  * struct
 2921                  */
 2922                 uint32_t len = t->len, addr = t->addr;
 2923 
 2924                 if (!is_offload(sc))
 2925                         return (EOPNOTSUPP);
 2926                 if (!(sc->flags & FULL_INIT_DONE))
 2927                         return (EIO);         /* need the memory controllers */
 2928                 if ((addr & 0x7) || (len & 0x7))
 2929                         return (EINVAL);
 2930                 if (t->mem_id == MEM_CM)
 2931                         mem = &sc->cm;
 2932                 else if (t->mem_id == MEM_PMRX)
 2933                         mem = &sc->pmrx;
 2934                 else if (t->mem_id == MEM_PMTX)
 2935                         mem = &sc->pmtx;
 2936                 else
 2937                         return (EINVAL);
 2938 
 2939                 /*
 2940                  * Version scheme:
 2941                  * bits 0..9: chip version
 2942                  * bits 10..15: chip revision
 2943                  */
 2944                 t->version = 3 | (sc->params.rev << 10);
 2945                 
 2946                 /*
 2947                  * Read 256 bytes at a time as len can be large and we don't
 2948                  * want to use huge intermediate buffers.
 2949                  */
 2950                 useraddr = (uint8_t *)t->buf; 
 2951                 while (len) {
 2952                         unsigned int chunk = min(len, sizeof(buf));
 2953 
 2954                         error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
 2955                         if (error)
 2956                                 return (-error);
 2957                         if (copyout(buf, useraddr, chunk))
 2958                                 return (EFAULT);
 2959                         useraddr += chunk;
 2960                         addr += chunk;
 2961                         len -= chunk;
 2962                 }
 2963                 break;
 2964         }
 2965         case CHELSIO_READ_TCAM_WORD: {
 2966                 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
 2967 
 2968                 if (!is_offload(sc))
 2969                         return (EOPNOTSUPP);
 2970                 if (!(sc->flags & FULL_INIT_DONE))
 2971                         return (EIO);         /* need MC5 */            
 2972                 return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
 2974         }
 2975         case CHELSIO_SET_TRACE_FILTER: {
 2976                 struct ch_trace *t = (struct ch_trace *)data;
 2977                 const struct trace_params *tp;
 2978 
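                      /*
                       * struct ch_trace is laid out so that sip and the fields
                       * that follow it match struct trace_params, allowing the
                       * user's filter to be handed to the common code directly.
                       */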
 2979                 tp = (const struct trace_params *)&t->sip;
 2980                 if (t->config_tx)
 2981                         t3_config_trace_filter(sc, tp, 0, t->invert_match,
 2982                                                t->trace_tx);
 2983                 if (t->config_rx)
 2984                         t3_config_trace_filter(sc, tp, 1, t->invert_match,
 2985                                                t->trace_rx);
 2986                 break;
 2987         }
 2988         case CHELSIO_SET_PKTSCHED: {
 2989                 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
 2990                 if (sc->open_device_map == 0)
 2991                         return (EAGAIN);
 2992                 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
 2993                     p->binding);
 2994                 break;
 2995         }
 2996         case CHELSIO_IFCONF_GETREGS: {
 2997                 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
 2998                 int reglen = cxgb_get_regs_len();
 2999                 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
 3000                 if (buf == NULL)
 3001                         return (ENOMEM);
 3003                 if (regs->len > reglen)
 3004                         regs->len = reglen;
 3005                 else if (regs->len < reglen)
 3006                         error = ENOBUFS;
 3007 
 3008                 if (!error) {
 3009                         cxgb_get_regs(sc, regs, buf);
 3010                         error = copyout(buf, regs->data, reglen);
 3011                 }
 3012                 free(buf, M_DEVBUF);
 3013 
 3014                 break;
 3015         }
 3016         case CHELSIO_SET_HW_SCHED: {
 3017                 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
 3018                 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
 3019 
 3020                 if ((sc->flags & FULL_INIT_DONE) == 0)
 3021                         return (EAGAIN);       /* need TP to be initialized */
 3022                 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
 3023                     !in_range(t->channel, 0, 1) ||
 3024                     !in_range(t->kbps, 0, 10000000) ||
 3025                     !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
 3026                     !in_range(t->flow_ipg, 0,
 3027                               dack_ticks_to_usec(sc, 0x7ff)))
 3028                         return (EINVAL);
 3029 
 3030                 if (t->kbps >= 0) {
 3031                         error = t3_config_sched(sc, t->kbps, t->sched);
 3032                         if (error < 0)
 3033                                 return (-error);
 3034                 }
 3035                 if (t->class_ipg >= 0)
 3036                         t3_set_sched_ipg(sc, t->sched, t->class_ipg);
 3037                 if (t->flow_ipg >= 0) {
 3038                         t->flow_ipg *= 1000;     /* us -> ns */
 3039                         t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
 3040                 }
 3041                 if (t->mode >= 0) {
 3042                         int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
 3043 
 3044                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3045                                          bit, t->mode ? bit : 0);
 3046                 }
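                      /* Bind the scheduler to the requested TX channel. */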
 3047                 if (t->channel >= 0)
 3048                         t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
 3049                                          1 << t->sched, t->channel << t->sched);
 3050                 break;
 3051         }
 3052         case CHELSIO_GET_EEPROM: {
 3053                 int i;
 3054                 struct ch_eeprom *e = (struct ch_eeprom *)data;
 3055                 uint8_t *buf;

                      /*
                       * Bound the requested range so that the reads and the
                       * copyout below stay within the staging buffer.
                       */
                      if (e->offset > EEPROMSIZE || e->len > EEPROMSIZE - e->offset)
                              return (EINVAL);

                      buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
 3057                 if (buf == NULL)
 3058                         return (ENOMEM);
 3060                 e->magic = EEPROM_MAGIC;
 3061                 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
 3062                         error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
 3063 
 3064                 if (!error)
 3065                         error = copyout(buf + e->offset, e->data, e->len);
 3066 
 3067                 free(buf, M_DEVBUF);
 3068                 break;
 3069         }
 3070         case CHELSIO_CLEAR_STATS: {
 3071                 if (!(sc->flags & FULL_INIT_DONE))
 3072                         return (EAGAIN);
 3073 
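                      /*
                       * Fold the current hardware counters into the software
                       * statistics, then zero the accumulated totals.
                       */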
 3074                 PORT_LOCK(pi);
 3075                 t3_mac_update_stats(&pi->mac);
 3076                 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
 3077                 PORT_UNLOCK(pi);
 3078                 break;
 3079         }
 3080         case CHELSIO_GET_UP_LA: {
 3081                 struct ch_up_la *la = (struct ch_up_la *)data;
 3082                 uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3083                 if (buf == NULL)
 3084                         return (ENOMEM);
 3086                 if (la->bufsize < LA_BUFSIZE)
 3087                         error = ENOBUFS;
 3088 
 3089                 if (!error)
 3090                         error = -t3_get_up_la(sc, &la->stopped, &la->idx,
 3091                                               &la->bufsize, buf);
 3092                 if (!error)
 3093                         error = copyout(buf, la->data, la->bufsize);
 3094 
 3095                 free(buf, M_DEVBUF);
 3096                 break;
 3097         }
 3098         case CHELSIO_GET_UP_IOQS: {
 3099                 struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
 3100                 uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
 3101                 uint32_t *v;
 3102 
 3103                 if (buf == NULL)
 3104                         return (ENOMEM);
 3106                 if (ioqs->bufsize < IOQS_BUFSIZE)
 3107                         error = ENOBUFS;
 3108 
 3109                 if (!error)
 3110                         error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
 3111 
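                      /*
                       * The first four 32-bit words of the buffer hold the IOQ
                       * enable and status registers; the remainder is copied
                       * out to the caller as queue data.
                       */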
 3112                 if (!error) {
 3113                         v = (uint32_t *)buf;
 3114 
 3115                         ioqs->ioq_rx_enable = *v++;
 3116                         ioqs->ioq_tx_enable = *v++;
 3117                         ioqs->ioq_rx_status = *v++;
 3118                         ioqs->ioq_tx_status = *v++;
 3119 
 3120                         error = copyout(v, ioqs->data, ioqs->bufsize);
 3121                 }
 3122 
 3123                 free(buf, M_DEVBUF);
 3124                 break;
 3125         }
 3126         case CHELSIO_SET_FILTER: {
 3127                 struct ch_filter *f = (struct ch_filter *)data;
 3128                 struct filter_info *p;
 3129                 unsigned int nfilters = sc->params.mc5.nfilters;
 3130 
 3131                 if (!is_offload(sc))
 3132                         return (EOPNOTSUPP);    /* No TCAM */
 3133                 if (!(sc->flags & FULL_INIT_DONE))
 3134                         return (EAGAIN);        /* mc5 not setup yet */
 3135                 if (nfilters == 0)
 3136                         return (EBUSY);         /* TOE will use TCAM */
 3137 
 3138                 /* sanity checks */
 3139                 if (f->filter_id >= nfilters ||
 3140                     (f->val.dip && f->mask.dip != 0xffffffff) ||
 3141                     (f->val.sport && f->mask.sport != 0xffff) ||
 3142                     (f->val.dport && f->mask.dport != 0xffff) ||
 3143                     (f->val.vlan && f->mask.vlan != 0xfff) ||
 3144                     (f->val.vlan_prio &&
 3145                         f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
 3146                     (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
 3147                     f->qset >= SGE_QSETS ||
 3148                     sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
 3149                         return (EINVAL);
 3150 
 3151                 /* Was allocated with M_WAITOK */
 3152                 KASSERT(sc->filters != NULL, ("filter table NULL"));
 3153 
 3154                 p = &sc->filters[f->filter_id];
 3155                 if (p->locked)
 3156                         return (EPERM);
 3157 
 3158                 bzero(p, sizeof(*p));
 3159                 p->sip = f->val.sip;
 3160                 p->sip_mask = f->mask.sip;
 3161                 p->dip = f->val.dip;
 3162                 p->sport = f->val.sport;
 3163                 p->dport = f->val.dport;
 3164                 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
 3165                 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
 3166                     FILTER_NO_VLAN_PRI;
 3167                 p->mac_hit = f->mac_hit;
 3168                 p->mac_vld = f->mac_addr_idx != 0xffff;
 3169                 p->mac_idx = f->mac_addr_idx;
 3170                 p->pkt_type = f->proto;
 3171                 p->report_filter_id = f->want_filter_id;
 3172                 p->pass = f->pass;
 3173                 p->rss = f->rss;
 3174                 p->qset = f->qset;
 3175 
 3176                 error = set_filter(sc, f->filter_id, p);
 3177                 if (error == 0)
 3178                         p->valid = 1;
 3179                 break;
 3180         }
 3181         case CHELSIO_DEL_FILTER: {
 3182                 struct ch_filter *f = (struct ch_filter *)data;
 3183                 struct filter_info *p;
 3184                 unsigned int nfilters = sc->params.mc5.nfilters;
 3185 
 3186                 if (!is_offload(sc))
 3187                         return (EOPNOTSUPP);
 3188                 if (!(sc->flags & FULL_INIT_DONE))
 3189                         return (EAGAIN);
 3190                 if (nfilters == 0 || sc->filters == NULL)
 3191                         return (EINVAL);
 3192                 if (f->filter_id >= nfilters)
 3193                         return (EINVAL);
 3194 
 3195                 p = &sc->filters[f->filter_id];
 3196                 if (p->locked)
 3197                         return (EPERM);
 3198                 if (!p->valid)
 3199                         return (EFAULT); /* Read "Bad address" as "Bad index" */
 3200 
 3201                 bzero(p, sizeof(*p));
 3202                 p->sip = p->sip_mask = 0xffffffff;
 3203                 p->vlan = 0xfff;
 3204                 p->vlan_prio = FILTER_NO_VLAN_PRI;
 3205                 p->pkt_type = 1;
 3206                 error = set_filter(sc, f->filter_id, p);
 3207                 break;
 3208         }
 3209         case CHELSIO_GET_FILTER: {
 3210                 struct ch_filter *f = (struct ch_filter *)data;
 3211                 struct filter_info *p;
 3212                 unsigned int i, nfilters = sc->params.mc5.nfilters;
 3213 
 3214                 if (!is_offload(sc))
 3215                         return (EOPNOTSUPP);
 3216                 if (!(sc->flags & FULL_INIT_DONE))
 3217                         return (EAGAIN);
 3218                 if (nfilters == 0 || sc->filters == NULL)
 3219                         return (EINVAL);
 3220 
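                      /*
                       * A filter_id of 0xffffffff requests the first valid
                       * filter; otherwise return the next valid filter after
                       * the given index.  filter_id is set to 0xffffffff on
                       * return when no further valid filters exist.
                       */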
 3221                 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
 3222                 for (; i < nfilters; i++) {
 3223                         p = &sc->filters[i];
 3224                         if (!p->valid)
 3225                                 continue;
 3226 
 3227                         bzero(f, sizeof(*f));
 3228 
 3229                         f->filter_id = i;
 3230                         f->val.sip = p->sip;
 3231                         f->mask.sip = p->sip_mask;
 3232                         f->val.dip = p->dip;
 3233                         f->mask.dip = p->dip ? 0xffffffff : 0;
 3234                         f->val.sport = p->sport;
 3235                         f->mask.sport = p->sport ? 0xffff : 0;
 3236                         f->val.dport = p->dport;
 3237                         f->mask.dport = p->dport ? 0xffff : 0;
 3238                         f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
 3239                         f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
 3240                         f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
 3241                             0 : p->vlan_prio;
 3242                         f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
 3243                             0 : FILTER_NO_VLAN_PRI;
 3244                         f->mac_hit = p->mac_hit;
 3245                         f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
 3246                         f->proto = p->pkt_type;
 3247                         f->want_filter_id = p->report_filter_id;
 3248                         f->pass = p->pass;
 3249                         f->rss = p->rss;
 3250                         f->qset = p->qset;
 3251 
 3252                         break;
 3253                 }
 3254                 
 3255                 if (i == nfilters)
 3256                         f->filter_id = 0xffffffff;
 3257                 break;
 3258         }
 3259         default:
 3260                 return (EOPNOTSUPP);
 3262         }
 3263 
 3264         return (error);
 3265 }
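
      /*
       * Illustrative sketch only (not part of the driver): userland reaches
       * the cases above by issuing the CHELSIO_* ioctls against the adapter's
       * control device node.  Assuming that node is /dev/cxgbc0:
       *
       *      struct ch_mtus m;
       *      int fd = open("/dev/cxgbc0", O_RDWR);
       *
       *      if (fd >= 0 && ioctl(fd, CHELSIO_GETMTUTAB, &m) == 0)
       *              printf("%u MTU table entries\n", m.nmtus);
       *
       * Failures surface as the errno values returned by the handler above.
       */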
 3266 
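      /*
       * Copy the 32-bit registers in [start, end] into the dump buffer at the
       * same offsets they occupy in the adapter's register address map.
       */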
 3267 static __inline void
 3268 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
 3269     unsigned int end)
 3270 {
 3271         uint32_t *p = (uint32_t *)(buf + start);
 3272 
 3273         for ( ; start <= end; start += sizeof(uint32_t))
 3274                 *p++ = t3_read_reg(ap, start);
 3275 }
 3276 
 3277 #define T3_REGMAP_SIZE (3 * 1024)
 3278 static int
 3279 cxgb_get_regs_len(void)
 3280 {
 3281         return T3_REGMAP_SIZE;
 3282 }
 3283 
 3284 static void
 3285 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
 3286 {           
 3287         
 3288         /*
 3289          * Version scheme:
 3290          * bits 0..9: chip version
 3291          * bits 10..15: chip revision
 3292          * bit 31: set for PCIe cards
 3293          */
 3294         regs->version = 3 | (sc->params.rev << 10) |
                  ((uint32_t)is_pcie(sc) << 31);
 3295 
 3296         /*
 3297          * We skip the MAC statistics registers because they are clear-on-read.
 3298          * Also reading multi-register stats would need to synchronize with the
 3299          * periodic mac stats accumulation.  Hard to justify the complexity.
 3300          */
 3301         memset(buf, 0, cxgb_get_regs_len());
 3302         reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
 3303         reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
 3304         reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
 3305         reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
 3306         reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
 3307         reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
 3308                        XGM_REG(A_XGM_SERDES_STAT3, 1));
 3309         reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
 3310                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
 3311 }
 3312 
 3313 static int
 3314 alloc_filters(struct adapter *sc)
 3315 {
 3316         struct filter_info *p;
 3317         unsigned int nfilters = sc->params.mc5.nfilters;
 3318 
 3319         if (nfilters == 0)
 3320                 return (0);
 3321 
 3322         p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
 3323         sc->filters = p;
 3324 
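              /*
               * Reserve the last entry as a locked catch-all filter that
               * passes packets with RSS; SET/DEL requests against a locked
               * filter are rejected with EPERM.
               */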
 3325         p = &sc->filters[nfilters - 1];
 3326         p->vlan = 0xfff;
 3327         p->vlan_prio = FILTER_NO_VLAN_PRI;
 3328         p->pass = p->rss = p->valid = p->locked = 1;
 3329 
 3330         return (0);
 3331 }
 3332 
 3333 static int
 3334 setup_hw_filters(struct adapter *sc)
 3335 {
 3336         int i, rc;
 3337         unsigned int nfilters = sc->params.mc5.nfilters;
 3338 
 3339         if (!sc->filters)
 3340                 return (0);
 3341 
 3342         t3_enable_filters(sc);
 3343 
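              /* Program the driver-reserved (locked) filters into the TCAM. */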
 3344         for (i = rc = 0; i < nfilters && !rc; i++) {
 3345                 if (sc->filters[i].locked)
 3346                         rc = set_filter(sc, i, &sc->filters[i]);
 3347         }
 3348 
 3349         return (rc);
 3350 }
 3351 
 3352 static int
 3353 set_filter(struct adapter *sc, int id, const struct filter_info *f)
 3354 {
 3355         int len;
 3356         struct mbuf *m;
 3357         struct ulp_txpkt *txpkt;
 3358         struct work_request_hdr *wr;
 3359         struct cpl_pass_open_req *oreq;
 3360         struct cpl_set_tcb_field *sreq;
 3361 
 3362         len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
 3363         KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
 3364 
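              /*
               * Filter entries occupy the TCAM region directly below the
               * routing entries; convert the filter index to an absolute
               * TCAM index.
               */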
 3365         id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
 3366               sc->params.mc5.nfilters;
 3367 
 3368         m = m_gethdr(M_WAITOK, MT_DATA);
 3369         m->m_len = m->m_pkthdr.len = len;
 3370         bzero(mtod(m, char *), len);
 3371 
 3372         wr = mtod(m, struct work_request_hdr *);
 3373         wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
 3374 
 3375         oreq = (struct cpl_pass_open_req *)(wr + 1);
 3376         txpkt = (struct ulp_txpkt *)oreq;
 3377         txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
 3378         txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
 3379         OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
 3380         oreq->local_port = htons(f->dport);
 3381         oreq->peer_port = htons(f->sport);
 3382         oreq->local_ip = htonl(f->dip);
 3383         oreq->peer_ip = htonl(f->sip);
 3384         oreq->peer_netmask = htonl(f->sip_mask);
 3385         oreq->opt0h = 0;
 3386         oreq->opt0l = htonl(F_NO_OFFLOAD);
 3387         oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
 3388                          V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
 3389                          V_VLAN_PRI(f->vlan_prio >> 1) |
 3390                          V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
 3391                          V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
 3392                          V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
 3393 
 3394         sreq = (struct cpl_set_tcb_field *)(oreq + 1);
 3395         set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
 3396                           (f->report_filter_id << 15) | (1 << 23) |
 3397                           ((u64)f->pass << 35) | ((u64)!f->rss << 36));
 3398         set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
 3399         t3_mgmt_tx(sc, m);
 3400 
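              /*
               * For pass filters that bypass RSS, also point the filter's TCB
               * at the response queue of the requested queue set.
               */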
 3401         if (f->pass && !f->rss) {
 3402                 len = sizeof(*sreq);
 3403                 m = m_gethdr(M_WAITOK, MT_DATA);
 3404                 m->m_len = m->m_pkthdr.len = len;
 3405                 bzero(mtod(m, char *), len);
 3406                 sreq = mtod(m, struct cpl_set_tcb_field *);
 3407                 sreq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 3408                 mk_set_tcb_field(sreq, id, 25, 0x3f80000,
 3409                                  (u64)sc->rrss_map[f->qset] << 19);
 3410                 t3_mgmt_tx(sc, m);
 3411         }
 3412         return (0);
 3413 }
 3414 
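      /* Fill out a CPL_SET_TCB_FIELD request; no reply is requested. */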
 3415 static inline void
 3416 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
 3417     unsigned int word, u64 mask, u64 val)
 3418 {
 3419         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
 3420         req->reply = V_NO_REPLY(1);
 3421         req->cpu_idx = 0;
 3422         req->word = htons(word);
 3423         req->mask = htobe64(mask);
 3424         req->val = htobe64(val);
 3425 }
 3426 
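      /*
       * As above, but prefixed with a ULP_TXPKT header so the request can be
       * sent as part of a ULP work request.
       */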
 3427 static inline void
 3428 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
 3429     unsigned int word, u64 mask, u64 val)
 3430 {
 3431         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
 3432 
 3433         txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
 3434         txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
 3435         mk_set_tcb_field(req, tid, word, mask, val);
 3436 }
