FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/t4_vf.c


/*-
 * Copyright (c) 2016 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <dev/pci/pcivar.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"
#include "t4_mp_ring.h"

/*
 * Some notes:
 *
 * The Virtual Interfaces are connected to an internal switch on the chip
 * which allows VIs attached to the same port to talk to each other even when
 * the port link is down.  As a result, we might want to always report a
 * VF's link as being "up".
 *
 * XXX: Add a TUNABLE and possible per-device sysctl for this?
 */
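
/*
 * Editorial sketch, not part of the original driver: the TUNABLE the XXX
 * above asks about could follow the usual FreeBSD pattern of a SYSCTL_INT
 * with CTLFLAG_RDTUN, which doubles as a loader tunable.  The node, knob
 * name (hw.cxgbev.force_link_up), and variable below are assumptions for
 * illustration only, and <sys/sysctl.h> would need to be included.
 */
#ifdef notyet
SYSCTL_NODE(_hw, OID_AUTO, cxgbev, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbev(4) VF parameters");
static int t4vf_force_link_up = 0;
SYSCTL_INT(_hw_cxgbev, OID_AUTO, force_link_up, CTLFLAG_RDTUN,
    &t4vf_force_link_up, 0, "always report the link on a VF as up");
#endif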

struct intrs_and_queues {
        uint16_t intr_type;     /* MSI or MSI-X */
        uint16_t nirq;          /* Total # of vectors */
        uint16_t ntxq;          /* # of NIC txq's for each port */
        uint16_t nrxq;          /* # of NIC rxq's for each port */
};

struct {
        uint16_t device;
        char *desc;
} t4vf_pciids[] = {
        {0x4800, "Chelsio T440-dbg VF"},
        {0x4801, "Chelsio T420-CR VF"},
        {0x4802, "Chelsio T422-CR VF"},
        {0x4803, "Chelsio T440-CR VF"},
        {0x4804, "Chelsio T420-BCH VF"},
        {0x4805, "Chelsio T440-BCH VF"},
        {0x4806, "Chelsio T440-CH VF"},
        {0x4807, "Chelsio T420-SO VF"},
        {0x4808, "Chelsio T420-CX VF"},
        {0x4809, "Chelsio T420-BT VF"},
        {0x480a, "Chelsio T404-BT VF"},
        {0x480e, "Chelsio T440-LP-CR VF"},
}, t5vf_pciids[] = {
        {0x5800, "Chelsio T580-dbg VF"},
        {0x5801, "Chelsio T520-CR VF"},         /* 2 x 10G */
        {0x5802, "Chelsio T522-CR VF"},         /* 2 x 10G, 2 x 1G */
        {0x5803, "Chelsio T540-CR VF"},         /* 4 x 10G */
        {0x5807, "Chelsio T520-SO VF"},         /* 2 x 10G, nomem */
        {0x5809, "Chelsio T520-BT VF"},         /* 2 x 10GBaseT */
        {0x580a, "Chelsio T504-BT VF"},         /* 4 x 1G */
        {0x580d, "Chelsio T580-CR VF"},         /* 2 x 40G */
        {0x580e, "Chelsio T540-LP-CR VF"},      /* 4 x 10G */
        {0x5810, "Chelsio T580-LP-CR VF"},      /* 2 x 40G */
        {0x5811, "Chelsio T520-LL-CR VF"},      /* 2 x 10G */
        {0x5812, "Chelsio T560-CR VF"},         /* 1 x 40G, 2 x 10G */
        {0x5814, "Chelsio T580-LP-SO-CR VF"},   /* 2 x 40G, nomem */
        {0x5815, "Chelsio T502-BT VF"},         /* 2 x 1G */
        {0x5818, "Chelsio T540-BT VF"},         /* 4 x 10GBaseT */
        {0x5819, "Chelsio T540-LP-BT VF"},      /* 4 x 10GBaseT */
        {0x581a, "Chelsio T540-SO-BT VF"},      /* 4 x 10GBaseT, nomem */
        {0x581b, "Chelsio T540-SO-CR VF"},      /* 4 x 10G, nomem */
}, t6vf_pciids[] = {
        {0x6800, "Chelsio T6-DBG-25 VF"},       /* 2 x 10/25G, debug */
        {0x6801, "Chelsio T6225-CR VF"},        /* 2 x 10/25G */
        {0x6802, "Chelsio T6225-SO-CR VF"},     /* 2 x 10/25G, nomem */
        {0x6803, "Chelsio T6425-CR VF"},        /* 4 x 10/25G */
        {0x6804, "Chelsio T6425-SO-CR VF"},     /* 4 x 10/25G, nomem */
        {0x6805, "Chelsio T6225-OCP-SO VF"},    /* 2 x 10/25G, nomem */
        {0x6806, "Chelsio T62100-OCP-SO VF"},   /* 2 x 40/50/100G, nomem */
        {0x6807, "Chelsio T62100-LP-CR VF"},    /* 2 x 40/50/100G */
        {0x6808, "Chelsio T62100-SO-CR VF"},    /* 2 x 40/50/100G, nomem */
        {0x6809, "Chelsio T6210-BT VF"},        /* 2 x 10GBASE-T */
        {0x680d, "Chelsio T62100-CR VF"},       /* 2 x 40/50/100G */
        {0x6810, "Chelsio T6-DBG-100 VF"},      /* 2 x 40/50/100G, debug */
        {0x6811, "Chelsio T6225-LL-CR VF"},     /* 2 x 10/25G */
        {0x6814, "Chelsio T61100-OCP-SO VF"},   /* 1 x 40/50/100G, nomem */
        {0x6815, "Chelsio T6201-BT VF"},        /* 2 x 1000BASE-T */

        /* Custom */
        {0x6880, "Chelsio T6225 80 VF"},
        {0x6881, "Chelsio T62100 81 VF"},
        {0x6882, "Chelsio T6225-CR 82 VF"},
        {0x6883, "Chelsio T62100-CR 83 VF"},
        {0x6884, "Chelsio T64100-CR 84 VF"},
        {0x6885, "Chelsio T6240-SO 85 VF"},
        {0x6886, "Chelsio T6225-SO-CR 86 VF"},
        {0x6887, "Chelsio T6225-CR 87 VF"},
};

static d_ioctl_t t4vf_ioctl;

static struct cdevsw t4vf_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = t4vf_ioctl,
        .d_name = "t4vf",
};

static int
t4vf_probe(device_t dev)
{
        uint16_t d;
        size_t i;

        d = pci_get_device(dev);
        for (i = 0; i < nitems(t4vf_pciids); i++) {
                if (d == t4vf_pciids[i].device) {
                        device_set_desc(dev, t4vf_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }
        return (ENXIO);
}

static int
t5vf_probe(device_t dev)
{
        uint16_t d;
        size_t i;

        d = pci_get_device(dev);
        for (i = 0; i < nitems(t5vf_pciids); i++) {
                if (d == t5vf_pciids[i].device) {
                        device_set_desc(dev, t5vf_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }
        return (ENXIO);
}

static int
t6vf_probe(device_t dev)
{
        uint16_t d;
        size_t i;

        d = pci_get_device(dev);
        for (i = 0; i < nitems(t6vf_pciids); i++) {
                if (d == t6vf_pciids[i].device) {
                        device_set_desc(dev, t6vf_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }
        return (ENXIO);
}
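
/*
 * Editorial sketch, not part of the original driver: the three probe
 * routines above differ only in the ID table they scan.  If the anonymous
 * PCI ID struct were given a tag (say "t4vf_pciid"), they could share a
 * helper like this one; the helper's name is an assumption for
 * illustration only.
 */
#ifdef notyet
static int
t4vf_probe_common(device_t dev, const struct t4vf_pciid *ids, size_t nids)
{
        uint16_t d;
        size_t i;

        d = pci_get_device(dev);
        for (i = 0; i < nids; i++) {
                if (d == ids[i].device) {
                        device_set_desc(dev, ids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }
        return (ENXIO);
}
#endif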

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
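
/*
 * For example, FW_PARAM_DEV(FWREV) expands to
 * V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 * V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV): the mnemonic and the
 * parameter index are packed into a single 32-bit parameter identifier
 * that the firmware query/set calls below take.
 */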

static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[3], val[3];

        param[0] = FW_PARAM_DEV(FWREV);
        param[1] = FW_PARAM_DEV(TPREV);
        param[2] = FW_PARAM_DEV(CCLK);
        rc = -t4vf_query_params(sc, nitems(param), param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.fw_vers = val[0];
        sc->params.tp_vers = val[1];
        sc->params.vpd.cclk = val[2];

        snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));

        snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));

        return (0);
}
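
/*
 * Worked example (editorial): assuming the usual 8-bit
 * major/minor/micro/build fields of the firmware version word, a raw
 * fw_vers of 0x01190b08 would be formatted as "1.25.11.8".
 */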

static int
get_params__post_init(struct adapter *sc)
{
        int rc;
        uint32_t param, val;

        rc = -t4vf_get_sge_params(sc);
        if (rc != 0) {
                device_printf(sc->dev,
                    "unable to retrieve adapter SGE parameters: %d\n", rc);
                return (rc);
        }

        rc = -t4vf_get_rss_glb_config(sc);
        if (rc != 0) {
                device_printf(sc->dev,
                    "unable to retrieve adapter RSS parameters: %d\n", rc);
                return (rc);
        }
        if (sc->params.rss.mode != FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
                device_printf(sc->dev,
                    "unable to operate with global RSS mode %d\n",
                    sc->params.rss.mode);
                return (EINVAL);
        }

        /*
         * Grab our Virtual Interface resource allocation, extract the
         * features that we're interested in and do a bit of sanity testing on
         * what we discover.
         */
        rc = -t4vf_get_vfres(sc);
        if (rc != 0) {
                device_printf(sc->dev,
                    "unable to get virtual interface resources: %d\n", rc);
                return (rc);
        }

        /*
         * Check for various parameter sanity issues.
         */
        if (sc->params.vfres.pmask == 0) {
                device_printf(sc->dev, "no port access configured/usable!\n");
                return (EINVAL);
        }
        if (sc->params.vfres.nvi == 0) {
                device_printf(sc->dev,
                    "no virtual interfaces configured/usable!\n");
                return (EINVAL);
        }
        sc->params.portvec = sc->params.vfres.pmask;

        param = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
        rc = -t4vf_query_params(sc, 1, &param, &val);
        if (rc == 0)
                sc->params.max_pkts_per_eth_tx_pkts_wr = val;
        else
                sc->params.max_pkts_per_eth_tx_pkts_wr = 14;

        rc = t4_verify_chip_settings(sc);
        if (rc != 0)
                return (rc);
        t4_init_rx_buf_info(sc);

        return (0);
}

static int
set_params__post_init(struct adapter *sc)
{
        uint32_t param, val;

        /* ask for encapsulated CPLs */
        param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        val = 1;
        (void)t4vf_set_params(sc, 1, &param, &val);

        /* Enable 32b port caps if the firmware supports it. */
        param = FW_PARAM_PFVF(PORT_CAPS32);
        val = 1;
        if (t4vf_set_params(sc, 1, &param, &val) == 0)
                sc->params.port_caps32 = 1;

        return (0);
}

#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

static int
cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
{
        struct vf_resources *vfres;
        int nrxq, ntxq, nports;
        int itype, iq_avail, navail, rc;

        /*
         * Figure out the layout of queues across our VIs and ensure
         * we can allocate enough interrupts for our layout.
         */
        vfres = &sc->params.vfres;
        nports = sc->params.nports;
        bzero(iaq, sizeof(*iaq));

        for (itype = INTR_MSIX; itype != 0; itype >>= 1) {
                if (itype == INTR_INTX)
                        continue;

                if (itype == INTR_MSIX)
                        navail = pci_msix_count(sc->dev);
                else
                        navail = pci_msi_count(sc->dev);

                if (navail == 0)
                        continue;

                iaq->intr_type = itype;

                /*
                 * XXX: The Linux driver reserves an Ingress Queue for
                 * forwarded interrupts when using MSI (but not MSI-X).
                 * It seems it just always asks for 2 interrupts and
                 * forwards all rxqs to the forwarded interrupt.
                 *
                 * We must reserve one IRQ for the firmware event
                 * queue.
                 *
                 * Every rxq requires an ingress queue with a free
                 * list and an interrupt, plus an egress queue.
                 * Every txq requires an ETH egress queue.
                 */
                iaq->nirq = T4VF_EXTRA_INTR;

                /*
                 * First, determine how many queues we can allocate.
                 * Start by finding the upper bound on rxqs from the
                 * limit on ingress queues.
                 */
                iq_avail = vfres->niqflint - iaq->nirq;
                if (iq_avail < nports) {
                        device_printf(sc->dev,
                            "Not enough ingress queues (%d) for %d ports\n",
                            vfres->niqflint, nports);
                        return (ENXIO);
                }

                /*
                 * Try to honor the cap on interrupts.  If there aren't
                 * enough interrupts for at least one interrupt per
                 * port, then don't bother, we will just forward all
                 * interrupts to one interrupt in that case.
                 */
                if (iaq->nirq + nports <= navail) {
                        if (iq_avail > navail - iaq->nirq)
                                iq_avail = navail - iaq->nirq;
                }

                nrxq = nports * t4_nrxq;
                if (nrxq > iq_avail) {
                        /*
                         * Too many ingress queues.  Use what we can.
                         */
                        nrxq = (iq_avail / nports) * nports;
                }
                KASSERT(nrxq <= iq_avail, ("too many ingress queues"));

                /*
                 * Next, determine the upper bound on txqs from the limit
                 * on ETH queues.
                 */
                if (vfres->nethctrl < nports) {
                        device_printf(sc->dev,
                            "Not enough ETH queues (%d) for %d ports\n",
                            vfres->nethctrl, nports);
                        return (ENXIO);
                }

                ntxq = nports * t4_ntxq;
                if (ntxq > vfres->nethctrl) {
                        /*
                         * Too many ETH queues.  Use what we can.
                         */
                        ntxq = (vfres->nethctrl / nports) * nports;
                }
                KASSERT(ntxq <= vfres->nethctrl, ("too many ETH queues"));

                /*
                 * Finally, ensure we have enough egress queues.
                 */
                if (vfres->neq < nports * 2) {
                        device_printf(sc->dev,
                            "Not enough egress queues (%d) for %d ports\n",
                            vfres->neq, nports);
                        return (ENXIO);
                }
                if (nrxq + ntxq > vfres->neq) {
                        /* Just punt and use 1 for everything. */
                        nrxq = ntxq = nports;
                }
                KASSERT(nrxq <= iq_avail, ("too many ingress queues"));
                KASSERT(ntxq <= vfres->nethctrl, ("too many ETH queues"));
                KASSERT(nrxq + ntxq <= vfres->neq, ("too many egress queues"));

                /*
                 * Do we have enough interrupts?  For MSI the interrupts
                 * have to be a power of 2 as well.
                 */
                iaq->nirq += nrxq;
                iaq->ntxq = ntxq;
                iaq->nrxq = nrxq;
                if (iaq->nirq <= navail &&
                    (itype != INTR_MSI || powerof2(iaq->nirq))) {
                        navail = iaq->nirq;
                        if (itype == INTR_MSIX)
                                rc = pci_alloc_msix(sc->dev, &navail);
                        else
                                rc = pci_alloc_msi(sc->dev, &navail);
                        if (rc != 0) {
                                device_printf(sc->dev,
                    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
                                    itype, rc, iaq->nirq, navail);
                                return (rc);
                        }
                        if (navail == iaq->nirq) {
                                return (0);
                        }
                        pci_release_msi(sc->dev);
                }

                /* Fall back to a single interrupt. */
                iaq->nirq = 1;
                navail = iaq->nirq;
                if (itype == INTR_MSIX)
                        rc = pci_alloc_msix(sc->dev, &navail);
                else
                        rc = pci_alloc_msi(sc->dev, &navail);
                if (rc != 0)
                        device_printf(sc->dev,
                    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
                            itype, rc, iaq->nirq, navail);
                return (rc);
        }

        device_printf(sc->dev,
            "failed to find a usable interrupt type.  "
            "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
            pci_msix_count(sc->dev), pci_msi_count(sc->dev));

        return (ENXIO);
}
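
/*
 * Worked example (editorial, assuming T4VF_EXTRA_INTR is 1 for the
 * firmware event queue): with 2 ports, t4_nrxq = t4_ntxq = 4,
 * niqflint = 16, nethctrl = 16, neq = 32, and 16 MSI-X vectors available:
 * iq_avail = 16 - 1 = 15, nrxq = 2 * 4 = 8 <= iq_avail, ntxq = 8 <=
 * nethctrl, nrxq + ntxq = 16 <= neq, and nirq = 1 + 8 = 9 <= 16, so
 * 9 MSI-X vectors are requested.
 */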

static int
t4vf_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, j, rqidx, tqidx, n, p, pmask;
        struct make_dev_args mda;
        struct intrs_and_queues iaq;
        struct sge *s;

        sc = device_get_softc(dev);
        sc->dev = dev;
        sysctl_ctx_init(&sc->ctx);
        pci_enable_busmaster(dev);
        pci_set_max_read_req(dev, 4096);
        sc->params.pci.mps = pci_get_max_payload(dev);

        sc->flags |= IS_VF;
        TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

        sc->sge_gts_reg = VF_SGE_REG(A_SGE_VF_GTS);
        sc->sge_kdoorbell_reg = VF_SGE_REG(A_SGE_VF_KDOORBELL);
        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        t4_add_adapter(sc);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

        mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

        rc = t4_map_bars_0_and_4(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = -t4vf_prep_adapter(sc);
        if (rc != 0)
                goto done;

        t4_init_devnames(sc);
        if (sc->names == NULL) {
                rc = ENOTSUP;
                goto done; /* error message displayed already */
        }

        /*
         * Leave the 'pf' and 'mbox' values as zero.  This ensures
         * that various firmware messages do not set these fields,
         * which is the correct behavior for a VF.
         */

        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

        make_dev_args_init(&mda);
        mda.mda_devsw = &t4vf_cdevsw;
        mda.mda_uid = UID_ROOT;
        mda.mda_gid = GID_WHEEL;
        mda.mda_mode = 0600;
        mda.mda_si_drv1 = sc;
        rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
        if (rc != 0)
                device_printf(dev, "failed to create nexus char device: %d.\n",
                    rc);

#if defined(__i386__)
        if ((cpu_feature & CPUID_CX8) == 0) {
                device_printf(dev, "64 bit atomics not available.\n");
                rc = ENOTSUP;
                goto done;
        }
#endif

        /*
         * Some environments do not properly handle PCIe FLRs -- e.g. in Linux
         * 2.6.31 and later we can't call pci_reset_function() in order to
         * issue an FLR because of a self-deadlock on the device semaphore.
         * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
         * cases where they're needed -- for instance, some versions of KVM
         * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
         * use the firmware-based reset in order to reset any per-function
         * state.
         */
        rc = -t4vf_fw_reset(sc);
        if (rc != 0) {
                device_printf(dev, "FW reset failed: %d\n", rc);
                goto done;
        }
        sc->flags |= FW_OK;

        /*
         * Grab basic operational parameters.  These will predominantly have
         * been set up by the Physical Function Driver or will be hard coded
         * into the adapter.  We just have to live with them ...  Note that
         * we _must_ get our VPD parameters before our SGE parameters because
         * we need to know the adapter's core clock from the VPD in order to
         * properly decode the SGE Timer Values.
         */
        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */
        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_map_bar_2(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * The number of "ports" which we support is equal to the number of
         * Virtual Interfaces with which we've been provisioned.
         */
        sc->params.nports = imin(sc->params.vfres.nvi, MAX_NPORTS);

        /*
         * We may have been provisioned with more VIs than the number of
         * ports we're allowed to access (our Port Access Rights Mask).
         * Just use a single VI for each port.
         */
        sc->params.nports = imin(sc->params.nports,
            bitcount32(sc->params.vfres.pmask));

#ifdef notyet
        /*
         * XXX: The Linux VF driver will lower nports if it thinks there
         * are too few resources in vfres (niqflint, nethctrl, neq).
         */
#endif

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.
         */
        pmask = sc->params.vfres.pmask;
        for_each_port(sc, i) {
                struct port_info *pi;
                uint8_t mac[ETHER_ADDR_LEN];

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;
                pi->nvi = 1;
                pi->vi = malloc(sizeof(struct vi_info) * pi->nvi, M_CXGBE,
                    M_ZERO | M_WAITOK);

                /*
                 * Allocate the "main" VI and initialize parameters
                 * like mac addr.
                 */
                rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                /* Prefer the MAC address set by the PF, if there is one. */
                n = 1;
                p = ffs(pmask) - 1;
                MPASS(p >= 0);
                rc = t4vf_get_vf_mac(sc, p, &n, mac);
                if (rc == 0 && n == 1)
                        t4_os_set_hw_addr(pi, mac);
                pmask &= ~(1 << p);

                /* No t4_link_start. */

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
                sc->chan_map[pi->tx_chan] = i;

                /* All VIs on this port share this media. */
                ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
                    cxgbe_media_status);

                pi->dev = device_add_child(dev, sc->names->vf_ifnet_name, -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                pi->vi[0].dev = pi->dev;
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        rc = cfg_itype_and_nqueues(sc, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        s = &sc->sge;
        s->nrxq = sc->params.nports * iaq.nrxq;
        s->ntxq = sc->params.nports * iaq.ntxq;
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports;    /* ctrl queues: 1 per port */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */

        s->iqmap_sz = s->niq;
        s->eqmap_sz = s->neq;
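
        /*
         * Example of the accounting above for 2 ports with iaq.nrxq =
         * iaq.ntxq = 4: nrxq = ntxq = 8, so neq = 8 + 8 (one eq per txq
         * plus one per rxq free list) + 2 (ctrl queues) = 18, and
         * niq = 8 + 1 (firmware event queue) = 9.
         */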

        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];
                struct vi_info *vi;

                if (pi == NULL)
                        continue;

                for_each_vi(pi, j, vi) {
                        vi->pi = pi;
                        vi->adapter = sc;
                        vi->qsize_rxq = t4_qsize_rxq;
                        vi->qsize_txq = t4_qsize_txq;

                        vi->first_rxq = rqidx;
                        vi->first_txq = tqidx;
                        vi->tmr_idx = t4_tmr_idx;
                        vi->pktc_idx = t4_pktc_idx;
                        vi->nrxq = j == 0 ? iaq.nrxq : 1;
                        vi->ntxq = j == 0 ? iaq.ntxq : 1;

                        rqidx += vi->nrxq;
                        tqidx += vi->ntxq;

                        vi->rsrv_noflowq = 0;
                }
        }

        rc = t4_setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "%d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ?
            "MSI-X" : "MSI", sc->intr_count > 1 ? "s" : "", sc->sge.neq,
            sc->sge.niq);

done:
        if (rc != 0)
                t4_detach_common(dev);
        else
                t4_sysctls(sc);

        return (rc);
}

static void
get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{

        /* 0x3f is used as the revision for VFs. */
        regs->version = chip_id(sc) | (0x3f << 10);
        t4_get_regs(sc, buf, regs->len);
}

static void
t4_clr_vi_stats(struct adapter *sc)
{
        int reg;

        for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
             reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
                t4_write_reg(sc, VF_MPS_REG(reg), 0);
}

static int
t4vf_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        int rc;
        struct adapter *sc = dev->si_drv1;

        rc = priv_check(td, PRIV_DRIVER);
        if (rc != 0)
                return (rc);

        switch (cmd) {
        case CHELSIO_T4_GETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4)
                        edata->val = t4_read_reg(sc, edata->addr);
                else if (edata->size == 8)
                        edata->val = t4_read_reg64(sc, edata->addr);
                else
                        return (EINVAL);

                break;
        }
        case CHELSIO_T4_SETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4) {
                        if (edata->val & 0xffffffff00000000)
                                return (EINVAL);
                        t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
                } else if (edata->size == 8)
                        t4_write_reg64(sc, edata->addr, edata->val);
                else
                        return (EINVAL);
                break;
        }
        case CHELSIO_T4_REGDUMP: {
                struct t4_regdump *regs = (struct t4_regdump *)data;
                int reglen = t4_get_regs_len(sc);
                uint8_t *buf;

                if (regs->len < reglen) {
                        regs->len = reglen; /* hint to the caller */
                        return (ENOBUFS);
                }

                regs->len = reglen;
                buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
                get_regs(sc, regs, buf);
                rc = copyout(buf, regs->data, reglen);
                free(buf, M_CXGBE);
                break;
        }
        case CHELSIO_T4_CLEAR_STATS: {
                int i, v;
                u_int port_id = *(uint32_t *)data;
                struct port_info *pi;
                struct vi_info *vi;

                if (port_id >= sc->params.nports)
                        return (EINVAL);
                pi = sc->port[port_id];

                /* MAC stats */
                pi->tx_parse_error = 0;
                t4_clr_vi_stats(sc);

                /*
                 * Since this command accepts a port, clear stats for
                 * all VIs on this port.
                 */
                for_each_vi(pi, v, vi) {
                        if (vi->flags & VI_INIT_DONE) {
                                struct sge_rxq *rxq;
                                struct sge_txq *txq;

                                for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
                                        rxq->lro.lro_queued = 0;
                                        rxq->lro.lro_flushed = 0;
#endif
                                        rxq->rxcsum = 0;
                                        rxq->vlan_extraction = 0;
                                }

                                for_each_txq(vi, i, txq) {
                                        txq->txcsum = 0;
                                        txq->tso_wrs = 0;
                                        txq->vlan_insertion = 0;
                                        txq->imm_wrs = 0;
                                        txq->sgl_wrs = 0;
                                        txq->txpkt_wrs = 0;
                                        txq->txpkts0_wrs = 0;
                                        txq->txpkts1_wrs = 0;
                                        txq->txpkts0_pkts = 0;
                                        txq->txpkts1_pkts = 0;
                                        txq->txpkts_flush = 0;
                                        mp_ring_reset_stats(txq->r);
                                }
                        }
                }
                break;
        }
        case CHELSIO_T4_SCHED_CLASS:
                rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
                break;
        case CHELSIO_T4_SCHED_QUEUE:
                rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
                break;
        default:
                rc = ENOTTY;
        }

        return (rc);
}
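
/*
 * Editorial sketch, not part of the driver: a userland consumer of the
 * CHELSIO_T4_GETREG ioctl above might look like the following.  The device
 * path "/dev/t4vf0" is an assumption based on the make_dev_s() call in
 * t4vf_attach(); the snippet is kept inside a comment because userland
 * code does not belong in this file.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include "t4_ioctl.h"
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4vf0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x: 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */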

static device_method_t t4vf_methods[] = {
        DEVMETHOD(device_probe,         t4vf_probe),
        DEVMETHOD(device_attach,        t4vf_attach),
        DEVMETHOD(device_detach,        t4_detach_common),

        DEVMETHOD_END
};

static driver_t t4vf_driver = {
        "t4vf",
        t4vf_methods,
        sizeof(struct adapter)
};

static device_method_t t5vf_methods[] = {
        DEVMETHOD(device_probe,         t5vf_probe),
        DEVMETHOD(device_attach,        t4vf_attach),
        DEVMETHOD(device_detach,        t4_detach_common),

        DEVMETHOD_END
};

static driver_t t5vf_driver = {
        "t5vf",
        t5vf_methods,
        sizeof(struct adapter)
};

static device_method_t t6vf_methods[] = {
        DEVMETHOD(device_probe,         t6vf_probe),
        DEVMETHOD(device_attach,        t4vf_attach),
        DEVMETHOD(device_detach,        t4_detach_common),

        DEVMETHOD_END
};

static driver_t t6vf_driver = {
        "t6vf",
        t6vf_methods,
        sizeof(struct adapter)
};

static driver_t cxgbev_driver = {
        "cxgbev",
        cxgbe_methods,
        sizeof(struct port_info)
};

static driver_t cxlv_driver = {
        "cxlv",
        cxgbe_methods,
        sizeof(struct port_info)
};

static driver_t ccv_driver = {
        "ccv",
        cxgbe_methods,
        sizeof(struct port_info)
};

DRIVER_MODULE(t4vf, pci, t4vf_driver, 0, 0);
MODULE_VERSION(t4vf, 1);
MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);

DRIVER_MODULE(t5vf, pci, t5vf_driver, 0, 0);
MODULE_VERSION(t5vf, 1);
MODULE_DEPEND(t5vf, t5nex, 1, 1, 1);

DRIVER_MODULE(t6vf, pci, t6vf_driver, 0, 0);
MODULE_VERSION(t6vf, 1);
MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);

DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, 0, 0);
MODULE_VERSION(cxgbev, 1);

DRIVER_MODULE(cxlv, t5vf, cxlv_driver, 0, 0);
MODULE_VERSION(cxlv, 1);

DRIVER_MODULE(ccv, t6vf, ccv_driver, 0, 0);
MODULE_VERSION(ccv, 1);
