FreeBSD/Linux Kernel Cross Reference
sys/dev/netif/oce/oce_if.c


    1 /*-
    2  * Copyright (C) 2013 Emulex
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions are met:
    7  *
    8  * 1. Redistributions of source code must retain the above copyright notice,
    9  *    this list of conditions and the following disclaimer.
   10  *
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * 3. Neither the name of the Emulex Corporation nor the names of its
   16  *    contributors may be used to endorse or promote products derived from
   17  *    this software without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  *
   31  * Contact Information:
   32  * freebsd-drivers@emulex.com
   33  *
   34  * Emulex
   35  * 3333 Susan Street
   36  * Costa Mesa, CA 92626
   37  */
   38 
   39 
   40 /* $FreeBSD: src/sys/dev/oce/oce_if.c,v 1.14 2013/07/07 00:30:13 svnexp Exp $ */
   41 
   42 #include "opt_inet6.h"
   43 #include "opt_inet.h"
   44 
   45 #include "oce_if.h"
   46 
   47 
   48 /* Driver entry points prototypes */
   49 static int  oce_probe(device_t dev);
   50 static int  oce_attach(device_t dev);
   51 static int  oce_detach(device_t dev);
   52 static int  oce_shutdown(device_t dev);
   53 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr);
   54 static void oce_init(void *xsc);
   55 #if 0 /* XXX swildner: MULTIQUEUE */
   56 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
   57 static void oce_multiq_flush(struct ifnet *ifp);
   58 #endif
   59 
   60 /* Driver interrupt routines prototypes */
   61 static void oce_intr(void *arg, int pending);
   62 static int  oce_setup_intr(POCE_SOFTC sc);
   63 static void oce_fast_isr(void *arg);
   64 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
   65                           void (*isr) (void *arg, int pending));
   66 
   67 /* Media callbacks prototypes */
   68 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
   69 static int  oce_media_change(struct ifnet *ifp);
   70 
   71 /* Transmit routines prototypes */
   72 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
   73 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
   74 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
   75                                         uint32_t status);
   76 #if 0 /* XXX swildner: MULTIQUEUE */
   77 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
   78                                  struct oce_wq *wq);
   79 #endif
   80 
   81 /* Receive routines prototypes */
   82 static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
   83 #if 0 /* XXX swildner: ETHER_VTAG */
   84 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
   85 #endif
   86 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
   87 static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
   88                                                 struct oce_nic_rx_cqe *cqe);
   89 
   90 /* Helper function prototypes in this file */
   91 static int  oce_attach_ifp(POCE_SOFTC sc);
   92 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
   93 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
   94 static int  oce_vid_config(POCE_SOFTC sc);
   95 static void oce_mac_addr_set(POCE_SOFTC sc);
   96 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
   97 static void oce_local_timer(void *arg);
   98 static void oce_if_deactivate(POCE_SOFTC sc);
   99 static void oce_if_activate(POCE_SOFTC sc);
  100 static void setup_max_queues_want(POCE_SOFTC sc);
  101 static void update_queues_got(POCE_SOFTC sc);
  102 static void process_link_state(POCE_SOFTC sc,
  103                  struct oce_async_cqe_link_state *acqe);
  104 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
  105 static void oce_get_config(POCE_SOFTC sc);
  106 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
  107 
  108 /* IP specific */
  109 #if defined(INET6) || defined(INET)
  110 #if 0 /* XXX swildner: LRO */
  111 static int  oce_init_lro(POCE_SOFTC sc);
  112 static void oce_rx_flush_lro(struct oce_rq *rq);
  113 #endif
  114 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
  115 #endif
  116 
  117 static device_method_t oce_dispatch[] = {
  118         DEVMETHOD(device_probe, oce_probe),
  119         DEVMETHOD(device_attach, oce_attach),
  120         DEVMETHOD(device_detach, oce_detach),
  121         DEVMETHOD(device_shutdown, oce_shutdown),
  122 
  123         DEVMETHOD_END
  124 };
  125 
  126 static driver_t oce_driver = {
  127         "oce",
  128         oce_dispatch,
  129         sizeof(OCE_SOFTC)
  130 };
  131 static devclass_t oce_devclass;
  132 
  133 
  134 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
  135 MODULE_DEPEND(oce, pci, 1, 1, 1);
  136 MODULE_DEPEND(oce, ether, 1, 1, 1);
  137 MODULE_VERSION(oce, 1);
  138 
  139 
  140 /* global vars */
  141 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
  142 
  143 /* Module capabilities and parameters */
  144 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
  145 #if 0 /* XXX swildner: RSS */
  146 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
  147 #else
  148 uint32_t oce_enable_rss = 0;
  149 #endif
  150 
  151 
  152 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
  153 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
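      /*
       * Illustrative usage (values are examples only): being TUNABLE_INTs,
       * both knobs can be set from loader.conf(5) before the module
       * initializes, e.g.:
       *
       *   hw.oce.max_rsp_handled="64"
       *   hw.oce.enable_rss="0"
       *
       * The compiled-in default for max_rsp_handled is OCE_MAX_RSP_HANDLED.
       */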
  154 
  155 
  156 /* Supported devices table */
  157 static uint32_t supportedDevices[] =  {
  158         (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
  159         (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
  160         (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
  161         (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
  162         (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
  163         (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
  164 };
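      /*
       * Each entry above packs the PCI vendor ID into the upper 16 bits
       * and the device ID into the lower 16 bits; oce_probe() unpacks
       * them with (entry >> 16) & 0xffff and entry & 0xffff respectively.
       */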
  165 
  166 
  167 
  168 
  169 /*****************************************************************************
  170  *                      Driver entry points functions                        *
  171  *****************************************************************************/
  172 
  173 static int
  174 oce_probe(device_t dev)
  175 {
  176         uint16_t vendor = 0;
  177         uint16_t device = 0;
  178         int i = 0;
  179         char str[256] = {0};
  180         POCE_SOFTC sc;
  181 
  182         sc = device_get_softc(dev);
  183         bzero(sc, sizeof(OCE_SOFTC));
  184         sc->dev = dev;
  185 
  186         vendor = pci_get_vendor(dev);
  187         device = pci_get_device(dev);
  188 
  189         for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
  190                 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
  191                         if (device == (supportedDevices[i] & 0xffff)) {
  192                                 ksprintf(str, "%s:%s", "Emulex CNA NIC function",
  193                                         component_revision);
  194                                 device_set_desc_copy(dev, str);
  195 
  196                                 switch (device) {
  197                                 case PCI_PRODUCT_BE2:
  198                                         sc->flags |= OCE_FLAGS_BE2;
  199                                         break;
  200                                 case PCI_PRODUCT_BE3:
  201                                         sc->flags |= OCE_FLAGS_BE3;
  202                                         break;
  203                                 case PCI_PRODUCT_XE201:
  204                                 case PCI_PRODUCT_XE201_VF:
  205                                         sc->flags |= OCE_FLAGS_XE201;
  206                                         break;
  207                                 case PCI_PRODUCT_SH:
  208                                         sc->flags |= OCE_FLAGS_SH;
  209                                         break;
  210                                 default:
  211                                         return ENXIO;
  212                                 }
  213                                 return BUS_PROBE_DEFAULT;
  214                         }
  215                 }
  216         }
  217 
  218         return ENXIO;
  219 }
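      /*
       * On a match, oce_probe() records the chip family in sc->flags
       * (BE2, BE3, XE201/XE201-VF or SH) and returns BUS_PROBE_DEFAULT;
       * any device not listed in supportedDevices is rejected with ENXIO.
       */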
  220 
  221 
  222 static int
  223 oce_attach(device_t dev)
  224 {
  225         POCE_SOFTC sc;
  226         int rc = 0;
  227 
  228         sc = device_get_softc(dev);
  229 
  230         rc = oce_hw_pci_alloc(sc);
  231         if (rc)
  232                 return rc;
  233 
  234         sc->tx_ring_size = OCE_TX_RING_SIZE;
  235         sc->rx_ring_size = OCE_RX_RING_SIZE;
  236         sc->rq_frag_size = OCE_RQ_BUF_SIZE;
  237         sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
  238         sc->promisc      = OCE_DEFAULT_PROMISCUOUS;
  239 
  240         LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
  241         LOCK_CREATE(&sc->dev_lock,  "Device_lock");
  242 
  243         /* initialise the hardware */
  244         rc = oce_hw_init(sc);
  245         if (rc)
  246                 goto pci_res_free;
  247 
  248         oce_get_config(sc);
  249 
  250         setup_max_queues_want(sc);
  251 
  252         rc = oce_setup_intr(sc);
  253         if (rc)
  254                 goto mbox_free;
  255 
  256         rc = oce_queue_init_all(sc);
  257         if (rc)
  258                 goto intr_free;
  259 
  260         rc = oce_attach_ifp(sc);
  261         if (rc)
  262                 goto queues_free;
  263 
  264 #if defined(INET6) || defined(INET)
  265 #if 0 /* XXX swildner: LRO */
  266         rc = oce_init_lro(sc);
  267         if (rc)
  268                 goto ifp_free;
  269 #endif
  270 #endif
  271 
  272         rc = oce_hw_start(sc);
  273         if (rc)
  274                 goto lro_free;
  275 
  276         sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
  277                                 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
  278         sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
  279                                 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
  280 
  281         rc = oce_stats_init(sc);
  282         if (rc)
  283                 goto vlan_free;
  284 
  285         sysctl_ctx_init(&sc->sysctl_ctx);
  286         sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
  287             SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
  288             device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
  289         if (sc->sysctl_tree == NULL) {
  290                 device_printf(sc->dev, "cannot add sysctl tree node\n");
  291                 rc = ENXIO;
  292                 goto vlan_free;
  293         }
  294         oce_add_sysctls(sc);
  295 
  296         callout_init_mp(&sc->timer);
  297         callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
  298 
  299         return 0;
  300 
  301 vlan_free:
  302         if (sc->vlan_attach)
  303                 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
  304         if (sc->vlan_detach)
  305                 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
  306         oce_hw_intr_disable(sc);
  307 lro_free:
  308 #if defined(INET6) || defined(INET)
  309 #if 0 /* XXX swildner: LRO */
  310         oce_free_lro(sc);
  311 ifp_free:
  312 #endif
  313 #endif
  314         ether_ifdetach(sc->ifp);
  315         if_free(sc->ifp);
  316 queues_free:
  317         oce_queue_release_all(sc);
  318 intr_free:
  319         oce_intr_free(sc);
  320 mbox_free:
  321         oce_dma_free(sc, &sc->bsmbx);
  322 pci_res_free:
  323         oce_hw_pci_free(sc);
  324         LOCK_DESTROY(&sc->dev_lock);
  325         LOCK_DESTROY(&sc->bmbx_lock);
  326         return rc;
  327 
  328 }
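      /*
       * The error labels above unwind in the reverse order of
       * acquisition, so a failure at any step of oce_attach() releases
       * exactly the resources that were already set up.
       */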
  329 
  330 
  331 static int
  332 oce_detach(device_t dev)
  333 {
  334         POCE_SOFTC sc = device_get_softc(dev);
  335 
  336         LOCK(&sc->dev_lock);
  337         oce_if_deactivate(sc);
  338         UNLOCK(&sc->dev_lock);
  339 
  340         callout_stop_sync(&sc->timer);
  341 
  342         if (sc->vlan_attach != NULL)
  343                 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
  344         if (sc->vlan_detach != NULL)
  345                 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
  346 
  347         ether_ifdetach(sc->ifp);
  348 
  349         if_free(sc->ifp);
  350 
  351         oce_hw_shutdown(sc);
  352 
  353         bus_generic_detach(dev);
  354 
  355         sysctl_ctx_free(&sc->sysctl_ctx);
  356         return 0;
  357 }
  358 
  359 
  360 static int
  361 oce_shutdown(device_t dev)
  362 {
  363         int rc;
  364 
  365         rc = oce_detach(dev);
  366 
  367         return rc;
  368 }
  369 
  370 
  371 static int
  372 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
  373 {
  374         struct ifreq *ifr = (struct ifreq *)data;
  375         POCE_SOFTC sc = ifp->if_softc;
  376         int rc = 0;
  377         uint32_t u;
  378 
  379         switch (command) {
  380 
  381         case SIOCGIFMEDIA:
  382                 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
  383                 break;
  384 
  385         case SIOCSIFMTU:
  386                 if (ifr->ifr_mtu > OCE_MAX_MTU)
  387                         rc = EINVAL;
  388                 else
  389                         ifp->if_mtu = ifr->ifr_mtu;
  390                 break;
  391 
  392         case SIOCSIFFLAGS:
  393                 if (ifp->if_flags & IFF_UP) {
  394                         if (!(ifp->if_flags & IFF_RUNNING)) {
  395                                 sc->ifp->if_flags |= IFF_RUNNING;
  396                                 oce_init(sc);
  397                         }
  398                         device_printf(sc->dev, "Interface Up\n");
  399                 } else {
  400                         LOCK(&sc->dev_lock);
  401 
  402                         sc->ifp->if_flags &= ~IFF_RUNNING;
  403                         ifq_clr_oactive(&ifp->if_snd);
  404                         oce_if_deactivate(sc);
  405 
  406                         UNLOCK(&sc->dev_lock);
  407 
  408                         device_printf(sc->dev, "Interface Down\n");
  409                 }
  410 
  411                 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
  412                         sc->promisc = TRUE;
  413                         oce_rxf_set_promiscuous(sc, sc->promisc);
  414                 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
  415                         sc->promisc = FALSE;
  416                         oce_rxf_set_promiscuous(sc, sc->promisc);
  417                 }
  418 
  419                 break;
  420 
  421         case SIOCADDMULTI:
  422         case SIOCDELMULTI:
  423                 rc = oce_hw_update_multicast(sc);
  424                 if (rc)
  425                         device_printf(sc->dev,
  426                                 "Update multicast address failed\n");
  427                 break;
  428 
  429         case SIOCSIFCAP:
  430                 u = ifr->ifr_reqcap ^ ifp->if_capenable;
  431 
  432                 if (u & IFCAP_TXCSUM) {
  433                         ifp->if_capenable ^= IFCAP_TXCSUM;
  434                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
  435 
  436                         if (IFCAP_TSO & ifp->if_capenable &&
  437                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
  438                                 ifp->if_capenable &= ~IFCAP_TSO;
  439                                 ifp->if_hwassist &= ~CSUM_TSO;
  440                                 if_printf(ifp,
  441                                          "TSO disabled due to -txcsum.\n");
  442                         }
  443                 }
  444 
  445                 if (u & IFCAP_RXCSUM)
  446                         ifp->if_capenable ^= IFCAP_RXCSUM;
  447 
  448                 if (u & IFCAP_TSO4) {
  449                         ifp->if_capenable ^= IFCAP_TSO4;
  450 
  451                         if (IFCAP_TSO & ifp->if_capenable) {
  452                                 if (IFCAP_TXCSUM & ifp->if_capenable)
  453                                         ifp->if_hwassist |= CSUM_TSO;
  454                                 else {
  455                                         ifp->if_capenable &= ~IFCAP_TSO;
  456                                         ifp->if_hwassist &= ~CSUM_TSO;
  457                                         if_printf(ifp,
  458                                             "Enable txcsum first.\n");
  459                                         rc = EAGAIN;
  460                                 }
  461                         } else
  462                                 ifp->if_hwassist &= ~CSUM_TSO;
  463                 }
  464 
  465                 if (u & IFCAP_VLAN_HWTAGGING)
  466                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
  467 
  468 #if 0 /* XXX swildner: VLAN_HWFILTER */
  469                 if (u & IFCAP_VLAN_HWFILTER) {
  470                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
  471                         oce_vid_config(sc);
  472                 }
  473 #endif
  474 #if defined(INET6) || defined(INET)
  475 #if 0 /* XXX swildner: LRO */
  476                 if (u & IFCAP_LRO)
  477                         ifp->if_capenable ^= IFCAP_LRO;
  478 #endif
  479 #endif
  480 
  481                 break;
  482 
  483         case SIOCGPRIVATE_0:
  484                 rc = oce_handle_passthrough(ifp, data);
  485                 break;
  486         default:
  487                 rc = ether_ioctl(ifp, command, data);
  488                 break;
  489         }
  490 
  491         return rc;
  492 }
  493 
  494 
  495 static void
  496 oce_init(void *arg)
  497 {
  498         POCE_SOFTC sc = arg;
  499 
  500         LOCK(&sc->dev_lock);
  501 
  502         if (sc->ifp->if_flags & IFF_UP) {
  503                 oce_if_deactivate(sc);
  504                 oce_if_activate(sc);
  505         }
  506 
  507         UNLOCK(&sc->dev_lock);
  508 
  509 }
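      /*
       * oce_init() is the ifnet if_init handler: when the interface is
       * marked up it simply bounces it (deactivate, then activate) under
       * dev_lock to re-apply the current configuration.
       */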
  510 
  511 
  512 #if 0 /* XXX swildner: MULTIQUEUE */
  513 static int
  514 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
  515 {
  516         POCE_SOFTC sc = ifp->if_softc;
  517         struct oce_wq *wq = NULL;
  518         int queue_index = 0;
  519         int status = 0;
  520 
  521         if (!sc->link_status) {
  522                 ifq_purge(&ifp->if_snd);
  523                 return ENXIO;
  524         }
  525 
  526         if ((m->m_flags & M_FLOWID) != 0)
  527                 queue_index = m->m_pkthdr.flowid % sc->nwqs;
  528 
  529         wq = sc->wq[queue_index];
  530 
  531         LOCK(&wq->tx_lock);
  532         status = oce_multiq_transmit(ifp, m, wq);
  533         UNLOCK(&wq->tx_lock);
  534 
  535         return status;
  536 
  537 }
  538 
  539 
  540 static void
  541 oce_multiq_flush(struct ifnet *ifp)
  542 {
  543         POCE_SOFTC sc = ifp->if_softc;
  544         struct mbuf     *m;
  545         int i = 0;
  546 
  547         for (i = 0; i < sc->nwqs; i++) {
  548                 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
  549                         m_freem(m);
  550         }
  551         if_qflush(ifp);
  552 }
  553 #endif
  554 
  555 
  556 
  557 /*****************************************************************************
  558  *                   Driver interrupt routines functions                     *
  559  *****************************************************************************/
  560 
  561 static void
  562 oce_intr(void *arg, int pending)
  563 {
  564 
  565         POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
  566         POCE_SOFTC sc = ii->sc;
  567         struct oce_eq *eq = ii->eq;
  568         struct oce_eqe *eqe;
  569         struct oce_cq *cq = NULL;
  570         int i, num_eqes = 0;
  571 
  572 
  573         bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
  574                                  BUS_DMASYNC_POSTWRITE);
  575         do {
  576                 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
  577                 if (eqe->evnt == 0)
  578                         break;
  579                 eqe->evnt = 0;
  580                 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
  581                                         BUS_DMASYNC_POSTWRITE);
  582                 RING_GET(eq->ring, 1);
  583                 num_eqes++;
  584 
  585         } while (TRUE);
  586 
  587         if (!num_eqes)
  588                 goto eq_arm; /* Spurious */
  589 
  590         /* Clear EQ entries, but don't arm */
  591         oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
  592 
  593         /* Process TX, RX and MCC, but don't arm the CQ */
  594         for (i = 0; i < eq->cq_valid; i++) {
  595                 cq = eq->cq[i];
  596                 (*cq->cq_handler)(cq->cb_arg);
  597         }
  598 
  599         /* Arm all cqs connected to this EQ */
  600         for (i = 0; i < eq->cq_valid; i++) {
  601                 cq = eq->cq[i];
  602                 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
  603         }
  604 
  605 eq_arm:
  606         oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
  607 
  608         return;
  609 }
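      /*
       * EQ handling above: drain all pending EQEs first, report the
       * consumed count to the hardware without re-arming, run every
       * attached CQ handler, then re-arm the CQs and finally the EQ,
       * so a new interrupt can fire only after processing completes.
       */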
  610 
  611 
  612 static int
  613 oce_setup_intr(POCE_SOFTC sc)
  614 {
  615         int rc = 0, use_intx = 0;
  616         int vector = 0;
  617 #if 0 /* XXX swildner: MSI-X */
  618         int req_vectors = 0;
  619 
  620         if (is_rss_enabled(sc))
  621                 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
  622         else
  623                 req_vectors = 1;
  624 
  625         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
  626                 sc->intr_count = req_vectors;
  627                 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
  628                 if (rc != 0) {
  629                         use_intx = 1;
  630                         pci_release_msi(sc->dev);
  631                 } else
  632                         sc->flags |= OCE_FLAGS_USING_MSIX;
  633         } else
  634 #endif
  635                 use_intx = 1;
  636 
  637         if (use_intx)
  638                 sc->intr_count = 1;
  639 
  640         /* Scale the number of queues based on the interrupts we got */
  641         update_queues_got(sc);
  642 
  643         if (use_intx) {
  644                 device_printf(sc->dev, "Using legacy interrupt\n");
  645                 rc = oce_alloc_intr(sc, vector, oce_intr);
  646                 if (rc)
  647                         goto error;
  648 #if 0 /* XXX swildner: MSI-X */
  649         } else {
  650                 for (; vector < sc->intr_count; vector++) {
  651                         rc = oce_alloc_intr(sc, vector, oce_intr);
  652                         if (rc)
  653                                 goto error;
  654                 }
  655 #endif
  656         }
  657 
  658         return 0;
  659 error:
  660         oce_intr_free(sc);
  661         return rc;
  662 }
  663 
  664 
  665 void
  666 oce_fast_isr(void *arg)
  667 {
  668         POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
  669         POCE_SOFTC sc = ii->sc;
  670 
  671         if (ii->eq == NULL)
  672                 return;
  673 
  674         oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
  675 
  676         taskqueue_enqueue(ii->tq, &ii->task);
  677 
  678         ii->eq->intr++;
  679 }
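      /*
       * The fast ISR only quiesces the EQ (rearm FALSE, clearint TRUE)
       * and defers the real completion work to the per-vector taskqueue;
       * eq->intr is just an interrupt counter.
       */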
  680 
  681 
  682 static int
  683 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
  684 {
  685         POCE_INTR_INFO ii = &sc->intrs[vector];
  686         int rc = 0, rr;
  687         u_int irq_flags;
  688 
  689         if (vector >= OCE_MAX_EQ)
  690                 return (EINVAL);
  691 
  692 #if 0 /* XXX swildner: MSI-X */
  693         /* Set the resource id for the interrupt.
  694          * MSIx is vector + 1 for the resource id,
  695          * INTx is 0 for the resource id.
  696          */
  697         if (sc->flags & OCE_FLAGS_USING_MSIX)
  698                 rr = vector + 1;
  699         else
  700 #endif
  701                 rr = 0;
  702         ii->irq_type = pci_alloc_1intr(sc->dev,
  703             sc->flags & OCE_FLAGS_USING_MSI, &rr, &irq_flags);
  704         ii->intr_res = bus_alloc_resource_any(sc->dev,
  705                                               SYS_RES_IRQ,
  706                                               &rr, irq_flags);
  707         ii->irq_rr = rr;
  708         if (ii->intr_res == NULL) {
  709                 device_printf(sc->dev,
  710                           "Could not allocate interrupt\n");
  711                 rc = ENXIO;
  712                 return rc;
  713         }
  714 
  715         TASK_INIT(&ii->task, 0, isr, ii);
  716         ii->vector = vector;
  717         ksprintf(ii->task_name, "oce_task[%d]", ii->vector);
  718         ii->tq = taskqueue_create(ii->task_name,
  719                         M_NOWAIT,
  720                         taskqueue_thread_enqueue,
  721                         &ii->tq);
  722         taskqueue_start_threads(&ii->tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
  723                         device_get_nameunit(sc->dev));
  724 
  725         ii->sc = sc;
  726         rc = bus_setup_intr(sc->dev,
  727                         ii->intr_res,
  728                         0,
  729                         oce_fast_isr, ii, &ii->tag, NULL);
  730         return rc;
  731 
  732 }
  733 
  734 
  735 void
  736 oce_intr_free(POCE_SOFTC sc)
  737 {
  738         int i = 0;
  739 
  740         for (i = 0; i < sc->intr_count; i++) {
  741 
  742                 if (sc->intrs[i].tag != NULL)
  743                         bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
  744                                                 sc->intrs[i].tag);
  745                 if (sc->intrs[i].tq != NULL)
  746                         taskqueue_free(sc->intrs[i].tq);
  747 
  748                 if (sc->intrs[i].intr_res != NULL)
  749                         bus_release_resource(sc->dev, SYS_RES_IRQ,
  750                                                 sc->intrs[i].irq_rr,
  751                                                 sc->intrs[i].intr_res);
  752                 sc->intrs[i].tag = NULL;
  753                 sc->intrs[i].intr_res = NULL;
  754         }
  755 
  756         if (sc->flags & OCE_FLAGS_USING_MSIX ||
  757             sc->flags & OCE_FLAGS_USING_MSI)
  758                 pci_release_msi(sc->dev);
  759 
  760 }
  761 
  762 
  763 
  764 /******************************************************************************
  765 *                         Media callbacks functions                           *
  766 ******************************************************************************/
  767 
  768 static void
  769 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
  770 {
  771         POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
  772 
  773 
  774         req->ifm_status = IFM_AVALID;
  775         req->ifm_active = IFM_ETHER;
  776 
  777         if (sc->link_status == 1)
  778                 req->ifm_status |= IFM_ACTIVE;
  779         else
  780                 return;
  781 
  782         switch (sc->link_speed) {
  783         case 1: /* 10 Mbps */
  784                 req->ifm_active |= IFM_10_T | IFM_FDX;
  785                 sc->speed = 10;
  786                 break;
  787         case 2: /* 100 Mbps */
  788                 req->ifm_active |= IFM_100_TX | IFM_FDX;
  789                 sc->speed = 100;
  790                 break;
  791         case 3: /* 1 Gbps */
  792                 req->ifm_active |= IFM_1000_T | IFM_FDX;
  793                 sc->speed = 1000;
  794                 break;
  795         case 4: /* 10 Gbps */
  796                 req->ifm_active |= IFM_10G_SR | IFM_FDX;
  797                 sc->speed = 10000;
  798                 break;
  799         }
  800 
  801         return;
  802 }
  803 
  804 
  805 int
  806 oce_media_change(struct ifnet *ifp)
  807 {
  808         return 0;
  809 }
  810 
  811 
  812 
  813 
  814 /*****************************************************************************
  815  *                        Transmit routines functions                        *
  816  *****************************************************************************/
  817 
  818 static int
  819 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
  820 {
  821         int rc = 0, i, retry_cnt = 0;
  822         bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
  823         struct mbuf *m, *m_temp;
  824         struct oce_wq *wq = sc->wq[wq_index];
  825         struct oce_packet_desc *pd;
  826         struct oce_nic_hdr_wqe *nichdr;
  827         struct oce_nic_frag_wqe *nicfrag;
  828         int num_wqes;
  829         uint32_t reg_value;
  830         boolean_t complete = TRUE;
  831 
  832         m = *mpp;
  833         if (!m)
  834                 return EINVAL;
  835 
  836         if (!(m->m_flags & M_PKTHDR)) {
  837                 rc = ENXIO;
  838                 goto free_ret;
  839         }
  840 
  841         if(oce_tx_asic_stall_verify(sc, m)) {
  842                 m = oce_insert_vlan_tag(sc, m, &complete);
  843                 if(!m) {
  844                         device_printf(sc->dev, "Insertion unsuccessful\n");
  845                         return 0;
  846                 }
  847 
  848         }
  849 
  850         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
  851                 /* consolidate packet buffers for TSO/LSO segment offload */
  852 #if defined(INET6) || defined(INET)
  853                 m = oce_tso_setup(sc, mpp);
  854 #else
  855                 m = NULL;
  856 #endif
  857                 if (m == NULL) {
  858                         rc = ENXIO;
  859                         goto free_ret;
  860                 }
  861         }
  862 
  863         pd = &wq->pckts[wq->pkt_desc_head];
  864 retry:
  865         rc = bus_dmamap_load_mbuf_defrag(wq->tag,
  866                                      pd->map,
  867                                      mpp, segs, OCE_MAX_TX_ELEMENTS,
  868                                      &pd->nsegs, BUS_DMA_NOWAIT);
  869         if (rc == 0) {
  870                 num_wqes = pd->nsegs + 1;
  871                 if (IS_BE(sc) || IS_SH(sc)) {
  872                         /* A dummy WQE is needed to pad to an even count on BE/SH. */
  873                         if (num_wqes & 1)
  874                                 num_wqes++;
  875                 }
  876                 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
  877                         bus_dmamap_unload(wq->tag, pd->map);
  878                         return EBUSY;
  879                 }
  880                 atomic_store_rel_int(&wq->pkt_desc_head,
  881                                      (wq->pkt_desc_head + 1) % \
  882                                       OCE_WQ_PACKET_ARRAY_SIZE);
  883                 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
  884                 pd->mbuf = m;
  885 
  886                 nichdr =
  887                     RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
  888                 nichdr->u0.dw[0] = 0;
  889                 nichdr->u0.dw[1] = 0;
  890                 nichdr->u0.dw[2] = 0;
  891                 nichdr->u0.dw[3] = 0;
  892 
  893                 nichdr->u0.s.complete = complete;
  894                 nichdr->u0.s.event = 1;
  895                 nichdr->u0.s.crc = 1;
  896                 nichdr->u0.s.forward = 0;
  897                 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
  898                 nichdr->u0.s.udpcs =
  899                         (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
  900                 nichdr->u0.s.tcpcs =
  901                         (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
  902                 nichdr->u0.s.num_wqe = num_wqes;
  903                 nichdr->u0.s.total_length = m->m_pkthdr.len;
  904 #if 0 /* XXX swildner: ETHER_VTAG */
  905                 if (m->m_flags & M_VLANTAG) {
  906                         nichdr->u0.s.vlan = 1; /*Vlan present*/
  907                         nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
  908                 }
  909 #endif
  910                 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
  911                         if (m->m_pkthdr.tso_segsz) {
  912                                 nichdr->u0.s.lso = 1;
  913                                 nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
  914                         }
  915                         if (!(IS_BE(sc) || IS_SH(sc)))
  916                                 nichdr->u0.s.ipcs = 1;
  917                 }
  918 
  919                 RING_PUT(wq->ring, 1);
  920                 atomic_add_int(&wq->ring->num_used, 1);
  921 
  922                 for (i = 0; i < pd->nsegs; i++) {
  923                         nicfrag =
  924                             RING_GET_PRODUCER_ITEM_VA(wq->ring,
  925                                                       struct oce_nic_frag_wqe);
  926                         nicfrag->u0.s.rsvd0 = 0;
  927                         nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
  928                         nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
  929                         nicfrag->u0.s.frag_len = segs[i].ds_len;
  930                         pd->wqe_idx = wq->ring->pidx;
  931                         RING_PUT(wq->ring, 1);
  932                         atomic_add_int(&wq->ring->num_used, 1);
  933                 }
  934                 if (num_wqes > (pd->nsegs + 1)) {
  935                         nicfrag =
  936                             RING_GET_PRODUCER_ITEM_VA(wq->ring,
  937                                                       struct oce_nic_frag_wqe);
  938                         nicfrag->u0.dw[0] = 0;
  939                         nicfrag->u0.dw[1] = 0;
  940                         nicfrag->u0.dw[2] = 0;
  941                         nicfrag->u0.dw[3] = 0;
  942                         pd->wqe_idx = wq->ring->pidx;
  943                         RING_PUT(wq->ring, 1);
  944                         atomic_add_int(&wq->ring->num_used, 1);
  945                         pd->nsegs++;
  946                 }
  947 
  948                 sc->ifp->if_opackets++;
  949                 wq->tx_stats.tx_reqs++;
  950                 wq->tx_stats.tx_wrbs += num_wqes;
  951                 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
  952                 wq->tx_stats.tx_pkts++;
  953 
  954                 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
  955                                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  956                 reg_value = (num_wqes << 16) | wq->wq_id;
  957                 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
  958 
  959         } else if (rc == EFBIG) {
  960                 if (retry_cnt == 0) {
  961                         m_temp = m_defrag(m, M_NOWAIT);
  962                         if (m_temp == NULL)
  963                                 goto free_ret;
  964                         m = m_temp;
  965                         *mpp = m_temp;
  966                         retry_cnt = retry_cnt + 1;
  967                         goto retry;
  968                 } else
  969                         goto free_ret;
  970         } else if (rc == ENOMEM)
  971                 return rc;
  972         else
  973                 goto free_ret;
  974 
  975         return 0;
  976 
  977 free_ret:
  978         m_freem(*mpp);
  979         *mpp = NULL;
  980         return rc;
  981 }
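      /*
       * WQE accounting in oce_tx(): one NIC header WQE plus one fragment
       * WQE per DMA segment, padded with a zeroed dummy WQE to an even
       * count on BE/SH hardware.  A two-segment packet thus posts three
       * WQEs on XE201 but four on BE3.  The final doorbell write encodes
       * both count and queue as (num_wqes << 16) | wq_id.
       */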
  982 
  983 
  984 static void
  985 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
  986 {
  987         struct oce_packet_desc *pd;
  988         POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
  989         struct mbuf *m;
  990 
  991         pd = &wq->pckts[wq->pkt_desc_tail];
  992         atomic_store_rel_int(&wq->pkt_desc_tail,
  993                              (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
  994         atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
  995         bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
  996         bus_dmamap_unload(wq->tag, pd->map);
  997 
  998         m = pd->mbuf;
  999         m_freem(m);
 1000         pd->mbuf = NULL;
 1001 
 1002         if (ifq_is_oactive(&sc->ifp->if_snd)) {
 1003                 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
 1004                         ifq_clr_oactive(&sc->ifp->if_snd);
 1005                         oce_tx_restart(sc, wq);
 1006                 }
 1007         }
 1008 }
 1009 
 1010 
 1011 static void
 1012 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
 1013 {
 1014 
 1015         if ((sc->ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
 1016                 return;
 1017 
 1018 #if __FreeBSD_version >= 800000
 1019         if (!drbr_empty(sc->ifp, wq->br))
 1020 #else
 1021         if (!ifq_is_empty(&sc->ifp->if_snd))
 1022 #endif
 1023                 taskqueue_enqueue(taskqueue_swi, &wq->txtask);
 1024 
 1025 }
 1026 
 1027 
 1028 #if defined(INET6) || defined(INET)
 1029 static struct mbuf *
 1030 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
 1031 {
 1032         struct mbuf *m;
 1033 #ifdef INET
 1034         struct ip *ip;
 1035 #endif
 1036 #ifdef INET6
 1037         struct ip6_hdr *ip6;
 1038 #endif
 1039         struct ether_vlan_header *eh;
 1040         struct tcphdr *th;
 1041         uint16_t etype;
 1042         int total_len = 0, ehdrlen = 0;
 1043 
 1044         m = *mpp;
 1045 
 1046         if (M_WRITABLE(m) == 0) {
 1047                 m = m_dup(*mpp, M_NOWAIT);
 1048                 if (!m)
 1049                         return NULL;
 1050                 m_freem(*mpp);
 1051                 *mpp = m;
 1052         }
 1053 
 1054         eh = mtod(m, struct ether_vlan_header *);
 1055         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1056                 etype = ntohs(eh->evl_proto);
 1057                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1058         } else {
 1059                 etype = ntohs(eh->evl_encap_proto);
 1060                 ehdrlen = ETHER_HDR_LEN;
 1061         }
 1062 
 1063         switch (etype) {
 1064 #ifdef INET
 1065         case ETHERTYPE_IP:
 1066                 ip = (struct ip *)(m->m_data + ehdrlen);
 1067                 if (ip->ip_p != IPPROTO_TCP)
 1068                         return NULL;
 1069                 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
 1070 
 1071                 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
 1072                 break;
 1073 #endif
 1074 #ifdef INET6
 1075         case ETHERTYPE_IPV6:
 1076                 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
 1077                 if (ip6->ip6_nxt != IPPROTO_TCP)
 1078                         return NULL;
 1079                 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
 1080 
 1081                 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
 1082                 break;
 1083 #endif
 1084         default:
 1085                 return NULL;
 1086         }
 1087 
 1088         m = m_pullup(m, total_len);
 1089         if (!m)
 1090                 return NULL;
 1091         *mpp = m;
 1092         return m;
 1093 
 1094 }
 1095 #endif /* INET6 || INET */
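      /*
       * oce_tso_setup() makes the mbuf writable if necessary, computes
       * the combined Ethernet + IP/IPv6 + TCP header length, and pulls
       * those headers into contiguous storage; non-TCP packets (or a
       * failed pullup) return NULL, which oce_tx() treats as an error.
       */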
 1096 
 1097 void
 1098 oce_tx_task(void *arg, int npending)
 1099 {
 1100         struct oce_wq *wq = arg;
 1101         POCE_SOFTC sc = wq->parent;
 1102         struct ifnet *ifp = sc->ifp;
 1103 #if 0 /* XXX swildner: MULTIQUEUE */
 1104         int rc = 0;
 1105 
 1106         LOCK(&wq->tx_lock);
 1107         rc = oce_multiq_transmit(ifp, NULL, wq);
 1108         if (rc) {
 1109                 device_printf(sc->dev,
 1110                                 "TX[%d] restart failed\n", wq->queue_index);
 1111         }
 1112         UNLOCK(&wq->tx_lock);
 1113 #else
 1114         lwkt_serialize_enter(ifp->if_serializer);
 1115         oce_start_locked(ifp);
 1116         lwkt_serialize_exit(ifp->if_serializer);
 1117 #endif
 1118 }
 1119 
 1120 
 1121 void
 1122 oce_start_locked(struct ifnet *ifp)
 1123 {
 1124         POCE_SOFTC sc = ifp->if_softc;
 1125         struct mbuf *m;
 1126         int rc = 0;
 1127         int def_q = 0; /* Default tx queue is 0 */
 1128 
 1129         if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd)))
 1130                 return;
 1131 
 1132         if (!sc->link_status) {
 1133                 ifq_purge(&ifp->if_snd);
 1134                 return;
 1135         }
 1136 
 1137         do {
 1138                 m = ifq_dequeue(&sc->ifp->if_snd);
 1139                 if (m == NULL)
 1140                         break;
 1141 
 1142                 rc = oce_tx(sc, &m, def_q);
 1143                 if (rc) {
 1144                         if (m != NULL) {
 1145                                 sc->wq[def_q]->tx_stats.tx_stops ++;
 1146                                 ifq_set_oactive(&ifp->if_snd);
 1147                                 ifq_prepend(&ifp->if_snd, m);
 1148                                 m = NULL;
 1149                         }
 1150                         break;
 1151                 }
 1152                 if (m != NULL)
 1153                         ETHER_BPF_MTAP(ifp, m);
 1154 
 1155         } while (TRUE);
 1156 
 1157         return;
 1158 }
 1159 
 1160 void
 1161 oce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 1162 {
 1163         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
 1164         oce_start_locked(ifp);
 1165 }
 1166 
 1167 
 1168 /* Handle the Completion Queue for transmit */
 1169 uint16_t
 1170 oce_wq_handler(void *arg)
 1171 {
 1172         struct oce_wq *wq = (struct oce_wq *)arg;
 1173         POCE_SOFTC sc = wq->parent;
 1174         struct oce_cq *cq = wq->cq;
 1175         struct oce_nic_tx_cqe *cqe;
 1176         int num_cqes = 0;
 1177 
 1178         bus_dmamap_sync(cq->ring->dma.tag,
 1179                         cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 1180         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
 1181         while (cqe->u0.dw[3]) {
 1182                 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
 1183 
 1184                 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
 1185                 if (wq->ring->cidx >= wq->ring->num_items)
 1186                         wq->ring->cidx -= wq->ring->num_items;
 1187 
 1188                 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
 1189                 wq->tx_stats.tx_compl++;
 1190                 cqe->u0.dw[3] = 0;
 1191                 RING_GET(cq->ring, 1);
 1192                 bus_dmamap_sync(cq->ring->dma.tag,
 1193                                 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 1194                 cqe =
 1195                     RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
 1196                 num_cqes++;
 1197         }
 1198 
 1199         if (num_cqes)
 1200                 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
 1201 
 1202         return 0;
 1203 }
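      /*
       * TX completion: oce_wq_handler() walks the CQ until it reaches an
       * entry whose dw[3] valid word is zero (clearing it behind itself),
       * retires the matching packet descriptor via oce_tx_complete(), and
       * finally re-arms the CQ with the number of entries consumed.
       */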
 1204 
 1205 
 1206 #if 0 /* XXX swildner: MULTIQUEUE */
 1207 static int
 1208 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
 1209 {
 1210         POCE_SOFTC sc = ifp->if_softc;
 1211         int status = 0, queue_index = 0;
 1212         struct mbuf *next = NULL;
 1213         struct buf_ring *br = NULL;
 1214 
 1215         br  = wq->br;
 1216         queue_index = wq->queue_index;
 1217 
 1218         if (!((ifp->if_flags & IFF_RUNNING) && !ifq_is_oactive(&ifp->if_snd))) {
 1219                 if (m != NULL)
 1220                         status = drbr_enqueue(ifp, br, m);
 1221                 return status;
 1222         }
 1223 
 1224         if (m != NULL) {
 1225                 if ((status = drbr_enqueue(ifp, br, m)) != 0)
 1226                         return status;
 1227         }
 1228         while ((next = drbr_peek(ifp, br)) != NULL) {
 1229                 if (oce_tx(sc, &next, queue_index)) {
 1230                         if (next == NULL) {
 1231                                 drbr_advance(ifp, br);
 1232                         } else {
 1233                                 drbr_putback(ifp, br, next);
 1234                                 wq->tx_stats.tx_stops ++;
 1235                                 ifq_set_oactive(&ifp->if_snd);
 1236                                 status = drbr_enqueue(ifp, br, next);
 1237                         }
 1238                         break;
 1239                 }
 1240                 drbr_advance(ifp, br);
 1241                 ifp->if_obytes += next->m_pkthdr.len;
 1242                 if (next->m_flags & M_MCAST)
 1243                         ifp->if_omcasts++;
 1244                 ETHER_BPF_MTAP(ifp, next);
 1245         }
 1246 
 1247         return status;
 1248 }
 1249 #endif
 1250 
 1251 
 1252 
 1253 
 1254 /*****************************************************************************
 1255  *                          Receive  routines functions                      *
 1256  *****************************************************************************/
 1257 
 1258 static void
 1259 oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
 1260 {
 1261         uint32_t out;
 1262         struct oce_packet_desc *pd;
 1263         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
 1264         int i, len, frag_len;
 1265         struct mbuf *m = NULL, *tail = NULL;
 1266         uint16_t vtag;
 1267 
 1268         len = cqe->u0.s.pkt_size;
 1269         if (!len) {
 1270                 /* partial DMA workaround for Lancer */
 1271                 oce_discard_rx_comp(rq, cqe);
 1272                 goto exit;
 1273         }
 1274 
 1275         /* Get vlan_tag value */
 1276         if(IS_BE(sc) || IS_SH(sc))
 1277                 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
 1278         else
 1279                 vtag = cqe->u0.s.vlan_tag;
 1280 
 1281 
 1282         for (i = 0; i < cqe->u0.s.num_fragments; i++) {
 1283 
 1284                 if (rq->packets_out == rq->packets_in) {
 1285                         device_printf(sc->dev,
 1286                                   "RQ receive descriptor missing\n");
 1287                 }
 1288                 out = rq->packets_out + 1;
 1289                 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
 1290                         out = 0;
 1291                 pd = &rq->pckts[rq->packets_out];
 1292                 rq->packets_out = out;
 1293 
 1294                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
 1295                 bus_dmamap_unload(rq->tag, pd->map);
 1296                 rq->pending--;
 1297 
 1298                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
 1299                 pd->mbuf->m_len = frag_len;
 1300 
 1301                 if (tail != NULL) {
 1302                         /* additional fragments */
 1303                         tail->m_next = pd->mbuf;
 1304                         tail = pd->mbuf;
 1305                 } else {
 1306                         /* first fragment, fill out much of the packet header */
 1307                         pd->mbuf->m_pkthdr.len = len;
 1308                         pd->mbuf->m_pkthdr.csum_flags = 0;
 1309                         if (IF_CSUM_ENABLED(sc)) {
 1310                                 if (cqe->u0.s.l4_cksum_pass) {
 1311                                         pd->mbuf->m_pkthdr.csum_flags |=
 1312                                             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 1313                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
 1314                                 }
 1315                                 if (cqe->u0.s.ip_cksum_pass) {
 1316                                         if (!cqe->u0.s.ip_ver) { /* IPV4 */
 1317                                                 pd->mbuf->m_pkthdr.csum_flags |=
 1318                                                 (CSUM_IP_CHECKED|CSUM_IP_VALID);
 1319                                         }
 1320                                 }
 1321                         }
 1322                         m = tail = pd->mbuf;
 1323                 }
 1324                 pd->mbuf = NULL;
 1325                 len -= frag_len;
 1326         }
 1327 
 1328         if (m) {
 1329                 if (!oce_cqe_portid_valid(sc, cqe)) {
 1330                          m_freem(m);
 1331                          goto exit;
 1332                 }
 1333 
 1334                 m->m_pkthdr.rcvif = sc->ifp;
 1335 #if __FreeBSD_version >= 800000
 1336                 if (rq->queue_index)
 1337                         m->m_pkthdr.flowid = (rq->queue_index - 1);
 1338                 else
 1339                         m->m_pkthdr.flowid = rq->queue_index;
 1340                 m->m_flags |= M_FLOWID;
 1341 #endif
 1342 #if 0 /* XXX swildner: ETHER_VTAG */
 1343                 /* This determines if the vlan tag is valid */
 1344                 if (oce_cqe_vtp_valid(sc, cqe)) {
 1345                         if (sc->function_mode & FNM_FLEX10_MODE) {
 1346                                 /* FLEX10. If QnQ is not set, neglect VLAN */
 1347                                 if (cqe->u0.s.qnq) {
 1348                                         m->m_pkthdr.ether_vtag = vtag;
 1349                                         m->m_flags |= M_VLANTAG;
 1350                                 }
 1351                         } else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
 1352                                 /* In UMC mode the pvid is generally stripped
 1353                                    by hw, but in some cases frames still arrive
 1354                                    with it; so if pvid == vlan, ignore the vlan.
 1355                                 */
 1356                                 m->m_pkthdr.ether_vtag = vtag;
 1357                                 m->m_flags |= M_VLANTAG;
 1358                         }
 1359                 }
 1360 #endif
 1361 
 1362                 sc->ifp->if_ipackets++;
 1363 #if defined(INET6) || defined(INET)
 1364 #if 0 /* XXX swildner: LRO */
 1365                 /* Try to queue to LRO */
 1366                 if (IF_LRO_ENABLED(sc) &&
 1367                     (cqe->u0.s.ip_cksum_pass) &&
 1368                     (cqe->u0.s.l4_cksum_pass) &&
 1369                     (!cqe->u0.s.ip_ver)       &&
 1370                     (rq->lro.lro_cnt != 0)) {
 1371 
 1372                         if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
 1373                                 rq->lro_pkts_queued ++;
 1374                                 goto post_done;
 1375                         }
 1376                         /* If LRO posting fails then try to post to the stack */
 1377                 }
 1378 #endif
 1379 #endif
 1380 
 1381                 (*sc->ifp->if_input) (sc->ifp, m);
 1382 #if defined(INET6) || defined(INET)
 1383 #if 0 /* XXX swildner: LRO */
 1384 post_done:
 1385 #endif
 1386 #endif
 1387                 /* Update rx stats per queue */
 1388                 rq->rx_stats.rx_pkts++;
 1389                 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
 1390                 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
 1391                 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
 1392                         rq->rx_stats.rx_mcast_pkts++;
 1393                 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
 1394                         rq->rx_stats.rx_ucast_pkts++;
 1395         }
 1396 exit:
 1397         return;
 1398 }
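      /*
       * RX reassembly: every hardware fragment owns one packet descriptor
       * and mbuf; fragments are chained through m_next, and the first
       * mbuf carries the pkthdr, checksum results and (in the #if 0'd
       * ETHER_VTAG path) the VLAN tag before the chain goes to if_input.
       */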
 1399 
 1400 
 1401 static void
 1402 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 1403 {
 1404         uint32_t out, i = 0;
 1405         struct oce_packet_desc *pd;
 1406         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
 1407         int num_frags = cqe->u0.s.num_fragments;
 1408 
 1409         for (i = 0; i < num_frags; i++) {
 1410                 if (rq->packets_out == rq->packets_in) {
 1411                         device_printf(sc->dev,
 1412                                 "RQ receive descriptor missing\n");
 1413                 }
 1414                 out = rq->packets_out + 1;
 1415                 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
 1416                         out = 0;
 1417                 pd = &rq->pckts[rq->packets_out];
 1418                 rq->packets_out = out;
 1419 
 1420                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
 1421                 bus_dmamap_unload(rq->tag, pd->map);
 1422                 rq->pending--;
 1423                 m_freem(pd->mbuf);
 1424         }
 1425 
 1426 }
 1427 
 1428 
 1429 #if 0 /* XXX swildner: ETHER_VTAG */
 1430 static int
 1431 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
 1432 {
 1433         struct oce_nic_rx_cqe_v1 *cqe_v1;
 1434         int vtp = 0;
 1435 
 1436         if (sc->be3_native) {
 1437                 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
 1438                 vtp =  cqe_v1->u0.s.vlan_tag_present;
 1439         } else
 1440                 vtp = cqe->u0.s.vlan_tag_present;
 1441 
 1442         return vtp;
 1443 
 1444 }
 1445 #endif
 1446 
 1447 
 1448 static int
 1449 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
 1450 {
 1451         struct oce_nic_rx_cqe_v1 *cqe_v1;
 1452         int port_id = 0;
 1453 
 1454         if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
 1455                 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
 1456                 port_id =  cqe_v1->u0.s.port;
 1457                 if (sc->port_id != port_id)
 1458                         return 0;
 1459         } else
 1460                 ; /* For legacy BE3 and Lancer this check is a no-op */
 1461 
 1462         return 1;
 1463 
 1464 }
 1465 
 1466 #if defined(INET6) || defined(INET)
 1467 #if 0 /* XXX swildner: LRO */
 1468 static void
 1469 oce_rx_flush_lro(struct oce_rq *rq)
 1470 {
 1471         struct lro_ctrl *lro = &rq->lro;
 1472         struct lro_entry *queued;
 1473         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
 1474 
 1475         if (!IF_LRO_ENABLED(sc))
 1476                 return;
 1477 
 1478         while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
 1479                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
 1480                 tcp_lro_flush(lro, queued);
 1481         }
 1482         rq->lro_pkts_queued = 0;
 1483 
 1484         return;
 1485 }
 1486 
 1487 
 1488 static int
 1489 oce_init_lro(POCE_SOFTC sc)
 1490 {
 1491         struct lro_ctrl *lro = NULL;
 1492         int i = 0, rc = 0;
 1493 
 1494         for (i = 0; i < sc->nrqs; i++) {
 1495                 lro = &sc->rq[i]->lro;
 1496                 rc = tcp_lro_init(lro);
 1497                 if (rc != 0) {
 1498                         device_printf(sc->dev, "LRO init failed\n");
 1499                         return rc;
 1500                 }
 1501                 lro->ifp = sc->ifp;
 1502         }
 1503 
 1504         return rc;
 1505 }
 1506 
 1507 
 1508 void
 1509 oce_free_lro(POCE_SOFTC sc)
 1510 {
 1511         struct lro_ctrl *lro = NULL;
 1512         int i = 0;
 1513 
 1514         for (i = 0; i < sc->nrqs; i++) {
 1515                 lro = &sc->rq[i]->lro;
 1516                 if (lro)
 1517                         tcp_lro_free(lro);
 1518         }
 1519 }
 1520 #endif
 1521 #endif
 1522 
 1523 int
 1524 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
 1525 {
 1526         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
 1527         int i, in, rc;
 1528         struct oce_packet_desc *pd;
 1529         bus_dma_segment_t segs[6];
 1530         int nsegs, added = 0;
 1531         struct oce_nic_rqe *rqe;
 1532         pd_rxulp_db_t rxdb_reg;
 1533 
 1534         bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
 1535         for (i = 0; i < count; i++) {
 1536                 in = rq->packets_in + 1;
 1537                 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
 1538                         in = 0;
 1539                 if (in == rq->packets_out)
 1540                         break;  /* no more room */
 1541 
 1542                 pd = &rq->pckts[rq->packets_in];
 1543                 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1544                 if (pd->mbuf == NULL)
 1545                         break;
 1546 
 1547                 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
 1548                 rc = bus_dmamap_load_mbuf_segment(rq->tag,
 1549                                              pd->map,
 1550                                              pd->mbuf,
 1551                                              segs, 1,
 1552                                              &nsegs, BUS_DMA_NOWAIT);
 1553                 if (rc) {
 1554                         m_free(pd->mbuf);
 1555                         break;
 1556                 }
 1557 
 1558                 if (nsegs != 1) {
 1559                         i--;
 1560                         continue;
 1561                 }
 1562 
 1563                 rq->packets_in = in;
 1564                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
 1565 
 1566                 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
 1567                 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
 1568                 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
 1569                 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
 1570                 RING_PUT(rq->ring, 1);
 1571                 added++;
 1572                 rq->pending++;
 1573         }
 1574         if (added != 0) {
 1575                 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
 1576                         rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
 1577                         rxdb_reg.bits.qid = rq->rq_id;
 1578                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
 1579                         added -= OCE_MAX_RQ_POSTS;
 1580                 }
 1581                 if (added > 0) {
 1582                         rxdb_reg.bits.qid = rq->rq_id;
 1583                         rxdb_reg.bits.num_posted = added;
 1584                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
 1585                 }
 1586         }
 1587 
 1588         return 0;
 1589 }
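
/*
 * Editor's note: an illustrative sketch of the doorbell batching done at
 * the end of oce_alloc_rx_bufs() above.  A single PD_RXULP_DB write can
 * only advertise a bounded number of newly posted buffers
 * (OCE_MAX_RQ_POSTS), so the total is split into full chunks plus one
 * final partial write; e.g. with a chunk limit of 255 (a value assumed
 * here for illustration), posting 600 buffers takes writes of 255, 255
 * and 90.
 */
#if 0 /* illustrative sketch only */
static void
post_rx_bufs_in_chunks(uint32_t added, uint32_t chunk_limit,
    void (*ring_doorbell)(uint32_t num_posted))
{
	while (added >= chunk_limit) {
		ring_doorbell(chunk_limit);
		added -= chunk_limit;
	}
	if (added > 0)
		ring_doorbell(added);	/* the remainder, if any */
}
#endif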
 1590 
 1591 
 1592 /* Handle the Completion Queue for receive */
 1593 uint16_t
 1594 oce_rq_handler(void *arg)
 1595 {
 1596         struct oce_rq *rq = (struct oce_rq *)arg;
 1597         struct oce_cq *cq = rq->cq;
 1598         POCE_SOFTC sc = rq->parent;
 1599         struct oce_nic_rx_cqe *cqe;
 1600         int num_cqes = 0, rq_buffers_used = 0;
 1601 
 1602         bus_dmamap_sync(cq->ring->dma.tag,
 1603                         cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 1604         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 1605         while (cqe->u0.dw[2]) {
 1606                 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
 1607 
 1608                 RING_GET(rq->ring, 1);
 1609                 if (cqe->u0.s.error == 0) {
 1610                         oce_rx(rq, cqe->u0.s.frag_index, cqe);
 1611                 } else {
 1612                         rq->rx_stats.rxcp_err++;
 1613                         sc->ifp->if_ierrors++;
 1614                         /* Post L3/L4 errors to the stack. */
 1615                         oce_rx(rq, cqe->u0.s.frag_index, cqe);
 1616                 }
 1617                 rq->rx_stats.rx_compl++;
 1618                 cqe->u0.dw[2] = 0;
 1619 
 1620 #if defined(INET6) || defined(INET)
 1621 #if 0 /* XXX swildner: LRO */
 1622                 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
 1623                         oce_rx_flush_lro(rq);
 1624                 }
 1625 #endif
 1626 #endif
 1627 
 1628                 RING_GET(cq->ring, 1);
 1629                 bus_dmamap_sync(cq->ring->dma.tag,
 1630                                 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 1631                 cqe =
 1632                     RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 1633                 num_cqes++;
 1634                 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
 1635                         break;
 1636         }
 1637 
 1638 #if defined(INET6) || defined(INET)
 1639 #if 0 /* XXX swildner: LRO */
 1640         if (IF_LRO_ENABLED(sc))
 1641                 oce_rx_flush_lro(rq);
 1642 #endif
 1643 #endif
 1644 
 1645         if (num_cqes) {
 1646                 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
 1647                 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
 1648                 if (rq_buffers_used > 1)
 1649                         oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
 1650         }
 1651 
 1652         return 0;
 1653 
 1654 }
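
/*
 * Editor's note: oce_rq_handler() above (and oce_mq_handler() below)
 * follow the same completion-queue consumption pattern.  A condensed
 * sketch of that pattern, with the ring plumbing abstracted behind
 * caller-supplied callbacks (the callbacks are illustrative, not driver
 * APIs):
 */
#if 0 /* illustrative sketch only */
static void
consume_cq(int budget,
    struct oce_nic_rx_cqe *(*peek)(void),	/* sync + consumer slot */
    void (*process)(struct oce_nic_rx_cqe *),
    void (*advance)(void),			/* RING_GET(cq->ring, 1) */
    void (*rearm)(int num_cqes))		/* oce_arm_cq(..., FALSE) */
{
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0;

	while ((cqe = peek())->u0.dw[2]) {	/* non-zero word => valid entry */
		process(cqe);
		cqe->u0.dw[2] = 0;	/* clear so the slot reads invalid on reuse */
		advance();
		if (++num_cqes >= budget)	/* bound work per invocation */
			break;
	}
	if (num_cqes)
		rearm(num_cqes);	/* credit consumed entries, re-enable */
}
#endif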
 1655 
 1656 
 1657 
 1658 
 1659 /*****************************************************************************
 1660  *                    Helper functions used in this file                    *
 1661  *****************************************************************************/
 1662 
 1663 static int
 1664 oce_attach_ifp(POCE_SOFTC sc)
 1665 {
 1666 
 1667         sc->ifp = if_alloc(IFT_ETHER);
 1668         if (!sc->ifp)
 1669                 return ENOMEM;
 1670 
 1671         ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
 1672         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 1673         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
 1674 
 1675         sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
 1676         sc->ifp->if_ioctl = oce_ioctl;
 1677         sc->ifp->if_start = oce_start;
 1678         sc->ifp->if_init = oce_init;
 1679         sc->ifp->if_mtu = ETHERMTU;
 1680         sc->ifp->if_softc = sc;
 1681 #if 0 /* XXX swildner: MULTIQUEUE */
 1682         sc->ifp->if_transmit = oce_multiq_start;
 1683         sc->ifp->if_qflush = oce_multiq_flush;
 1684 #endif
 1685 
 1686         if_initname(sc->ifp,
 1687                     device_get_name(sc->dev), device_get_unit(sc->dev));
 1688 
 1689         ifq_set_maxlen(&sc->ifp->if_snd, OCE_MAX_TX_DESC - 1);
 1690         ifq_set_ready(&sc->ifp->if_snd);
 1691 
 1692         sc->ifp->if_hwassist = OCE_IF_HWASSIST;
 1693         sc->ifp->if_hwassist |= CSUM_TSO;
 1694         sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
 1695 
 1696         sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
 1697         sc->ifp->if_capabilities |= IFCAP_HWCSUM;
 1698 #if 0 /* XXX swildner: VLAN_HWFILTER */
 1699         sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
 1700 #endif
 1701 
 1702 #if defined(INET6) || defined(INET)
 1703         sc->ifp->if_capabilities |= IFCAP_TSO;
 1704 #if 0 /* XXX swildner: LRO */
 1705         sc->ifp->if_capabilities |= IFCAP_LRO;
 1706 #endif
 1707 #if 0 /* XXX swildner: VLAN_HWTSO */
 1708         sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
 1709 #endif
 1710 #endif
 1711 
 1712         sc->ifp->if_capenable = sc->ifp->if_capabilities;
 1713         sc->ifp->if_baudrate = IF_Gbps(10UL);
 1714 
 1715         ether_ifattach(sc->ifp, sc->macaddr.mac_addr, NULL);
 1716 
 1717         return 0;
 1718 }
 1719 
 1720 
 1721 static void
 1722 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
 1723 {
 1724         POCE_SOFTC sc = ifp->if_softc;
 1725 
 1726         if (ifp->if_softc !=  arg)
 1727                 return;
 1728         if ((vtag == 0) || (vtag > 4095))
 1729                 return;
 1730 
 1731         sc->vlan_tag[vtag] = 1;
 1732         sc->vlans_added++;
 1733         oce_vid_config(sc);
 1734 }
 1735 
 1736 
 1737 static void
 1738 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
 1739 {
 1740         POCE_SOFTC sc = ifp->if_softc;
 1741 
 1742         if (ifp->if_softc !=  arg)
 1743                 return;
 1744         if ((vtag == 0) || (vtag > 4095))
 1745                 return;
 1746 
 1747         sc->vlan_tag[vtag] = 0;
 1748         sc->vlans_added--;
 1749         oce_vid_config(sc);
 1750 }
 1751 
 1752 
 1753 /*
 1754  * A maximum of 64 VLANs can be configured in BE. If the user
 1755  * configures more, place the card in VLAN promiscuous mode.
 1756  */
 1757 static int
 1758 oce_vid_config(POCE_SOFTC sc)
 1759 {
 1760 #if 0 /* XXX swildner: VLAN_HWFILTER */
 1761         struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
 1762         uint16_t ntags = 0, i;
 1763 #endif
 1764         int status = 0;
 1765 
 1766 #if 0 /* XXX swildner: VLAN_HWFILTER */
 1767         if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
 1768                         (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
 1769                 for (i = 0; i < MAX_VLANS; i++) {
 1770                         if (sc->vlan_tag[i]) {
 1771                                 vtags[ntags].vtag = i;
 1772                                 ntags++;
 1773                         }
 1774                 }
 1775                 if (ntags)
 1776                         status = oce_config_vlan(sc, (uint8_t) sc->if_id,
 1777                                                 vtags, ntags, 1, 0);
 1778         } else
 1779 #endif
 1780                 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
 1781                                                 NULL, 0, 1, 1);
 1782         return status;
 1783 }
 1784 
 1785 
 1786 static void
 1787 oce_mac_addr_set(POCE_SOFTC sc)
 1788 {
 1789         uint32_t old_pmac_id = sc->pmac_id;
 1790         int status = 0;
 1791 
 1792 
 1793         status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
 1794                          sc->macaddr.size_of_struct);
 1795         if (!status)
 1796                 return;
 1797 
 1798         status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
 1799                                         sc->if_id, &sc->pmac_id);
 1800         if (!status) {
 1801                 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
 1802                 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
 1803                                  sc->macaddr.size_of_struct);
 1804         }
 1805         if (status)
 1806                 device_printf(sc->dev, "Failed to update MAC address\n");
 1807 
 1808 }
 1809 
 1810 
 1811 static int
 1812 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
 1813 {
 1814         POCE_SOFTC sc = ifp->if_softc;
 1815         struct ifreq *ifr = (struct ifreq *)data;
 1816         int rc = ENXIO;
 1817         char cookie[32] = {0};
 1818         void *priv_data = (void *)ifr->ifr_data;
 1819         void *ioctl_ptr;
 1820         uint32_t req_size;
 1821         struct mbx_hdr req;
 1822         OCE_DMA_MEM dma_mem;
 1823         struct mbx_common_get_cntl_attr *fw_cmd;
 1824 
 1825         if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
 1826                 return EFAULT;
 1827 
 1828         if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
 1829                 return EINVAL;
 1830 
 1831         ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
 1832         if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
 1833                 return EFAULT;
 1834 
 1835         req_size = le32toh(req.u0.req.request_length);
 1836         if (req_size > 65536)
 1837                 return EINVAL;
 1838 
 1839         req_size += sizeof(struct mbx_hdr);
 1840         rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
 1841         if (rc)
 1842                 return ENOMEM;
 1843 
 1844         if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
 1845                 rc = EFAULT;
 1846                 goto dma_free;
 1847         }
 1848 
 1849         rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
 1850         if (rc) {
 1851                 rc = EIO;
 1852                 goto dma_free;
 1853         }
 1854 
 1855         if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
 1856                 rc =  EFAULT;
 1857 
 1858         /*
 1859          * The firmware fills in all the attributes for this ioctl except
 1860          * the driver version, so fill it in here.
 1861          */
 1862         if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
 1863                 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
 1864                 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
 1865                         COMPONENT_REVISION, strlen(COMPONENT_REVISION));
 1866         }
 1867 
 1868 dma_free:
 1869         oce_dma_free(sc, &dma_mem);
 1870         return rc;
 1871 
 1872 }
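
/*
 * Editor's note: a hypothetical user-space sketch of the buffer layout
 * oce_handle_passthrough() above expects behind ifr->ifr_data: the
 * IOCTL_COOKIE string immediately followed by a struct mbx_hdr and the
 * command payload.  The private ioctl command that routes to this
 * handler is not part of this excerpt, so PRIV_IOCTL_CMD, sock, req,
 * payload and payload_len below are placeholders.
 */
#if 0 /* illustrative sketch only, user-space side */
	char   buf[strlen(IOCTL_COOKIE) + sizeof(struct mbx_hdr) + payload_len];
	size_t off = 0;

	memcpy(buf + off, IOCTL_COOKIE, strlen(IOCTL_COOKIE));
	off += strlen(IOCTL_COOKIE);
	memcpy(buf + off, &req, sizeof(req));	/* request_length is little-endian
						   and capped by the driver at 64KB */
	off += sizeof(req);
	memcpy(buf + off, payload, payload_len);

	ifr.ifr_data = (caddr_t)buf;
	error = ioctl(sock, PRIV_IOCTL_CMD, &ifr);	/* placeholder command */
#endif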
 1873 
 1874 static void
 1875 oce_eqd_set_periodic(POCE_SOFTC sc)
 1876 {
 1877         struct oce_set_eqd set_eqd[OCE_MAX_EQ];
 1878         struct oce_aic_obj *aic;
 1879         struct oce_eq *eqo;
 1880         uint64_t now = 0, delta;
 1881         int eqd, i, num = 0;
 1882         uint32_t ips = 0;
 1883         int tps;
 1884 
 1885         for (i = 0 ; i < sc->neqs; i++) {
 1886                 eqo = sc->eq[i];
 1887                 aic = &sc->aic_obj[i];
 1888                 /* AIC disabled: use the static EQ delay set from user space */
 1889                 if (!aic->enable) {
 1890                         eqd = aic->et_eqd;
 1891                         goto modify_eqd;
 1892                 }
 1893 
 1894                 now = ticks;
 1895 
 1896                 /* Overflow check */
 1897                 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
 1898                         goto done;
 1899 
 1900                 delta = now - aic->ticks;
 1901                 tps = delta/hz;
 1902 
 1903                 /* Interrupt rate based on elapsed ticks */
 1904                 if(tps)
 1905                         ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
 1906 
 1907                 if (ips > INTR_RATE_HWM)
 1908                         eqd = aic->cur_eqd + 20;
 1909                 else if (ips < INTR_RATE_LWM)
 1910                         eqd = aic->cur_eqd / 2;
 1911                 else
 1912                         goto done;
 1913 
 1914                 if (eqd < 10)
 1915                         eqd = 0;
 1916 
 1917                 /* Make sure that the EQ delay is within the allowed range */
 1918                 eqd = min(eqd, aic->max_eqd);
 1919                 eqd = max(eqd, aic->min_eqd);
 1920 
 1921 modify_eqd:
 1922                 if (eqd != aic->cur_eqd) {
 1923                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
 1924                         set_eqd[num].eq_id = eqo->eq_id;
 1925                         aic->cur_eqd = eqd;
 1926                         num++;
 1927                 }
 1928 done:
 1929                 aic->intr_prev = eqo->intr;
 1930                 aic->ticks = now;
 1931         }
 1932 
 1933         /* Is there at least one EQ that needs to be modified? */
 1934         if(num)
 1935                 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
 1936 
 1937 }
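
/*
 * Editor's note: a condensed sketch of the adaptive step performed by
 * oce_eqd_set_periodic() above.  The interrupt rate is interrupts per
 * elapsed second; above INTR_RATE_HWM the delay grows by 20, below
 * INTR_RATE_LWM it halves, small values snap to zero, and the result is
 * clamped to [min_eqd, max_eqd] before being converted to the hardware
 * delay multiplier (eqd * 65 / 100).
 */
#if 0 /* illustrative sketch only */
static int
next_eq_delay(uint32_t ips, int cur_eqd, int min_eqd, int max_eqd,
    uint32_t hwm, uint32_t lwm)
{
	int eqd;

	if (ips > hwm)
		eqd = cur_eqd + 20;	/* interrupting too often: back off */
	else if (ips < lwm)
		eqd = cur_eqd / 2;	/* interrupting rarely: cut latency */
	else
		return (cur_eqd);	/* rate is in band: leave unchanged */

	if (eqd < 10)
		eqd = 0;
	eqd = min(eqd, max_eqd);
	eqd = max(eqd, min_eqd);
	return (eqd);
}
#endif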
 1938 
 1939 static void
 1940 oce_local_timer(void *arg)
 1941 {
 1942         POCE_SOFTC sc = arg;
 1943         int i = 0;
 1944 
 1945         lwkt_serialize_enter(sc->ifp->if_serializer);
 1946         oce_refresh_nic_stats(sc);
 1947         oce_refresh_queue_stats(sc);
 1948         oce_mac_addr_set(sc);
 1949 
 1950         /* TX watchdog */
 1951         for (i = 0; i < sc->nwqs; i++)
 1952                 oce_tx_restart(sc, sc->wq[i]);
 1953 
 1954         /* calculate and set the eq delay for optimal interrupt rate */
 1955         if (IS_BE(sc) || IS_SH(sc))
 1956                 oce_eqd_set_periodic(sc);
 1957 
 1958         callout_reset(&sc->timer, hz, oce_local_timer, sc);
 1959         lwkt_serialize_exit(sc->ifp->if_serializer);
 1960 }
 1961 
 1962 
 1963 /* NOTE: This should only be called while holding
 1964  *       DEVICE_LOCK.
 1965  */
 1966 static void
 1967 oce_if_deactivate(POCE_SOFTC sc)
 1968 {
 1969         int i, mtime = 0;
 1970         int wait_req = 0;
 1971         struct oce_rq *rq;
 1972         struct oce_wq *wq;
 1973         struct oce_eq *eq;
 1974 
 1975         sc->ifp->if_flags &= ~IFF_RUNNING;
 1976         ifq_clr_oactive(&sc->ifp->if_snd);
 1977 
 1978         /* Wait up to ~400ms for pending TX completions to finish */
 1979         while (mtime < 400) {
 1980                 wait_req = 0;
 1981                 for_all_wq_queues(sc, wq, i) {
 1982                         if (wq->ring->num_used) {
 1983                                 wait_req = 1;
 1984                                 DELAY(1);
 1985                                 break;
 1986                         }
 1987                 }
 1988                 mtime += 1;
 1989                 if (!wait_req)
 1990                         break;
 1991         }
 1992 
 1993         /* Stop interrupts and finish any pending bottom halves */
 1994         oce_hw_intr_disable(sc);
 1995 
 1996         /* Since taskqueue_drain takes the Giant Lock, we should not hold
 1997            any other lock. So release the device lock and reacquire it
 1998            after taskqueue_drain completes.
 1999         */
 2000         UNLOCK(&sc->dev_lock);
 2001         for (i = 0; i < sc->intr_count; i++) {
 2002                 if (sc->intrs[i].tq != NULL) {
 2003                         taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
 2004                 }
 2005         }
 2006         LOCK(&sc->dev_lock);
 2007 
 2008         /* Delete the RX queues in the card with the flush parameter set */
 2009         oce_stop_rx(sc);
 2010 
 2011         /* Invalidate any pending CQ and EQ entries */
 2012         for_all_evnt_queues(sc, eq, i)
 2013                 oce_drain_eq(eq);
 2014         for_all_rq_queues(sc, rq, i)
 2015                 oce_drain_rq_cq(rq);
 2016         for_all_wq_queues(sc, wq, i)
 2017                 oce_drain_wq_cq(wq);
 2018 
 2019         /* We still need to receive MCC async events, so re-enable
 2020            interrupts and arm the first EQ.
 2021         */
 2022         oce_hw_intr_enable(sc);
 2023         oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
 2024 
 2025         DELAY(10);
 2026 }
 2027 
 2028 
 2029 static void
 2030 oce_if_activate(POCE_SOFTC sc)
 2031 {
 2032         struct oce_eq *eq;
 2033         struct oce_rq *rq;
 2034         struct oce_wq *wq;
 2035         int i, rc = 0;
 2036 
 2037         sc->ifp->if_flags |= IFF_RUNNING;
 2038 
 2039         oce_hw_intr_disable(sc);
 2040 
 2041         oce_start_rx(sc);
 2042 
 2043         for_all_rq_queues(sc, rq, i) {
 2044                 rc = oce_start_rq(rq);
 2045                 if (rc)
 2046                         device_printf(sc->dev, "Unable to start RX\n");
 2047         }
 2048 
 2049         for_all_wq_queues(sc, wq, i) {
 2050                 rc = oce_start_wq(wq);
 2051                 if (rc)
 2052                         device_printf(sc->dev, "Unable to start TX\n");
 2053         }
 2054 
 2055 
 2056         for_all_evnt_queues(sc, eq, i)
 2057                 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
 2058 
 2059         oce_hw_intr_enable(sc);
 2060 
 2061 }
 2062 
 2063 static void
 2064 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
 2065 {
 2066         /* Update Link status */
 2067         if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
 2068              ASYNC_EVENT_LINK_UP) {
 2069                 sc->link_status = ASYNC_EVENT_LINK_UP;
 2070                 if_link_state_change(sc->ifp);
 2071         } else {
 2072                 sc->link_status = ASYNC_EVENT_LINK_DOWN;
 2073                 if_link_state_change(sc->ifp);
 2074         }
 2075 
 2076         /* Update speed */
 2077         sc->link_speed = acqe->u0.s.speed;
 2078         sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
 2079 
 2080 }
 2081 
 2082 
 2083 /* Handle the Completion Queue for the Mailbox/Async notifications */
 2084 uint16_t
 2085 oce_mq_handler(void *arg)
 2086 {
 2087         struct oce_mq *mq = (struct oce_mq *)arg;
 2088         POCE_SOFTC sc = mq->parent;
 2089         struct oce_cq *cq = mq->cq;
 2090         int num_cqes = 0, evt_type = 0, optype = 0;
 2091         struct oce_mq_cqe *cqe;
 2092         struct oce_async_cqe_link_state *acqe;
 2093         struct oce_async_event_grp5_pvid_state *gcqe;
 2094         struct oce_async_event_qnq *dbgcqe;
 2095 
 2096 
 2097         bus_dmamap_sync(cq->ring->dma.tag,
 2098                         cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 2099         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
 2100 
 2101         while (cqe->u0.dw[3]) {
 2102                 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
 2103                 if (cqe->u0.s.async_event) {
 2104                         evt_type = cqe->u0.s.event_type;
 2105                         optype = cqe->u0.s.async_type;
 2106                         if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
 2107                                 /* Link status evt */
 2108                                 acqe = (struct oce_async_cqe_link_state *)cqe;
 2109                                 process_link_state(sc, acqe);
 2110                         } else if ((evt_type == ASYNC_EVENT_GRP5) &&
 2111                                    (optype == ASYNC_EVENT_PVID_STATE)) {
 2112                                 /* GRP5 PVID */
 2113                                 gcqe =
 2114                                 (struct oce_async_event_grp5_pvid_state *)cqe;
 2115                                 if (gcqe->enabled)
 2116                                         sc->pvid = gcqe->tag & VLAN_VID_MASK;
 2117                                 else
 2118                                         sc->pvid = 0;
 2119 
 2120                         }
 2121                         else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
 2122                                 optype == ASYNC_EVENT_DEBUG_QNQ) {
 2123                                 dbgcqe =
 2124                                 (struct oce_async_event_qnq *)cqe;
 2125                                 if(dbgcqe->valid)
 2126                                         sc->qnqid = dbgcqe->vlan_tag;
 2127                                 sc->qnq_debug_event = TRUE;
 2128                         }
 2129                 }
 2130                 cqe->u0.dw[3] = 0;
 2131                 RING_GET(cq->ring, 1);
 2132                 bus_dmamap_sync(cq->ring->dma.tag,
 2133                                 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
 2134                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
 2135                 num_cqes++;
 2136         }
 2137 
 2138         if (num_cqes)
 2139                 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
 2140 
 2141         return 0;
 2142 }
 2143 
 2144 
 2145 static void
 2146 setup_max_queues_want(POCE_SOFTC sc)
 2147 {
 2148         /* Check if this is a FLEX machine. If so, don't use RSS. */
 2149         if ((sc->function_mode & FNM_FLEX10_MODE) ||
 2150             (sc->function_mode & FNM_UMC_MODE)    ||
 2151             (sc->function_mode & FNM_VNIC_MODE)   ||
 2152             (!is_rss_enabled(sc))                 ||
 2153             (sc->flags & OCE_FLAGS_BE2)) {
 2154                 sc->nrqs = 1;
 2155                 sc->nwqs = 1;
 2156         }
 2157 }
 2158 
 2159 
 2160 static void
 2161 update_queues_got(POCE_SOFTC sc)
 2162 {
 2163         if (is_rss_enabled(sc)) {
 2164                 sc->nrqs = sc->intr_count + 1;
 2165                 sc->nwqs = sc->intr_count;
 2166         } else {
 2167                 sc->nrqs = 1;
 2168                 sc->nwqs = 1;
 2169         }
 2170 }
 2171 
 2172 static int
 2173 oce_check_ipv6_ext_hdr(struct mbuf *m)
 2174 {
 2175         struct ether_header *eh = mtod(m, struct ether_header *);
 2176         caddr_t m_datatemp = m->m_data;
 2177 
 2178         if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
 2179                 m->m_data += sizeof(struct ether_header);
 2180                 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
 2181 
 2182                 if((ip6->ip6_nxt != IPPROTO_TCP) && \
 2183                                 (ip6->ip6_nxt != IPPROTO_UDP)){
 2184                         struct ip6_ext *ip6e = NULL;
 2185                         m->m_data += sizeof(struct ip6_hdr);
 2186 
 2187                         ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
 2188                         if(ip6e->ip6e_len == 0xff) {
 2189                                 m->m_data = m_datatemp;
 2190                                 return TRUE;
 2191                         }
 2192                 }
 2193                 m->m_data = m_datatemp;
 2194         }
 2195         return FALSE;
 2196 }
 2197 
 2198 static int
 2199 is_be3_a1(POCE_SOFTC sc)
 2200 {
 2201         if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
 2202                 return TRUE;
 2203         }
 2204         return FALSE;
 2205 }
 2206 
 2207 static struct mbuf *
 2208 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
 2209 {
 2210         uint16_t vlan_tag = 0;
 2211 
 2212         if(!M_WRITABLE(m))
 2213                 return NULL;
 2214 
 2215 #if 0 /* XXX swildner: ETHER_VTAG */
 2216         /* Embed vlan tag in the packet if it is not part of it */
 2217         if(m->m_flags & M_VLANTAG) {
 2218                 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
 2219                 m->m_flags &= ~M_VLANTAG;
 2220         }
 2221 #endif
 2222 
 2223         /* If UMC, ignore VLAN tag insertion and insert the PVID instead */
 2224         if(sc->pvid) {
 2225                 if(!vlan_tag)
 2226                         vlan_tag = sc->pvid;
 2227                 *complete = FALSE;
 2228         }
 2229 
 2230 #if 0 /* XXX swildner: ETHER_VTAG */
 2231         if(vlan_tag) {
 2232                 m = ether_vlanencap(m, vlan_tag);
 2233         }
 2234 
 2235         if(sc->qnqid) {
 2236                 m = ether_vlanencap(m, sc->qnqid);
 2237                 *complete = FALSE;
 2238         }
 2239 #endif
 2240         return m;
 2241 }
 2242 
 2243 static int
 2244 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
 2245 {
 2246         if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
 2247                         oce_check_ipv6_ext_hdr(m)) {
 2248                 return TRUE;
 2249         }
 2250         return FALSE;
 2251 }
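
/*
 * Editor's note: a minimal sketch of how the two helpers above fit
 * together on the transmit path.  The caller is outside this excerpt,
 * so the surrounding code is an assumption: when a packet would hit the
 * BE3-A1 QnQ/UMC + IPv6-extension-header ASIC stall, the workaround is
 * to embed the tag in the frame body via oce_insert_vlan_tag() instead
 * of relying on hardware tag insertion.
 */
#if 0 /* illustrative sketch only */
	boolean_t complete = TRUE;

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (m == NULL)
			return (ENOMEM);	/* mbuf was not writable */
	}
#endif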
 2252 
 2253 static void
 2254 oce_get_config(POCE_SOFTC sc)
 2255 {
 2256         int rc = 0;
 2257         uint32_t max_rss = 0;
 2258 
 2259         if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
 2260                 max_rss = OCE_LEGACY_MODE_RSS;
 2261         else
 2262                 max_rss = OCE_MAX_RSS;
 2263 
 2264         if (!IS_BE(sc)) {
 2265                 rc = oce_get_func_config(sc);
 2266                 if (rc) {
 2267                         sc->nwqs = OCE_MAX_WQ;
 2268                         sc->nrssqs = max_rss;
 2269                         sc->nrqs = sc->nrssqs + 1;
 2270                 }
 2271         }
 2272         else {
 2273                 rc = oce_get_profile_config(sc);
 2274                 sc->nrssqs = max_rss;
 2275                 sc->nrqs = sc->nrssqs + 1;
 2276                 if (rc)
 2277                         sc->nwqs = OCE_MAX_WQ;
 2278         }
 2279 }
