FreeBSD/Linux Kernel Cross Reference
sys/dev/mana/mana_en.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2021 Microsoft Corp.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  *
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/kernel.h>
   37 #include <sys/kthread.h>
   38 #include <sys/malloc.h>
   39 #include <sys/mbuf.h>
   40 #include <sys/smp.h>
   41 #include <sys/socket.h>
   42 #include <sys/sockio.h>
   43 #include <sys/time.h>
   44 #include <sys/eventhandler.h>
   45 
   46 #include <machine/bus.h>
   47 #include <machine/resource.h>
   48 #include <machine/in_cksum.h>
   49 
   50 #include <net/if.h>
   51 #include <net/if_var.h>
   52 #include <net/if_types.h>
   53 #include <net/if_vlan_var.h>
   54 #ifdef RSS
   55 #include <net/rss_config.h>
   56 #endif
   57 
   58 #include <netinet/in_systm.h>
   59 #include <netinet/in.h>
   60 #include <netinet/if_ether.h>
   61 #include <netinet/ip.h>
   62 #include <netinet/ip6.h>
   63 #include <netinet/tcp.h>
   64 #include <netinet/udp.h>
   65 
   66 #include "mana.h"
   67 #include "mana_sysctl.h"
   68 
   69 static int mana_up(struct mana_port_context *apc);
   70 static int mana_down(struct mana_port_context *apc);
   71 
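       /*
        * Generate the RSS hash key once with arc4random_buf() and reuse it
        * for every subsequent call; callers receive the first 'size' bytes
        * of that shared key.
        */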
   72 static void
   73 mana_rss_key_fill(void *k, size_t size)
   74 {
   75         static bool rss_key_generated = false;
   76         static uint8_t rss_key[MANA_HASH_KEY_SIZE];
   77 
   78         KASSERT(size <= MANA_HASH_KEY_SIZE,
    79             ("Requested more bytes than the MANA RSS key can hold"));
   80 
   81         if (!rss_key_generated) {
   82                 arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
   83                 rss_key_generated = true;
   84         }
   85         memcpy(k, rss_key, size);
   86 }
   87 
   88 static int
   89 mana_ifmedia_change(struct ifnet *ifp __unused)
   90 {
   91         return EOPNOTSUPP;
   92 }
   93 
   94 static void
   95 mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   96 {
   97         struct mana_port_context *apc = if_getsoftc(ifp);
   98 
   99         if (!apc) {
  100                 if_printf(ifp, "Port not available\n");
  101                 return;
  102         }
  103 
  104         MANA_APC_LOCK_LOCK(apc);
  105 
  106         ifmr->ifm_status = IFM_AVALID;
  107         ifmr->ifm_active = IFM_ETHER;
  108 
  109         if (!apc->port_is_up) {
  110                 MANA_APC_LOCK_UNLOCK(apc);
  111                 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
  112                 return;
  113         }
  114 
  115         ifmr->ifm_status |= IFM_ACTIVE;
  116         ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
  117 
  118         MANA_APC_LOCK_UNLOCK(apc);
  119 }
  120 
  121 static uint64_t
  122 mana_get_counter(struct ifnet *ifp, ift_counter cnt)
  123 {
  124         struct mana_port_context *apc = if_getsoftc(ifp);
  125         struct mana_port_stats *stats = &apc->port_stats;
  126 
  127         switch (cnt) {
  128         case IFCOUNTER_IPACKETS:
  129                 return (counter_u64_fetch(stats->rx_packets));
  130         case IFCOUNTER_OPACKETS:
  131                 return (counter_u64_fetch(stats->tx_packets));
  132         case IFCOUNTER_IBYTES:
  133                 return (counter_u64_fetch(stats->rx_bytes));
  134         case IFCOUNTER_OBYTES:
  135                 return (counter_u64_fetch(stats->tx_bytes));
  136         case IFCOUNTER_IQDROPS:
  137                 return (counter_u64_fetch(stats->rx_drops));
  138         case IFCOUNTER_OQDROPS:
  139                 return (counter_u64_fetch(stats->tx_drops));
  140         default:
  141                 return (if_get_counter_default(ifp, cnt));
  142         }
  143 }
  144 
  145 static void
  146 mana_qflush(struct ifnet *ifp)
  147 {
  148         if_qflush(ifp);
  149 }
  150 
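       /*
        * Bring the port down (if it is currently up) and back up again,
        * all while holding the per-port APC lock.
        */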
  151 int
  152 mana_restart(struct mana_port_context *apc)
  153 {
  154         int rc = 0;
  155 
  156         MANA_APC_LOCK_LOCK(apc);
  157         if (apc->port_is_up)
  158                  mana_down(apc);
  159 
  160         rc = mana_up(apc);
  161         MANA_APC_LOCK_UNLOCK(apc);
  162 
  163         return (rc);
  164 }
  165 
  166 static int
  167 mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
  168 {
  169         struct mana_port_context *apc = if_getsoftc(ifp);
  170         struct ifrsskey *ifrk;
  171         struct ifrsshash *ifrh;
  172         struct ifreq *ifr;
  173         uint16_t new_mtu;
  174         int rc = 0;
  175 
  176         switch (command) {
  177         case SIOCSIFMTU:
  178                 ifr = (struct ifreq *)data;
  179                 new_mtu = ifr->ifr_mtu;
  180                 if (ifp->if_mtu == new_mtu)
  181                         break;
  182                 if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
  183                     (new_mtu + 18 < MIN_FRAME_SIZE)) {
  184                         if_printf(ifp, "Invalid MTU. new_mtu: %d, "
  185                             "max allowed: %d, min allowed: %d\n",
  186                             new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
  187                         return EINVAL;
  188                 }
  189                 MANA_APC_LOCK_LOCK(apc);
  190                 if (apc->port_is_up)
  191                         mana_down(apc);
  192 
  193                 apc->frame_size = new_mtu + 18;
  194                 if_setmtu(ifp, new_mtu);
  195                 mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
  196 
  197                 rc = mana_up(apc);
  198                 MANA_APC_LOCK_UNLOCK(apc);
  199                 break;
  200 
  201         case SIOCSIFFLAGS:
  202                 if (ifp->if_flags & IFF_UP) {
  203                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
  204                                 MANA_APC_LOCK_LOCK(apc);
  205                                 if (!apc->port_is_up)
  206                                         rc = mana_up(apc);
  207                                 MANA_APC_LOCK_UNLOCK(apc);
  208                         }
  209                 } else {
  210                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  211                                 MANA_APC_LOCK_LOCK(apc);
  212                                 if (apc->port_is_up)
  213                                         mana_down(apc);
  214                                 MANA_APC_LOCK_UNLOCK(apc);
  215                         }
  216                 }
  217                 break;
  218 
  219         case SIOCSIFMEDIA:
  220         case SIOCGIFMEDIA:
  221         case SIOCGIFXMEDIA:
  222                 ifr = (struct ifreq *)data;
  223                 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
  224                 break;
  225 
  226         case SIOCGIFRSSKEY:
  227                 ifrk = (struct ifrsskey *)data;
  228                 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
  229                 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
  230                 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
  231                 break;
  232 
  233         case SIOCGIFRSSHASH:
  234                 ifrh = (struct ifrsshash *)data;
  235                 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
  236                 ifrh->ifrh_types =
  237                     RSS_TYPE_TCP_IPV4 |
  238                     RSS_TYPE_UDP_IPV4 |
  239                     RSS_TYPE_TCP_IPV6 |
  240                     RSS_TYPE_UDP_IPV6;
  241                 break;
  242 
  243         default:
  244                 rc = ether_ioctl(ifp, command, data);
  245                 break;
  246         }
  247 
  248         return (rc);
  249 }
  250 
  251 static inline void
  252 mana_alloc_counters(counter_u64_t *begin, int size)
  253 {
  254         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
  255 
  256         for (; begin < end; ++begin)
  257                 *begin = counter_u64_alloc(M_WAITOK);
  258 }
  259 
  260 static inline void
  261 mana_free_counters(counter_u64_t *begin, int size)
  262 {
  263         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
  264 
  265         for (; begin < end; ++begin)
  266                 counter_u64_free(*begin);
  267 }
  268 
  269 static bool
  270 mana_can_tx(struct gdma_queue *wq)
  271 {
  272         return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
  273 }
  274 
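       /*
        * DMA-map an outgoing mbuf chain.  If the chain has too many
        * segments, collapse it once with m_collapse() and retry.  On
        * success, fill in the work-request SGL and sync the map
        * (BUS_DMASYNC_PREWRITE).
        */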
  275 static inline int
  276 mana_tx_map_mbuf(struct mana_port_context *apc,
  277     struct mana_send_buf_info *tx_info,
  278     struct mbuf **m_head, struct mana_tx_package *tp,
  279     struct mana_stats *tx_stats)
  280 {
  281         struct gdma_dev *gd = apc->ac->gdma_dev;
  282         bus_dma_segment_t segs[MAX_MBUF_FRAGS];
  283         struct mbuf *m = *m_head;
  284         int err, nsegs, i;
  285 
  286         err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
  287             m, segs, &nsegs, BUS_DMA_NOWAIT);
  288         if (err == EFBIG) {
  289                 struct mbuf *m_new;
  290 
  291                 counter_u64_add(tx_stats->collapse, 1);
  292                 m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
  293                 if (unlikely(m_new == NULL)) {
  294                         counter_u64_add(tx_stats->collapse_err, 1);
  295                         return ENOBUFS;
  296                 } else {
  297                         *m_head = m = m_new;
  298                 }
  299 
  300                 mana_warn(NULL,
  301                     "Too many segs in orig mbuf, m_collapse called\n");
  302 
  303                 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
  304                     tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
  305         }
  306         if (!err) {
  307                 for (i = 0; i < nsegs; i++) {
  308                         tp->wqe_req.sgl[i].address = segs[i].ds_addr;
  309                         tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
  310                         tp->wqe_req.sgl[i].size = segs[i].ds_len;
  311                 }
  312                 tp->wqe_req.num_sge = nsegs;
  313 
  314                 tx_info->mbuf = *m_head;
  315 
  316                 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
  317                     BUS_DMASYNC_PREWRITE);
  318         }
  319 
  320         return err;
  321 }
  322 
  323 static inline void
  324 mana_tx_unmap_mbuf(struct mana_port_context *apc,
  325     struct mana_send_buf_info *tx_info)
  326 {
  327         bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
  328             BUS_DMASYNC_POSTWRITE);
  329         bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
  330         if (tx_info->mbuf) {
  331                 m_freem(tx_info->mbuf);
  332                 tx_info->mbuf = NULL;
  333         }
  334 }
  335 
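       /*
        * Attach a receive buffer to an RX OOB slot: optionally allocate a
        * new cluster sized to the queue's data size (falling back to a
        * standard cluster), DMA-map it as a single segment, and record the
        * mapping in the slot's scatter/gather entry.
        */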
  336 static inline int
  337 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
  338     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
  339 {
  340         bus_dma_segment_t segs[1];
  341         struct mbuf *mbuf;
  342         int nsegs, err;
  343         uint32_t mlen;
  344 
  345         if (alloc_mbuf) {
  346                 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
  347                 if (unlikely(mbuf == NULL)) {
  348                         mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  349                         if (unlikely(mbuf == NULL)) {
  350                                 return ENOMEM;
  351                         }
  352                         mlen = MCLBYTES;
  353                 } else {
  354                         mlen = rxq->datasize;
  355                 }
  356 
  357                 mbuf->m_pkthdr.len = mbuf->m_len = mlen;
  358         } else {
  359                 if (rx_oob->mbuf) {
  360                         mbuf = rx_oob->mbuf;
  361                         mlen = rx_oob->mbuf->m_pkthdr.len;
  362                 } else {
  363                         return ENOMEM;
  364                 }
  365         }
  366 
  367         err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
  368             mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
  369 
  370         if (unlikely((err != 0) || (nsegs != 1))) {
  371                 mana_warn(NULL, "Failed to map mbuf, error: %d, "
  372                     "nsegs: %d\n", err, nsegs);
  373                 counter_u64_add(rxq->stats.dma_mapping_err, 1);
  374                 goto error;
  375         }
  376 
  377         bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
  378             BUS_DMASYNC_PREREAD);
  379 
  380         rx_oob->mbuf = mbuf;
  381         rx_oob->num_sge = 1;
  382         rx_oob->sgl[0].address = segs[0].ds_addr;
  383         rx_oob->sgl[0].size = mlen;
  384         rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
  385 
  386         return 0;
  387 
  388 error:
  389         m_freem(mbuf);
  390         return EFAULT;
  391 }
  392 
  393 static inline void
  394 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
  395     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
  396 {
  397         bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
  398             BUS_DMASYNC_POSTREAD);
  399         bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
  400 
  401         if (free_mbuf && rx_oob->mbuf) {
  402                 m_freem(rx_oob->mbuf);
  403                 rx_oob->mbuf = NULL;
  404         }
  405 }
  406 
  407 
   408 /* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
  409 #define MANA_L3_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
  410 #define MANA_L4_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
  411 
  412 #define MANA_TXQ_FULL   (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
  413 
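       /*
        * Drain the transmit buf_ring: DMA-map each mbuf, build the
        * per-packet OOB (checksum/TSO offload fields), post a work request
        * on the send queue and ring the doorbell.  If the SQ runs out of
        * space, set IFF_DRV_OACTIVE and kick the completion cleanup task.
        */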
  414 static void
  415 mana_xmit(struct mana_txq *txq)
  416 {
  417         enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
  418         struct mana_send_buf_info *tx_info;
  419         struct ifnet *ndev = txq->ndev;
  420         struct mbuf *mbuf;
  421         struct mana_port_context *apc = if_getsoftc(ndev);
  422         struct mana_port_stats *port_stats = &apc->port_stats;
  423         struct gdma_dev *gd = apc->ac->gdma_dev;
  424         uint64_t packets, bytes;
  425         uint16_t next_to_use;
  426         struct mana_tx_package pkg = {};
  427         struct mana_stats *tx_stats;
  428         struct gdma_queue *gdma_sq;
  429         struct mana_cq *cq;
  430         int err, len;
  431 
  432         gdma_sq = txq->gdma_sq;
  433         cq = &apc->tx_qp[txq->idx].tx_cq;
  434         tx_stats = &txq->stats;
  435 
  436         packets = 0;
  437         bytes = 0;
  438         next_to_use = txq->next_to_use;
  439 
  440         while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
  441                 if (!apc->port_is_up ||
  442                     (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
  443                         drbr_putback(ndev, txq->txq_br, mbuf);
  444                         break;
  445                 }
  446 
  447                 if (!mana_can_tx(gdma_sq)) {
  448                         /* SQ is full. Set the IFF_DRV_OACTIVE flag */
  449                         if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
  450                         counter_u64_add(tx_stats->stop, 1);
  451                         uint64_t stops = counter_u64_fetch(tx_stats->stop);
  452                         uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
  453 #define MANA_TXQ_STOP_THRESHOLD         50
  454                         if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
  455                             stops > wakeups && txq->alt_txq_idx == txq->idx) {
  456                                 txq->alt_txq_idx =
  457                                     (txq->idx + (stops / wakeups))
  458                                     % apc->num_queues;
  459                                 counter_u64_add(tx_stats->alt_chg, 1);
  460                         }
  461 
  462                         drbr_putback(ndev, txq->txq_br, mbuf);
  463 
  464                         taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
  465                         break;
  466                 }
  467 
  468                 tx_info = &txq->tx_buf_info[next_to_use];
  469 
  470                 memset(&pkg, 0, sizeof(struct mana_tx_package));
  471                 pkg.wqe_req.sgl = pkg.sgl_array;
  472 
  473                 err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
  474                 if (unlikely(err)) {
  475                         mana_dbg(NULL,
  476                             "Failed to map tx mbuf, err %d\n", err);
  477 
  478                         counter_u64_add(tx_stats->dma_mapping_err, 1);
  479 
  480                         /* The mbuf is still there. Free it */
  481                         m_freem(mbuf);
  482                         /* Advance the drbr queue */
  483                         drbr_advance(ndev, txq->txq_br);
  484                         continue;
  485                 }
  486 
  487                 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
  488                 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
  489 
  490                 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
  491                         pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
  492                         pkt_fmt = MANA_LONG_PKT_FMT;
  493                 } else {
  494                         pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
  495                 }
  496 
  497                 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
  498 
  499                 if (pkt_fmt == MANA_SHORT_PKT_FMT)
  500                         pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
  501                 else
  502                         pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
  503 
  504                 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
  505                 pkg.wqe_req.flags = 0;
  506                 pkg.wqe_req.client_data_unit = 0;
  507 
  508                 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
  509                         if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
  510                                 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
  511                         else
  512                                 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
  513 
  514                         pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
  515                         pkg.tx_oob.s_oob.comp_tcp_csum = 1;
  516                         pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
  517 
  518                         pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
  519                         pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
  520                 } else if (mbuf->m_pkthdr.csum_flags &
  521                     (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
  522                         if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
  523                                 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
  524                                 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
  525                         } else {
  526                                 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
  527                         }
  528 
  529                         if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
  530                                 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
  531                                 pkg.tx_oob.s_oob.trans_off =
  532                                     mbuf->m_pkthdr.l3hlen;
  533                         } else {
  534                                 pkg.tx_oob.s_oob.comp_udp_csum = 1;
  535                         }
  536                 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
  537                         pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
  538                         pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
  539                 } else {
  540                         if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
  541                                 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
  542                         else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
  543                                 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
  544                 }
  545 
  546                 len = mbuf->m_pkthdr.len;
  547 
  548                 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
  549                     (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
  550                 if (unlikely(err)) {
  551                         /* Should not happen */
  552                         if_printf(ndev, "Failed to post TX OOB: %d\n", err);
  553 
  554                         mana_tx_unmap_mbuf(apc, tx_info);
  555 
  556                         drbr_advance(ndev, txq->txq_br);
  557                         continue;
  558                 }
  559 
  560                 next_to_use =
  561                     (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
  562 
  563                 (void)atomic_inc_return(&txq->pending_sends);
  564 
  565                 drbr_advance(ndev, txq->txq_br);
  566 
  567                 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
  568 
  569                 packets++;
  570                 bytes += len;
  571         }
  572 
  573         counter_enter();
  574         counter_u64_add_protected(tx_stats->packets, packets);
  575         counter_u64_add_protected(port_stats->tx_packets, packets);
  576         counter_u64_add_protected(tx_stats->bytes, bytes);
  577         counter_u64_add_protected(port_stats->tx_bytes, bytes);
  578         counter_exit();
  579 
  580         txq->next_to_use = next_to_use;
  581 }
  582 
  583 static void
  584 mana_xmit_taskfunc(void *arg, int pending)
  585 {
  586         struct mana_txq *txq = (struct mana_txq *)arg;
  587         struct ifnet *ndev = txq->ndev;
  588         struct mana_port_context *apc = if_getsoftc(ndev);
  589 
  590         while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
  591             (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
  592                 mtx_lock(&txq->txq_mtx);
  593                 mana_xmit(txq);
  594                 mtx_unlock(&txq->txq_mtx);
  595         }
  596 }
  597 
  598 #define PULLUP_HDR(m, len)                              \
  599 do {                                                    \
  600         if (unlikely((m)->m_len < (len))) {             \
  601                 (m) = m_pullup((m), (len));             \
  602                 if ((m) == NULL)                        \
  603                         return (NULL);                  \
  604         }                                               \
  605 } while (0)
  606 
  607 /*
  608  * If this function failed, the mbuf would be freed.
  609  */
  610 static inline struct mbuf *
  611 mana_tso_fixup(struct mbuf *mbuf)
  612 {
  613         struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
  614         struct tcphdr *th;
  615         uint16_t etype;
  616         int ehlen;
  617 
  618         if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
  619                 etype = ntohs(eh->evl_proto);
  620                 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
  621         } else {
  622                 etype = ntohs(eh->evl_encap_proto);
  623                 ehlen = ETHER_HDR_LEN;
  624         }
  625 
  626         if (etype == ETHERTYPE_IP) {
  627                 struct ip *ip;
  628                 int iphlen;
  629 
  630                 PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
  631                 ip = mtodo(mbuf, ehlen);
  632                 iphlen = ip->ip_hl << 2;
  633                 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
  634 
  635                 PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
  636                 th = mtodo(mbuf, ehlen + iphlen);
  637 
  638                 ip->ip_len = 0;
  639                 ip->ip_sum = 0;
  640                 th->th_sum = in_pseudo(ip->ip_src.s_addr,
  641                     ip->ip_dst.s_addr, htons(IPPROTO_TCP));
  642         } else if (etype == ETHERTYPE_IPV6) {
  643                 struct ip6_hdr *ip6;
  644 
  645                 PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
  646                 ip6 = mtodo(mbuf, ehlen);
  647                 if (ip6->ip6_nxt != IPPROTO_TCP) {
   648                         /* Something is really wrong; just return */
  649                         mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
  650                         m_freem(mbuf);
  651                         return NULL;
  652                 }
  653                 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
  654 
  655                 th = mtodo(mbuf, ehlen + sizeof(*ip6));
  656 
  657                 ip6->ip6_plen = 0;
  658                 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
  659         } else {
  660                 /* CSUM_TSO is set but not IP protocol. */
  661                 mana_warn(NULL, "TSO mbuf not right, freed.\n");
  662                 m_freem(mbuf);
  663                 return NULL;
  664         }
  665 
  666         MANA_L3_PROTO(mbuf) = etype;
  667 
  668         return (mbuf);
  669 }
  670 
  671 /*
  672  * If this function failed, the mbuf would be freed.
  673  */
  674 static inline struct mbuf *
  675 mana_mbuf_csum_check(struct mbuf *mbuf)
  676 {
  677         struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
  678         struct mbuf *mbuf_next;
  679         uint16_t etype;
  680         int offset;
  681         int ehlen;
  682 
  683         if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
  684                 etype = ntohs(eh->evl_proto);
  685                 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
  686         } else {
  687                 etype = ntohs(eh->evl_encap_proto);
  688                 ehlen = ETHER_HDR_LEN;
  689         }
  690 
  691         mbuf_next = m_getptr(mbuf, ehlen, &offset);
  692 
  693         MANA_L4_PROTO(mbuf) = 0;
  694         if (etype == ETHERTYPE_IP) {
  695                 const struct ip *ip;
  696                 int iphlen;
  697 
  698                 ip = (struct ip *)(mtodo(mbuf_next, offset));
  699                 iphlen = ip->ip_hl << 2;
  700                 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
  701 
  702                 MANA_L4_PROTO(mbuf) = ip->ip_p;
  703         } else if (etype == ETHERTYPE_IPV6) {
  704                 const struct ip6_hdr *ip6;
  705 
  706                 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
  707                 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
  708 
  709                 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
  710         } else {
  711                 MANA_L4_PROTO(mbuf) = 0;
  712         }
  713 
  714         MANA_L3_PROTO(mbuf) = etype;
  715 
  716         return (mbuf);
  717 }
  718 
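       /*
        * if_transmit entry point: fix up TSO/checksum metadata, pick a
        * transmit queue from the RSS indirection table (or the flow ID),
        * enqueue the mbuf on that queue's buf_ring, and either transmit
        * inline or defer to the queue's enqueue task.
        */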
  719 static int
  720 mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
  721 {
  722         struct mana_port_context *apc = if_getsoftc(ifp);
  723         struct mana_txq *txq;
  724         int is_drbr_empty;
  725         uint16_t txq_id;
  726         int err;
  727 
  728         if (unlikely((!apc->port_is_up) ||
  729             (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
  730                 return ENODEV;
  731 
  732         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
  733                 m = mana_tso_fixup(m);
  734                 if (unlikely(m == NULL)) {
  735                         counter_enter();
  736                         counter_u64_add_protected(apc->port_stats.tx_drops, 1);
  737                         counter_exit();
  738                         return EIO;
  739                 }
  740         } else {
  741                 m = mana_mbuf_csum_check(m);
  742                 if (unlikely(m == NULL)) {
  743                         counter_enter();
  744                         counter_u64_add_protected(apc->port_stats.tx_drops, 1);
  745                         counter_exit();
  746                         return EIO;
  747                 }
  748         }
  749 
  750         if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
  751                 uint32_t hash = m->m_pkthdr.flowid;
  752                 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
  753                     apc->num_queues;
  754         } else {
  755                 txq_id = m->m_pkthdr.flowid % apc->num_queues;
  756         }
  757 
  758         if (apc->enable_tx_altq)
  759                 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
  760 
  761         txq = &apc->tx_qp[txq_id].txq;
  762 
  763         is_drbr_empty = drbr_empty(ifp, txq->txq_br);
  764         err = drbr_enqueue(ifp, txq->txq_br, m);
  765         if (unlikely(err)) {
  766                 mana_warn(NULL, "txq %u failed to enqueue: %d\n",
  767                     txq_id, err);
  768                 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
  769                 return err;
  770         }
  771 
  772         if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
  773                 mana_xmit(txq);
  774                 mtx_unlock(&txq->txq_mtx);
  775         } else {
  776                 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
  777         }
  778 
  779         return 0;
  780 }
  781 
  782 static void
  783 mana_cleanup_port_context(struct mana_port_context *apc)
  784 {
  785         bus_dma_tag_destroy(apc->tx_buf_tag);
  786         bus_dma_tag_destroy(apc->rx_buf_tag);
  787         apc->rx_buf_tag = NULL;
  788 
  789         free(apc->rxqs, M_DEVBUF);
  790         apc->rxqs = NULL;
  791 
  792         mana_free_counters((counter_u64_t *)&apc->port_stats,
  793             sizeof(struct mana_port_stats));
  794 }
  795 
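       /*
        * Create the DMA tags for TX buffers (multi-segment, TSO-sized) and
        * RX buffers (single segment, jumbo-page sized), and allocate the
        * per-port array of RX queue pointers.
        */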
  796 static int
  797 mana_init_port_context(struct mana_port_context *apc)
  798 {
  799         device_t dev = apc->ac->gdma_dev->gdma_context->dev;
  800         uint32_t tso_maxsize;
  801         int err;
  802 
  803         tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
  804             (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
  805 
  806         /* Create DMA tag for tx bufs */
  807         err = bus_dma_tag_create(bus_get_dma_tag(dev),  /* parent */
  808             1, 0,                       /* alignment, boundary  */
  809             BUS_SPACE_MAXADDR,          /* lowaddr              */
  810             BUS_SPACE_MAXADDR,          /* highaddr             */
  811             NULL, NULL,                 /* filter, filterarg    */
  812             tso_maxsize,                /* maxsize              */
  813             MAX_MBUF_FRAGS,             /* nsegments            */
  814             tso_maxsize,                /* maxsegsize           */
  815             0,                          /* flags                */
  816             NULL, NULL,                 /* lockfunc, lockfuncarg*/
  817             &apc->tx_buf_tag);
  818         if (unlikely(err)) {
   819                 device_printf(dev, "Failed to create TX DMA tag\n");
  820                 return err;
  821         }
  822 
  823         /* Create DMA tag for rx bufs */
  824         err = bus_dma_tag_create(bus_get_dma_tag(dev),  /* parent */
  825             64, 0,                      /* alignment, boundary  */
  826             BUS_SPACE_MAXADDR,          /* lowaddr              */
  827             BUS_SPACE_MAXADDR,          /* highaddr             */
  828             NULL, NULL,                 /* filter, filterarg    */
  829             MJUMPAGESIZE,               /* maxsize              */
  830             1,                          /* nsegments            */
  831             MJUMPAGESIZE,               /* maxsegsize           */
  832             0,                          /* flags                */
  833             NULL, NULL,                 /* lockfunc, lockfuncarg*/
  834             &apc->rx_buf_tag);
  835         if (unlikely(err)) {
   836                 device_printf(dev, "Failed to create RX DMA tag\n");
  837                 return err;
  838         }
  839 
  840         apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
  841             M_DEVBUF, M_WAITOK | M_ZERO);
  842 
  843         if (!apc->rxqs) {
  844                 bus_dma_tag_destroy(apc->tx_buf_tag);
  845                 bus_dma_tag_destroy(apc->rx_buf_tag);
  846                 apc->rx_buf_tag = NULL;
  847                 return ENOMEM;
  848         }
  849 
  850         return 0;
  851 }
  852 
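       /*
        * Send a management request over the GDMA channel, tagging it with
        * a fresh activity ID, and verify that the response carries the
        * same device and activity IDs as the request.
        */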
  853 static int
  854 mana_send_request(struct mana_context *ac, void *in_buf,
  855     uint32_t in_len, void *out_buf, uint32_t out_len)
  856 {
  857         struct gdma_context *gc = ac->gdma_dev->gdma_context;
  858         struct gdma_resp_hdr *resp = out_buf;
  859         struct gdma_req_hdr *req = in_buf;
  860         device_t dev = gc->dev;
  861         static atomic_t activity_id;
  862         int err;
  863 
  864         req->dev_id = gc->mana.dev_id;
  865         req->activity_id = atomic_inc_return(&activity_id);
  866 
  867         mana_dbg(NULL, "activity_id  = %u\n", activity_id);
  868 
  869         err = mana_gd_send_request(gc, in_len, in_buf, out_len,
  870             out_buf);
  871         if (err || resp->status) {
  872                 device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
  873                         err, resp->status);
  874                 return err ? err : EPROTO;
  875         }
  876 
  877         if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
  878             req->activity_id != resp->activity_id) {
  879                 device_printf(dev,
  880                     "Unexpected mana message response: %x,%x,%x,%x\n",
  881                     req->dev_id.as_uint32, resp->dev_id.as_uint32,
  882                     req->activity_id, resp->activity_id);
  883                 return EPROTO;
  884         }
  885 
  886         return 0;
  887 }
  888 
  889 static int
  890 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
  891     const enum mana_command_code expected_code,
  892     const uint32_t min_size)
  893 {
  894         if (resp_hdr->response.msg_type != expected_code)
  895                 return EPROTO;
  896 
  897         if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
  898                 return EPROTO;
  899 
  900         if (resp_hdr->response.msg_size < min_size)
  901                 return EPROTO;
  902 
  903         return 0;
  904 }
  905 
  906 static int
  907 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
  908     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
  909     uint16_t *max_num_vports)
  910 {
  911         struct gdma_context *gc = ac->gdma_dev->gdma_context;
  912         struct mana_query_device_cfg_resp resp = {};
  913         struct mana_query_device_cfg_req req = {};
  914         device_t dev = gc->dev;
  915         int err = 0;
  916 
  917         mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
  918             sizeof(req), sizeof(resp));
  919         req.proto_major_ver = proto_major_ver;
  920         req.proto_minor_ver = proto_minor_ver;
  921         req.proto_micro_ver = proto_micro_ver;
  922 
  923         err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
  924         if (err) {
  925                 device_printf(dev, "Failed to query config: %d", err);
  926                 return err;
  927         }
  928 
  929         err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
  930             sizeof(resp));
  931         if (err || resp.hdr.status) {
  932                 device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
  933                     resp.hdr.status);
  934                 if (!err)
  935                         err = EPROTO;
  936                 return err;
  937         }
  938 
  939         *max_num_vports = resp.max_num_vports;
  940 
  941         mana_dbg(NULL, "mana max_num_vports from device = %d\n",
  942             *max_num_vports);
  943 
  944         return 0;
  945 }
  946 
  947 static int
  948 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
  949     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
  950 {
  951         struct mana_query_vport_cfg_resp resp = {};
  952         struct mana_query_vport_cfg_req req = {};
  953         int err;
  954 
  955         mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
  956             sizeof(req), sizeof(resp));
  957 
  958         req.vport_index = vport_index;
  959 
  960         err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
  961             sizeof(resp));
  962         if (err)
  963                 return err;
  964 
  965         err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
  966             sizeof(resp));
  967         if (err)
  968                 return err;
  969 
  970         if (resp.hdr.status)
  971                 return EPROTO;
  972 
  973         *max_sq = resp.max_num_sq;
  974         *max_rq = resp.max_num_rq;
  975         *num_indir_entry = resp.num_indirection_ent;
  976 
  977         apc->port_handle = resp.vport;
  978         memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
  979 
  980         return 0;
  981 }
  982 
  983 void
  984 mana_uncfg_vport(struct mana_port_context *apc)
  985 {
  986         MANA_APC_LOCK_LOCK(apc);
  987         apc->vport_use_count--;
  988         if (apc->vport_use_count < 0) {
  989                 mana_err(NULL,
   990                     "WARNING: vport_use_count less than 0: %d\n",
  991                     apc->vport_use_count);
  992         }
  993         MANA_APC_LOCK_UNLOCK(apc);
  994 }
  995 
  996 int
  997 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
  998     uint32_t doorbell_pg_id)
  999 {
 1000         struct mana_config_vport_resp resp = {};
 1001         struct mana_config_vport_req req = {};
 1002         int err;
 1003 
 1004         /* This function is used to program the Ethernet port in the hardware
 1005          * table. It can be called from the Ethernet driver or the RDMA driver.
 1006          *
 1007          * For Ethernet usage, the hardware supports only one active user on a
 1008          * physical port. The driver checks on the port usage before programming
 1009          * the hardware when creating the RAW QP (RDMA driver) or exposing the
 1010          * device to kernel NET layer (Ethernet driver).
 1011          *
 1012          * Because the RDMA driver doesn't know in advance which QP type the
 1013          * user will create, it exposes the device with all its ports. The user
  1014          * may not be able to create a RAW QP on a port if that port is already
  1015          * in use by the Ethernet driver in the kernel.
 1016          *
 1017          * This physical port limitation only applies to the RAW QP. For RC QP,
 1018          * the hardware doesn't have this limitation. The user can create RC
 1019          * QPs on a physical port up to the hardware limits independent of the
 1020          * Ethernet usage on the same port.
 1021          */
 1022         MANA_APC_LOCK_LOCK(apc);
 1023         if (apc->vport_use_count > 0) {
 1024                 MANA_APC_LOCK_UNLOCK(apc);
 1025                 return EBUSY;
 1026         }
 1027         apc->vport_use_count++;
 1028         MANA_APC_LOCK_UNLOCK(apc);
 1029 
 1030         mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
 1031             sizeof(req), sizeof(resp));
 1032         req.vport = apc->port_handle;
 1033         req.pdid = protection_dom_id;
 1034         req.doorbell_pageid = doorbell_pg_id;
 1035 
 1036         err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 1037             sizeof(resp));
 1038         if (err) {
 1039                 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
 1040                 goto out;
 1041         }
 1042 
 1043         err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
 1044             sizeof(resp));
 1045         if (err || resp.hdr.status) {
 1046                 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
 1047                     err, resp.hdr.status);
 1048                 if (!err)
 1049                         err = EPROTO;
 1050 
 1051                 goto out;
 1052         }
 1053 
 1054         apc->tx_shortform_allowed = resp.short_form_allowed;
 1055         apc->tx_vp_offset = resp.tx_vport_offset;
 1056 
 1057         if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
 1058             apc->port_handle, protection_dom_id, doorbell_pg_id);
 1059 
 1060 out:
 1061         if (err)
 1062                 mana_uncfg_vport(apc);
 1063 
 1064         return err;
 1065 }
 1066 
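       /*
        * Configure RX steering for the vPort: the RSS hash key, the
        * indirection table of RX object handles, and the default RX
        * object, as selected by the update_* flags.
        */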
 1067 static int
 1068 mana_cfg_vport_steering(struct mana_port_context *apc,
 1069     enum TRI_STATE rx,
 1070     bool update_default_rxobj, bool update_key,
 1071     bool update_tab)
 1072 {
 1073         uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
 1074         struct mana_cfg_rx_steer_req *req = NULL;
 1075         struct mana_cfg_rx_steer_resp resp = {};
 1076         struct ifnet *ndev = apc->ndev;
 1077         mana_handle_t *req_indir_tab;
 1078         uint32_t req_buf_size;
 1079         int err;
 1080 
 1081         req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
 1082         req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
 1083         if (!req)
 1084                 return ENOMEM;
 1085 
 1086         mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
 1087             sizeof(resp));
 1088 
 1089         req->vport = apc->port_handle;
 1090         req->num_indir_entries = num_entries;
 1091         req->indir_tab_offset = sizeof(*req);
 1092         req->rx_enable = rx;
 1093         req->rss_enable = apc->rss_state;
 1094         req->update_default_rxobj = update_default_rxobj;
 1095         req->update_hashkey = update_key;
 1096         req->update_indir_tab = update_tab;
 1097         req->default_rxobj = apc->default_rxobj;
 1098 
 1099         if (update_key)
 1100                 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
 1101 
 1102         if (update_tab) {
 1103                 req_indir_tab = (mana_handle_t *)(req + 1);
 1104                 memcpy(req_indir_tab, apc->rxobj_table,
 1105                        req->num_indir_entries * sizeof(mana_handle_t));
 1106         }
 1107 
 1108         err = mana_send_request(apc->ac, req, req_buf_size, &resp,
 1109             sizeof(resp));
 1110         if (err) {
 1111                 if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
 1112                 goto out;
 1113         }
 1114 
 1115         err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
 1116             sizeof(resp));
 1117         if (err) {
 1118                 if_printf(ndev, "vPort RX configuration failed: %d\n", err);
 1119                 goto out;
 1120         }
 1121 
 1122         if (resp.hdr.status) {
 1123                 if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
 1124                     resp.hdr.status);
 1125                 err = EPROTO;
 1126         }
 1127 
 1128         if_printf(ndev, "Configured steering vPort %ju entries %u\n",
 1129             apc->port_handle, num_entries);
 1130 
 1131 out:
 1132         free(req, M_DEVBUF);
 1133         return err;
 1134 }
 1135 
 1136 int
 1137 mana_create_wq_obj(struct mana_port_context *apc,
 1138     mana_handle_t vport,
 1139     uint32_t wq_type, struct mana_obj_spec *wq_spec,
 1140     struct mana_obj_spec *cq_spec,
 1141     mana_handle_t *wq_obj)
 1142 {
 1143         struct mana_create_wqobj_resp resp = {};
 1144         struct mana_create_wqobj_req req = {};
 1145         struct ifnet *ndev = apc->ndev;
 1146         int err;
 1147 
 1148         mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
 1149             sizeof(req), sizeof(resp));
 1150         req.vport = vport;
 1151         req.wq_type = wq_type;
 1152         req.wq_gdma_region = wq_spec->gdma_region;
 1153         req.cq_gdma_region = cq_spec->gdma_region;
 1154         req.wq_size = wq_spec->queue_size;
 1155         req.cq_size = cq_spec->queue_size;
 1156         req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
 1157         req.cq_parent_qid = cq_spec->attached_eq;
 1158 
 1159         err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 1160             sizeof(resp));
 1161         if (err) {
 1162                 if_printf(ndev, "Failed to create WQ object: %d\n", err);
 1163                 goto out;
 1164         }
 1165 
 1166         err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
 1167             sizeof(resp));
 1168         if (err || resp.hdr.status) {
 1169                 if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
 1170                     resp.hdr.status);
 1171                 if (!err)
 1172                         err = EPROTO;
 1173                 goto out;
 1174         }
 1175 
 1176         if (resp.wq_obj == INVALID_MANA_HANDLE) {
 1177                 if_printf(ndev, "Got an invalid WQ object handle\n");
 1178                 err = EPROTO;
 1179                 goto out;
 1180         }
 1181 
 1182         *wq_obj = resp.wq_obj;
 1183         wq_spec->queue_index = resp.wq_id;
 1184         cq_spec->queue_index = resp.cq_id;
 1185 
 1186         return 0;
 1187 out:
 1188         return err;
 1189 }
 1190 
 1191 void
 1192 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
 1193     mana_handle_t wq_obj)
 1194 {
 1195         struct mana_destroy_wqobj_resp resp = {};
 1196         struct mana_destroy_wqobj_req req = {};
 1197         struct ifnet *ndev = apc->ndev;
 1198         int err;
 1199 
 1200         mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
 1201             sizeof(req), sizeof(resp));
 1202         req.wq_type = wq_type;
 1203         req.wq_obj_handle = wq_obj;
 1204 
 1205         err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 1206             sizeof(resp));
 1207         if (err) {
 1208                 if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
 1209                 return;
 1210         }
 1211 
 1212         err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
 1213             sizeof(resp));
 1214         if (err || resp.hdr.status)
 1215                 if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
 1216                     err, resp.hdr.status);
 1217 }
 1218 
 1219 static void
 1220 mana_destroy_eq(struct mana_context *ac)
 1221 {
 1222         struct gdma_context *gc = ac->gdma_dev->gdma_context;
 1223         struct gdma_queue *eq;
 1224         int i;
 1225 
 1226         if (!ac->eqs)
 1227                 return;
 1228 
 1229         for (i = 0; i < gc->max_num_queues; i++) {
 1230                 eq = ac->eqs[i].eq;
 1231                 if (!eq)
 1232                         continue;
 1233 
 1234                 mana_gd_destroy_queue(gc, eq);
 1235         }
 1236 
 1237         free(ac->eqs, M_DEVBUF);
 1238         ac->eqs = NULL;
 1239 }
 1240 
 1241 static int
 1242 mana_create_eq(struct mana_context *ac)
 1243 {
 1244         struct gdma_dev *gd = ac->gdma_dev;
 1245         struct gdma_context *gc = gd->gdma_context;
 1246         struct gdma_queue_spec spec = {};
 1247         int err;
 1248         int i;
 1249 
 1250         ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
 1251             M_DEVBUF, M_WAITOK | M_ZERO);
 1252         if (!ac->eqs)
 1253                 return ENOMEM;
 1254 
 1255         spec.type = GDMA_EQ;
 1256         spec.monitor_avl_buf = false;
 1257         spec.queue_size = EQ_SIZE;
 1258         spec.eq.callback = NULL;
 1259         spec.eq.context = ac->eqs;
 1260         spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 1261 
 1262         for (i = 0; i < gc->max_num_queues; i++) {
 1263                 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
 1264                 if (err)
 1265                         goto out;
 1266         }
 1267 
 1268         return 0;
 1269 out:
 1270         mana_destroy_eq(ac);
 1271         return err;
 1272 }
 1273 
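       /*
        * Issue a FENCE_RQ request for the given RX queue and wait up to
        * ten seconds for the corresponding fence completion.
        */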
 1274 static int
 1275 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
 1276 {
 1277         struct mana_fence_rq_resp resp = {};
 1278         struct mana_fence_rq_req req = {};
 1279         int err;
 1280 
 1281         init_completion(&rxq->fence_event);
 1282 
 1283         mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
 1284             sizeof(req), sizeof(resp));
 1285         req.wq_obj_handle = rxq->rxobj;
 1286 
 1287         err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 1288             sizeof(resp));
 1289         if (err) {
 1290                 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
 1291                     rxq->rxq_idx, err);
 1292                 return err;
 1293         }
 1294 
 1295         err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
 1296         if (err || resp.hdr.status) {
 1297                 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
 1298                     rxq->rxq_idx, err, resp.hdr.status);
 1299                 if (!err)
 1300                         err = EPROTO;
 1301 
 1302                 return err;
 1303         }
 1304 
 1305         if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
 1306                 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
 1307                     rxq->rxq_idx);
 1308                 return ETIMEDOUT;
 1309         }
 1310 
 1311         return 0;
 1312 }
 1313 
 1314 static void
 1315 mana_fence_rqs(struct mana_port_context *apc)
 1316 {
 1317         unsigned int rxq_idx;
 1318         struct mana_rxq *rxq;
 1319         int err;
 1320 
 1321         for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
 1322                 rxq = apc->rxqs[rxq_idx];
 1323                 err = mana_fence_rq(apc, rxq);
 1324 
 1325                 /* In case of any error, use sleep instead. */
 1326                 if (err)
 1327                         gdma_msleep(100);
 1328         }
 1329 }
 1330 
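       /*
        * Advance the work queue tail by the given number of work-queue
        * units, refusing to release more space than is currently in use.
        */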
 1331 static int
 1332 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
 1333 {
 1334         uint32_t used_space_old;
 1335         uint32_t used_space_new;
 1336 
 1337         used_space_old = wq->head - wq->tail;
 1338         used_space_new = wq->head - (wq->tail + num_units);
 1339 
 1340         if (used_space_new > used_space_old) {
 1341                 mana_err(NULL,
 1342                     "WARNING: new used space %u greater than old one %u\n",
 1343                     used_space_new, used_space_old);
 1344                 return ERANGE;
 1345         }
 1346 
 1347         wq->tail += num_units;
 1348         return 0;
 1349 }
 1350 
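       /*
        * Process TX completions: read CQEs, unmap and free the transmitted
        * mbufs, advance the send queue tail, and clear IFF_DRV_OACTIVE to
        * wake the queue once enough space is available again.
        */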
 1351 static void
 1352 mana_poll_tx_cq(struct mana_cq *cq)
 1353 {
 1354         struct gdma_comp *completions = cq->gdma_comp_buf;
 1355         struct gdma_posted_wqe_info *wqe_info;
 1356         struct mana_send_buf_info *tx_info;
 1357         unsigned int pkt_transmitted = 0;
 1358         unsigned int wqe_unit_cnt = 0;
 1359         struct mana_txq *txq = cq->txq;
 1360         struct mana_port_context *apc;
 1361         uint16_t next_to_complete;
 1362         struct ifnet *ndev;
 1363         int comp_read;
  1364         int txq_idx = txq->idx;
 1365         int i;
 1366         int sa_drop = 0;
 1367 
 1368         struct gdma_queue *gdma_wq;
 1369         unsigned int avail_space;
 1370         bool txq_full = false;
 1371 
 1372         ndev = txq->ndev;
 1373         apc = if_getsoftc(ndev);
 1374 
 1375         comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
 1376             CQE_POLLING_BUFFER);
 1377 
 1378         if (comp_read < 1)
 1379                 return;
 1380 
 1381         next_to_complete = txq->next_to_complete;
 1382 
 1383         for (i = 0; i < comp_read; i++) {
 1384                 struct mana_tx_comp_oob *cqe_oob;
 1385 
 1386                 if (!completions[i].is_sq) {
 1387                         mana_err(NULL, "WARNING: Not for SQ\n");
 1388                         return;
 1389                 }
 1390 
 1391                 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
 1392                 if (cqe_oob->cqe_hdr.client_type !=
 1393                                  MANA_CQE_COMPLETION) {
 1394                         mana_err(NULL,
 1395                             "WARNING: Invalid CQE client type %u\n",
 1396                             cqe_oob->cqe_hdr.client_type);
 1397                         return;
 1398                 }
 1399 
 1400                 switch (cqe_oob->cqe_hdr.cqe_type) {
 1401                 case CQE_TX_OKAY:
 1402                         break;
 1403 
 1404                 case CQE_TX_SA_DROP:
 1405                 case CQE_TX_MTU_DROP:
 1406                 case CQE_TX_INVALID_OOB:
 1407                 case CQE_TX_INVALID_ETH_TYPE:
 1408                 case CQE_TX_HDR_PROCESSING_ERROR:
 1409                 case CQE_TX_VF_DISABLED:
 1410                 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
 1411                 case CQE_TX_VPORT_DISABLED:
 1412                 case CQE_TX_VLAN_TAGGING_VIOLATION:
  1413                         sa_drop++;
 1414                         mana_err(NULL,
 1415                             "TX: txq %d CQE error %d, ntc = %d, "
 1416                             "pending sends = %d: err ignored.\n",
 1417                             txq_idx, cqe_oob->cqe_hdr.cqe_type,
 1418                             next_to_complete, txq->pending_sends);
 1419                         break;
 1420 
 1421                 default:
 1422                         /* If the CQE type is unexpected, log an error,
 1423                          * and go through the error path.
 1424                          */
 1425                         mana_err(NULL,
 1426                             "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
 1427                             cqe_oob->cqe_hdr.cqe_type);
 1428                         return;
 1429                 }
 1430                 if (txq->gdma_txq_id != completions[i].wq_num) {
 1431                         mana_dbg(NULL,
 1432                             "txq gdma id does not match completion wq num: "
 1433                             "%d != %d\n",
 1434                             txq->gdma_txq_id, completions[i].wq_num);
 1435                         break;
 1436                 }
 1437 
 1438                 tx_info = &txq->tx_buf_info[next_to_complete];
 1439                 if (!tx_info->mbuf) {
 1440                         mana_err(NULL,
 1441                             "WARNING: txq %d Empty mbuf on tx_info: %u, "
 1442                             "ntu = %u, pending_sends = %d, "
 1443                             "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
 1444                             txq_idx, next_to_complete, txq->next_to_use,
 1445                             txq->pending_sends, pkt_transmitted, sa_drop,
 1446                             i, comp_read);
 1447                         break;
 1448                 }
 1449 
 1450                 wqe_info = &tx_info->wqe_inf;
 1451                 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
 1452 
 1453                 mana_tx_unmap_mbuf(apc, tx_info);
 1454                 mb();
 1455 
 1456                 next_to_complete =
 1457                     (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
 1458 
 1459                 pkt_transmitted++;
 1460         }
 1461 
 1462         txq->next_to_complete = next_to_complete;
 1463 
 1464         if (wqe_unit_cnt == 0) {
 1465                 mana_err(NULL,
 1466                     "WARNING: TX ring not proceeding!\n");
 1467                 return;
 1468         }
 1469 
 1470         mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
 1471 
 1472         /* Ensure tail updated before checking q stop */
 1473         wmb();
 1474 
 1475         gdma_wq = txq->gdma_sq;
 1476         avail_space = mana_gd_wq_avail_space(gdma_wq);
 1477 
 1478 
 1479         if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
 1480                 txq_full = true;
 1481         }
 1482 
 1483         /* Ensure checking txq_full before apc->port_is_up. */
 1484         rmb();
 1485 
 1486         if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
 1487                 /* Grab the txq lock and re-test */
 1488                 mtx_lock(&txq->txq_mtx);
 1489                 avail_space = mana_gd_wq_avail_space(gdma_wq);
 1490 
 1491                 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
 1492                     apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
 1493                         /* Clear the Q full flag */
 1494                         if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
 1495                             IFF_DRV_OACTIVE);
 1496                         counter_u64_add(txq->stats.wakeup, 1);
 1497                         if (txq->alt_txq_idx != txq->idx) {
 1498                                 uint64_t stops = counter_u64_fetch(txq->stats.stop);
 1499                                 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
 1500                                 /* Reset alt_txq_idx back if it is not overloaded */
 1501                                 if (stops < wakeups) {
 1502                                         txq->alt_txq_idx = txq->idx;
 1503                                         counter_u64_add(txq->stats.alt_reset, 1);
 1504                                 }
 1505                         }
 1506                         rmb();
 1507                         /* Schedule a tx enqueue task */
 1508                         taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
 1509                 }
 1510                 mtx_unlock(&txq->txq_mtx);
 1511         }
 1512 
 1513         if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
 1514                 mana_err(NULL,
 1515                     "WARNING: TX %d pending_sends error: %d\n",
 1516                     txq->idx, txq->pending_sends);
 1517 
 1518         cq->work_done = pkt_transmitted;
 1519 }
 1520 
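      /*
       * Re-post the receive WQE for the current buffer slot and advance
       * the ring buffer index so the slot can be filled by hardware again.
       */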
 1521 static void
 1522 mana_post_pkt_rxq(struct mana_rxq *rxq)
 1523 {
 1524         struct mana_recv_buf_oob *recv_buf_oob;
 1525         uint32_t curr_index;
 1526         int err;
 1527 
 1528         curr_index = rxq->buf_index++;
 1529         if (rxq->buf_index == rxq->num_rx_buf)
 1530                 rxq->buf_index = 0;
 1531 
 1532         recv_buf_oob = &rxq->rx_oobs[curr_index];
 1533 
 1534         err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
 1535             &recv_buf_oob->wqe_inf);
 1536         if (err) {
 1537                 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
 1538                     rxq->rxq_idx, err);
 1539                 return;
 1540         }
 1541 
 1542         if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
 1543                 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
 1544                     rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
 1545         }
 1546 }
 1547 
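      /*
       * Finish a received mbuf: set the packet header fields, translate
       * the completion's checksum and RSS hash information into mbuf
       * metadata, then hand the packet to LRO or directly to the network
       * stack and update the RX counters.
       */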
 1548 static void
 1549 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
 1550     struct mana_rxq *rxq)
 1551 {
 1552         struct mana_stats *rx_stats = &rxq->stats;
 1553         struct ifnet *ndev = rxq->ndev;
 1554         uint32_t pkt_len = cqe->ppi[0].pkt_len;
 1555         uint16_t rxq_idx = rxq->rxq_idx;
 1556         struct mana_port_context *apc;
 1557         bool do_lro = false;
 1558         bool do_if_input;
 1559 
 1560         apc = if_getsoftc(ndev);
 1561         rxq->rx_cq.work_done++;
 1562 
 1563         if (!mbuf) {
 1564                 return;
 1565         }
 1566 
 1567         mbuf->m_flags |= M_PKTHDR;
 1568         mbuf->m_pkthdr.len = pkt_len;
 1569         mbuf->m_len = pkt_len;
 1570         mbuf->m_pkthdr.rcvif = ndev;
 1571 
 1572         if ((ndev->if_capenable & IFCAP_RXCSUM ||
 1573             ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
 1574             (cqe->rx_iphdr_csum_succeed)) {
 1575                 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
 1576                 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1577                 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
 1578                         mbuf->m_pkthdr.csum_flags |=
 1579                             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 1580                         mbuf->m_pkthdr.csum_data = 0xffff;
 1581 
 1582                         if (cqe->rx_tcp_csum_succeed)
 1583                                 do_lro = true;
 1584                 }
 1585         }
 1586 
 1587         if (cqe->rx_hashtype != 0) {
 1588                 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
 1589 
 1590                 uint16_t hashtype = cqe->rx_hashtype;
 1591                 if (hashtype & NDIS_HASH_IPV4_MASK) {
 1592                         hashtype &= NDIS_HASH_IPV4_MASK;
 1593                         switch (hashtype) {
 1594                         case NDIS_HASH_TCP_IPV4:
 1595                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
 1596                                 break;
 1597                         case NDIS_HASH_UDP_IPV4:
 1598                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
 1599                                 break;
 1600                         default:
 1601                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
 1602                         }
 1603                 } else if (hashtype & NDIS_HASH_IPV6_MASK) {
 1604                         hashtype &= NDIS_HASH_IPV6_MASK;
 1605                         switch (hashtype) {
 1606                         case NDIS_HASH_TCP_IPV6:
 1607                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
 1608                                 break;
 1609                         case NDIS_HASH_TCP_IPV6_EX:
 1610                                 M_HASHTYPE_SET(mbuf,
 1611                                     M_HASHTYPE_RSS_TCP_IPV6_EX);
 1612                                 break;
 1613                         case NDIS_HASH_UDP_IPV6:
 1614                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
 1615                                 break;
 1616                         case NDIS_HASH_UDP_IPV6_EX:
 1617                                 M_HASHTYPE_SET(mbuf,
 1618                                     M_HASHTYPE_RSS_UDP_IPV6_EX);
 1619                                 break;
 1620                         default:
 1621                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
 1622                         }
 1623                 } else {
 1624                         M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
 1625                 }
 1626         } else {
 1627                 mbuf->m_pkthdr.flowid = rxq_idx;
 1628                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
 1629         }
 1630 
 1631         do_if_input = true;
 1632         if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
 1633                 if (rxq->lro.lro_cnt != 0 &&
 1634                     tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
 1635                         do_if_input = false;
 1636         }
 1637         if (do_if_input) {
 1638                 ndev->if_input(ndev, mbuf);
 1639         }
 1640 
 1641         counter_enter();
 1642         counter_u64_add_protected(rx_stats->packets, 1);
 1643         counter_u64_add_protected(apc->port_stats.rx_packets, 1);
 1644         counter_u64_add_protected(rx_stats->bytes, pkt_len);
 1645         counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
 1646         counter_exit();
 1647 }
 1648 
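      /*
       * Handle one RX completion: filter out error/fence CQEs, replace the
       * consumed receive buffer with a freshly loaded mbuf (dropping the
       * packet if the allocation fails), pass the old mbuf up the stack,
       * and re-post the receive WQE.
       */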
 1649 static void
 1650 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 1651     struct gdma_comp *cqe)
 1652 {
 1653         struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
 1654         struct mana_recv_buf_oob *rxbuf_oob;
 1655         struct ifnet *ndev = rxq->ndev;
 1656         struct mana_port_context *apc;
 1657         struct mbuf *old_mbuf;
 1658         uint32_t curr, pktlen;
 1659         int err;
 1660 
 1661         switch (oob->cqe_hdr.cqe_type) {
 1662         case CQE_RX_OKAY:
 1663                 break;
 1664 
 1665         case CQE_RX_TRUNCATED:
 1666                 apc = if_getsoftc(ndev);
 1667                 counter_u64_add(apc->port_stats.rx_drops, 1);
 1668                 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
 1669                 if_printf(ndev, "Dropped a truncated packet\n");
 1670                 goto drop;
 1671 
 1672         case CQE_RX_COALESCED_4:
 1673                 if_printf(ndev, "RX coalescing is unsupported\n");
 1674                 return;
 1675 
 1676         case CQE_RX_OBJECT_FENCE:
 1677                 complete(&rxq->fence_event);
 1678                 return;
 1679 
 1680         default:
 1681                 if_printf(ndev, "Unknown RX CQE type = %d\n",
 1682                     oob->cqe_hdr.cqe_type);
 1683                 return;
 1684         }
 1685 
 1686         if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
 1687                 return;
 1688 
 1689         pktlen = oob->ppi[0].pkt_len;
 1690 
 1691         if (pktlen == 0) {
 1692                 /* Data packets should never have a packet length of zero. */
 1693                 if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
 1694                     rxq->gdma_id, cq->gdma_id, rxq->rxobj);
 1695                 return;
 1696         }
 1697 
 1698         curr = rxq->buf_index;
 1699         rxbuf_oob = &rxq->rx_oobs[curr];
 1700         if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
 1701                 mana_err(NULL, "WARNING: Rx Incorrect complete "
 1702                     "WQE size %u\n",
 1703                     rxbuf_oob->wqe_inf.wqe_size_in_bu);
 1704         }
 1705 
 1706         apc = if_getsoftc(ndev);
 1707 
 1708         old_mbuf = rxbuf_oob->mbuf;
 1709 
 1710         /* Unload DMA map for the old mbuf */
 1711         mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
 1712 
 1713         /* Load a new mbuf to replace the old one */
 1714         err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
 1715         if (err) {
 1716                 mana_dbg(NULL,
 1717                     "failed to load rx mbuf, err = %d, packet dropped.\n",
 1718                     err);
 1719                 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
 1720                 /*
 1721                  * Failed to load new mbuf, rxbuf_oob->mbuf is still
 1722                  * pointing to the old one. Drop the packet.
 1723                  */
 1724                  old_mbuf = NULL;
 1725                  /* Reload the existing mbuf */
 1726                  mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
 1727         }
 1728 
 1729         mana_rx_mbuf(old_mbuf, oob, rxq);
 1730 
 1731 drop:
 1732         mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
 1733 
 1734         mana_post_pkt_rxq(rxq);
 1735 }
 1736 
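      /*
       * Poll the RX completion queue, dispatch each CQE that belongs to
       * this receive queue, and flush any accumulated LRO state.
       */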
 1737 static void
 1738 mana_poll_rx_cq(struct mana_cq *cq)
 1739 {
 1740         struct gdma_comp *comp = cq->gdma_comp_buf;
 1741         int comp_read, i;
 1742 
 1743         comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
 1744         KASSERT(comp_read <= CQE_POLLING_BUFFER,
 1745             ("comp_read %d greater than buf size %d",
 1746             comp_read, CQE_POLLING_BUFFER));
 1747 
 1748         for (i = 0; i < comp_read; i++) {
 1749                 if (comp[i].is_sq) {
 1750                         mana_err(NULL,
 1751                             "WARNING: CQE not for receive queue\n");
 1752                         return;
 1753                 }
 1754 
 1755                 /* verify recv cqe references the right rxq */
 1756                 if (comp[i].wq_num != cq->rxq->gdma_id) {
 1757                         mana_err(NULL,
 1758                             "WARNING: Received CQE %d not for "
 1759                             "this receive queue %d\n",
 1760                             comp[i].wq_num, cq->rxq->gdma_id);
 1761                         return;
 1762                 }
 1763 
 1764                 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
 1765         }
 1766 
 1767         tcp_lro_flush_all(&cq->rxq->lro);
 1768 }
 1769 
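      /*
       * Common CQ handler invoked for both RX and TX completion queues.
       * The CQ is re-armed only when the work done stayed below the budget
       * and the doorbell is not suppressed (e.g. during queue teardown).
       */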
 1770 static void
 1771 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 1772 {
 1773         struct mana_cq *cq = context;
 1774         uint8_t arm_bit;
 1775 
 1776         KASSERT(cq->gdma_cq == gdma_queue,
 1777             ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
 1778 
 1779         if (cq->type == MANA_CQ_TYPE_RX) {
 1780                 mana_poll_rx_cq(cq);
 1781         } else {
 1782                 mana_poll_tx_cq(cq);
 1783         }
 1784 
 1785         if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
 1786                 arm_bit = SET_ARM_BIT;
 1787         else
 1788                 arm_bit = 0;
 1789 
 1790         mana_gd_ring_cq(gdma_queue, arm_bit);
 1791 }
 1792 
 1793 #define MANA_POLL_BUDGET        8
 1794 #define MANA_RX_BUDGET          256
 1795 #define MANA_TX_BUDGET          MAX_SEND_BUFFERS_PER_QUEUE
 1796 
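      /*
       * Cleanup task body: run the CQ handler repeatedly, up to
       * MANA_POLL_BUDGET passes, stopping early once a pass completes less
       * work than its budget; the last pass uses a budget larger than
       * CQE_POLLING_BUFFER so the handler re-arms the CQ on the way out
       * (unless doorbells are suppressed).
       */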
 1797 static void
 1798 mana_poll(void *arg, int pending)
 1799 {
 1800         struct mana_cq *cq = arg;
 1801         int i;
 1802 
 1803         cq->work_done = 0;
 1804         if (cq->type == MANA_CQ_TYPE_RX) {
 1805                 cq->budget = MANA_RX_BUDGET;
 1806         } else {
 1807                 cq->budget = MANA_TX_BUDGET;
 1808         }
 1809 
 1810         for (i = 0; i < MANA_POLL_BUDGET; i++) {
 1811                 /*
 1812                  * If this is the last iteration, make the budget large
 1813                  * enough so that the CQ is armed in any case.
 1814                  */
 1815                 if (i == (MANA_POLL_BUDGET - 1))
 1816                         cq->budget = CQE_POLLING_BUFFER + 1;
 1817 
 1818                 mana_cq_handler(cq, cq->gdma_cq);
 1819 
 1820                 if (cq->work_done < cq->budget)
 1821                         break;
 1822 
 1823                 cq->work_done = 0;
 1824         }
 1825 }
 1826 
 1827 static void
 1828 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
 1829 {
 1830         struct mana_cq *cq = arg;
 1831 
 1832         taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
 1833 }
 1834 
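      /*
       * Tear down a completion queue: cancel and drain its cleanup task,
       * free the taskqueue, and destroy the underlying GDMA queue.
       */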
 1835 static void
 1836 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
 1837 {
 1838         struct gdma_dev *gd = apc->ac->gdma_dev;
 1839 
 1840         if (!cq->gdma_cq)
 1841                 return;
 1842 
 1843         /* Drain cleanup taskqueue */
 1844         if (cq->cleanup_tq) {
 1845                 while (taskqueue_cancel(cq->cleanup_tq,
 1846                     &cq->cleanup_task, NULL)) {
 1847                         taskqueue_drain(cq->cleanup_tq,
 1848                             &cq->cleanup_task);
 1849                 }
 1850 
 1851                 taskqueue_free(cq->cleanup_tq);
 1852         }
 1853 
 1854         mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
 1855 }
 1856 
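      /*
       * Tear down a TX queue: warn about any outstanding sends, flush and
       * free the buf ring, drain the deferred-transmit taskqueue, unmap
       * any mbufs still in flight, and release the remaining queue
       * resources.
       */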
 1857 static void
 1858 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
 1859 {
 1860         struct gdma_dev *gd = apc->ac->gdma_dev;
 1861         struct mana_send_buf_info *txbuf_info;
 1862         uint32_t pending_sends;
 1863         int i;
 1864 
 1865         if (!txq->gdma_sq)
 1866                 return;
 1867 
 1868         if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
 1869                 mana_err(NULL,
 1870                     "WARNING: txq pending sends not zero: %u\n",
 1871                     pending_sends);
 1872         }
 1873 
 1874         if (txq->next_to_use != txq->next_to_complete) {
 1875                 mana_err(NULL,
 1876                     "WARNING: txq buf not completed, "
 1877                     "next use %u, next complete %u\n",
 1878                     txq->next_to_use, txq->next_to_complete);
 1879         }
 1880 
 1881         /* Flush buf ring. Grab txq mtx lock */
 1882         if (txq->txq_br) {
 1883                 mtx_lock(&txq->txq_mtx);
 1884                 drbr_flush(apc->ndev, txq->txq_br);
 1885                 mtx_unlock(&txq->txq_mtx);
 1886                 buf_ring_free(txq->txq_br, M_DEVBUF);
 1887         }
 1888 
 1889         /* Drain taskqueue */
 1890         if (txq->enqueue_tq) {
 1891                 while (taskqueue_cancel(txq->enqueue_tq,
 1892                     &txq->enqueue_task, NULL)) {
 1893                         taskqueue_drain(txq->enqueue_tq,
 1894                             &txq->enqueue_task);
 1895                 }
 1896 
 1897                 taskqueue_free(txq->enqueue_tq);
 1898         }
 1899 
 1900         if (txq->tx_buf_info) {
 1901                 /* Free all mbufs which are still in-flight */
 1902                 for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
 1903                         txbuf_info = &txq->tx_buf_info[i];
 1904                         if (txbuf_info->mbuf) {
 1905                                 mana_tx_unmap_mbuf(apc, txbuf_info);
 1906                         }
 1907                 }
 1908 
 1909                 free(txq->tx_buf_info, M_DEVBUF);
 1910         }
 1911 
 1912         mana_free_counters((counter_u64_t *)&txq->stats,
 1913             sizeof(txq->stats));
 1914 
 1915         mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
 1916 
 1917         mtx_destroy(&txq->txq_mtx);
 1918 }
 1919 
 1920 static void
 1921 mana_destroy_txq(struct mana_port_context *apc)
 1922 {
 1923         int i;
 1924 
 1925         if (!apc->tx_qp)
 1926                 return;
 1927 
 1928         for (i = 0; i < apc->num_queues; i++) {
 1929                 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
 1930 
 1931                 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
 1932 
 1933                 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
 1934         }
 1935 
 1936         free(apc->tx_qp, M_DEVBUF);
 1937         apc->tx_qp = NULL;
 1938 }
 1939 
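      /*
       * Create one SQ/CQ pair per configured queue: allocate the GDMA send
       * queue and its completion queue, register the WQ object with the
       * hardware, and set up the per-queue buf ring, transmit taskqueue
       * and CQ cleanup taskqueue (optionally pinned to a CPU) before
       * arming the CQ.
       */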
 1940 static int
 1941 mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
 1942 {
 1943         struct mana_context *ac = apc->ac;
 1944         struct gdma_dev *gd = ac->gdma_dev;
 1945         struct mana_obj_spec wq_spec;
 1946         struct mana_obj_spec cq_spec;
 1947         struct gdma_queue_spec spec;
 1948         struct gdma_context *gc;
 1949         struct mana_txq *txq;
 1950         struct mana_cq *cq;
 1951         uint32_t txq_size;
 1952         uint32_t cq_size;
 1953         int err;
 1954         int i;
 1955 
 1956         apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
 1957             M_DEVBUF, M_WAITOK | M_ZERO);
 1958         if (!apc->tx_qp)
 1959                 return ENOMEM;
 1960 
 1961         /*  The minimum size of the WQE is 32 bytes, hence
 1962          *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
 1963          *  the SQ can store. This value is then used to size other queues
 1964          *  to prevent overflow.
 1965          */
 1966         txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
 1967         KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
 1968             ("txq size not page aligned"));
 1969 
 1970         cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
 1971         cq_size = ALIGN(cq_size, PAGE_SIZE);
 1972 
 1973         gc = gd->gdma_context;
 1974 
 1975         for (i = 0; i < apc->num_queues; i++) {
 1976                 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
 1977 
 1978                 /* Create SQ */
 1979                 txq = &apc->tx_qp[i].txq;
 1980 
 1981                 txq->ndev = net;
 1982                 txq->vp_offset = apc->tx_vp_offset;
 1983                 txq->idx = i;
 1984                 txq->alt_txq_idx = i;
 1985 
 1986                 memset(&spec, 0, sizeof(spec));
 1987                 spec.type = GDMA_SQ;
 1988                 spec.monitor_avl_buf = true;
 1989                 spec.queue_size = txq_size;
 1990                 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
 1991                 if (err)
 1992                         goto out;
 1993 
 1994                 /* Create SQ's CQ */
 1995                 cq = &apc->tx_qp[i].tx_cq;
 1996                 cq->type = MANA_CQ_TYPE_TX;
 1997 
 1998                 cq->txq = txq;
 1999 
 2000                 memset(&spec, 0, sizeof(spec));
 2001                 spec.type = GDMA_CQ;
 2002                 spec.monitor_avl_buf = false;
 2003                 spec.queue_size = cq_size;
 2004                 spec.cq.callback = mana_schedule_task;
 2005                 spec.cq.parent_eq = ac->eqs[i].eq;
 2006                 spec.cq.context = cq;
 2007                 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
 2008                 if (err)
 2009                         goto out;
 2010 
 2011                 memset(&wq_spec, 0, sizeof(wq_spec));
 2012                 memset(&cq_spec, 0, sizeof(cq_spec));
 2013 
 2014                 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
 2015                 wq_spec.queue_size = txq->gdma_sq->queue_size;
 2016 
 2017                 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 2018                 cq_spec.queue_size = cq->gdma_cq->queue_size;
 2019                 cq_spec.modr_ctx_id = 0;
 2020                 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
 2021 
 2022                 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
 2023                     &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
 2024 
 2025                 if (err)
 2026                         goto out;
 2027 
 2028                 txq->gdma_sq->id = wq_spec.queue_index;
 2029                 cq->gdma_cq->id = cq_spec.queue_index;
 2030 
 2031                 txq->gdma_sq->mem_info.dma_region_handle =
 2032                     GDMA_INVALID_DMA_REGION;
 2033                 cq->gdma_cq->mem_info.dma_region_handle =
 2034                     GDMA_INVALID_DMA_REGION;
 2035 
 2036                 txq->gdma_txq_id = txq->gdma_sq->id;
 2037 
 2038                 cq->gdma_id = cq->gdma_cq->id;
 2039 
 2040                 mana_dbg(NULL,
 2041                     "txq %d, txq gdma id %d, txq cq gdma id %d\n",
 2042                     i, txq->gdma_txq_id, cq->gdma_id);
 2043 
 2044                 if (cq->gdma_id >= gc->max_num_cqs) {
 2045                         if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
 2046                         err = EINVAL;
 2047                         goto out;
 2048                 }
 2049 
 2050                 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 2051 
 2052                 /* Initialize tx specific data */
 2053                 txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
 2054                     sizeof(struct mana_send_buf_info),
 2055                     M_DEVBUF, M_WAITOK | M_ZERO);
 2056                 if (unlikely(txq->tx_buf_info == NULL)) {
 2057                         if_printf(net,
 2058                             "Failed to allocate tx buf info for SQ %u\n",
 2059                             txq->gdma_sq->id);
 2060                         err = ENOMEM;
 2061                         goto out;
 2062                 }
 2063 
 2064 
 2065                 snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
 2066                     "mana:tx(%d)", i);
 2067                 mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
 2068 
 2069                 txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
 2070                     M_DEVBUF, M_WAITOK, &txq->txq_mtx);
 2071                 if (unlikely(txq->txq_br == NULL)) {
 2072                         if_printf(net,
 2073                             "Failed to allocate buf ring for SQ %u\n",
 2074                             txq->gdma_sq->id);
 2075                         err = ENOMEM;
 2076                         goto out;
 2077                 }
 2078 
 2079                 /* Allocate taskqueue for deferred send */
 2080                 TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
 2081                 txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
 2082                     M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
 2083                 if (unlikely(txq->enqueue_tq == NULL)) {
 2084                         if_printf(net,
 2085                             "Unable to create tx %d enqueue task queue\n", i);
 2086                         err = ENOMEM;
 2087                         goto out;
 2088                 }
 2089                 taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
 2090                     "mana txq p%u-tx%d", apc->port_idx, i);
 2091 
 2092                 mana_alloc_counters((counter_u64_t *)&txq->stats,
 2093                     sizeof(txq->stats));
 2094 
 2095                 /* Allocate and start the cleanup task on CQ */
 2096                 cq->do_not_ring_db = false;
 2097 
 2098                 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
 2099                 cq->cleanup_tq =
 2100                     taskqueue_create_fast("mana tx cq cleanup",
 2101                     M_WAITOK, taskqueue_thread_enqueue,
 2102                     &cq->cleanup_tq);
 2103 
 2104                 if (apc->last_tx_cq_bind_cpu < 0)
 2105                         apc->last_tx_cq_bind_cpu = CPU_FIRST();
 2106                 cq->cpu = apc->last_tx_cq_bind_cpu;
 2107                 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
 2108 
 2109                 if (apc->bind_cleanup_thread_cpu) {
 2110                         cpuset_t cpu_mask;
 2111                         CPU_SETOF(cq->cpu, &cpu_mask);
 2112                         taskqueue_start_threads_cpuset(&cq->cleanup_tq,
 2113                             1, PI_NET, &cpu_mask,
 2114                             "mana cq p%u-tx%u-cpu%d",
 2115                             apc->port_idx, txq->idx, cq->cpu);
 2116                 } else {
 2117                         taskqueue_start_threads(&cq->cleanup_tq, 1,
 2118                             PI_NET, "mana cq p%u-tx%u",
 2119                             apc->port_idx, txq->idx);
 2120                 }
 2121 
 2122                 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
 2123         }
 2124 
 2125         return 0;
 2126 out:
 2127         mana_destroy_txq(apc);
 2128         return err;
 2129 }
 2130 
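      /*
       * Tear down an RX queue: destroy the hardware WQ object and its CQ,
       * release LRO state and statistics, unmap and free the receive
       * buffers, and destroy the GDMA receive queue.
       */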
 2131 static void
 2132 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
 2133     bool validate_state)
 2134 {
 2135         struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
 2136         struct mana_recv_buf_oob *rx_oob;
 2137         int i;
 2138 
 2139         if (!rxq)
 2140                 return;
 2141 
 2142         if (validate_state) {
 2143                 /*
 2144                  * XXX Cancel and drain cleanup task queue here.
 2145                  */
 2146                 ;
 2147         }
 2148 
 2149         mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
 2150 
 2151         mana_deinit_cq(apc, &rxq->rx_cq);
 2152 
 2153         mana_free_counters((counter_u64_t *)&rxq->stats,
 2154             sizeof(rxq->stats));
 2155 
 2156         /* Free LRO resources */
 2157         tcp_lro_free(&rxq->lro);
 2158 
 2159         for (i = 0; i < rxq->num_rx_buf; i++) {
 2160                 rx_oob = &rxq->rx_oobs[i];
 2161 
 2162                 if (rx_oob->mbuf)
 2163                         mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
 2164 
 2165                 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
 2166         }
 2167 
 2168         if (rxq->gdma_rq)
 2169                 mana_gd_destroy_queue(gc, rxq->gdma_rq);
 2170 
 2171         free(rxq, M_DEVBUF);
 2172 }
 2173 
 2174 #define MANA_WQE_HEADER_SIZE 16
 2175 #define MANA_WQE_SGE_SIZE 16
 2176 
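      /*
       * Allocate DMA maps and mbufs for every receive buffer of the queue
       * and initialize the corresponding WQE requests, accumulating the
       * total RQ and CQ sizes needed to hold them.
       */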
 2177 static int
 2178 mana_alloc_rx_wqe(struct mana_port_context *apc,
 2179     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
 2180 {
 2181         struct mana_recv_buf_oob *rx_oob;
 2182         uint32_t buf_idx;
 2183         int err;
 2184 
 2185         if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
 2186                 mana_err(NULL,
 2187                     "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
 2188         }
 2189 
 2190         *rxq_size = 0;
 2191         *cq_size = 0;
 2192 
 2193         for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
 2194                 rx_oob = &rxq->rx_oobs[buf_idx];
 2195                 memset(rx_oob, 0, sizeof(*rx_oob));
 2196 
 2197                 err = bus_dmamap_create(apc->rx_buf_tag, 0,
 2198                     &rx_oob->dma_map);
 2199                 if (err) {
 2200                         mana_err(NULL,
 2201                             "Failed to create rx DMA map for buf %d\n",
 2202                             buf_idx);
 2203                         return err;
 2204                 }
 2205 
 2206                 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
 2207                 if (err) {
 2208                         mana_err(NULL,
 2209                             "Failed to load rx mbuf for buf %d\n",
 2210                             buf_idx);
 2211                         bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
 2212                         return err;
 2213                 }
 2214 
 2215                 rx_oob->wqe_req.sgl = rx_oob->sgl;
 2216                 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
 2217                 rx_oob->wqe_req.inline_oob_size = 0;
 2218                 rx_oob->wqe_req.inline_oob_data = NULL;
 2219                 rx_oob->wqe_req.flags = 0;
 2220                 rx_oob->wqe_req.client_data_unit = 0;
 2221 
 2222                 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
 2223                                    MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
 2224                 *cq_size += COMP_ENTRY_SIZE;
 2225         }
 2226 
 2227         return 0;
 2228 }
 2229 
 2230 static int
 2231 mana_push_wqe(struct mana_rxq *rxq)
 2232 {
 2233         struct mana_recv_buf_oob *rx_oob;
 2234         uint32_t buf_idx;
 2235         int err;
 2236 
 2237         for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
 2238                 rx_oob = &rxq->rx_oobs[buf_idx];
 2239 
 2240                 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
 2241                     &rx_oob->wqe_inf);
 2242                 if (err)
 2243                         return ENOSPC;
 2244         }
 2245 
 2246         return 0;
 2247 }
 2248 
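      /*
       * Create a receive queue: allocate the rxq and its buffers, set up
       * LRO and statistics, create the GDMA RQ and CQ, register the WQ
       * object, post the initial receive WQEs, and start the CQ cleanup
       * taskqueue before arming the CQ. Returns NULL and cleans up on
       * failure.
       */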
 2249 static struct mana_rxq *
 2250 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
 2251     struct mana_eq *eq, struct ifnet *ndev)
 2252 {
 2253         struct gdma_dev *gd = apc->ac->gdma_dev;
 2254         struct mana_obj_spec wq_spec;
 2255         struct mana_obj_spec cq_spec;
 2256         struct gdma_queue_spec spec;
 2257         struct mana_cq *cq = NULL;
 2258         uint32_t cq_size, rq_size;
 2259         struct gdma_context *gc;
 2260         struct mana_rxq *rxq;
 2261         int err;
 2262 
 2263         gc = gd->gdma_context;
 2264 
 2265         rxq = malloc(sizeof(*rxq) +
 2266             RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
 2267             M_DEVBUF, M_WAITOK | M_ZERO);
 2268         if (!rxq)
 2269                 return NULL;
 2270 
 2271         rxq->ndev = ndev;
 2272         rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 2273         rxq->rxq_idx = rxq_idx;
 2274         /*
 2275          * The minimum size is MCLBYTES (2048) bytes for an mbuf cluster;
 2276          * the maximum currently allowed is MAX_FRAME_SIZE (4096) bytes.
 2277          */
 2278         rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
 2279         if (rxq->datasize > MAX_FRAME_SIZE)
 2280                 rxq->datasize = MAX_FRAME_SIZE;
 2281 
 2282         mana_dbg(NULL, "Setting rxq %d datasize %d\n",
 2283             rxq_idx, rxq->datasize);
 2284 
 2285         rxq->rxobj = INVALID_MANA_HANDLE;
 2286 
 2287         err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
 2288         if (err)
 2289                 goto out;
 2290 
 2291         /* Create LRO for the RQ */
 2292         if (ndev->if_capenable & IFCAP_LRO) {
 2293                 err = tcp_lro_init(&rxq->lro);
 2294                 if (err) {
 2295                         if_printf(ndev, "Failed to create LRO for rxq %d\n",
 2296                             rxq_idx);
 2297                 } else {
 2298                         rxq->lro.ifp = ndev;
 2299                 }
 2300         }
 2301 
 2302         mana_alloc_counters((counter_u64_t *)&rxq->stats,
 2303             sizeof(rxq->stats));
 2304 
 2305         rq_size = ALIGN(rq_size, PAGE_SIZE);
 2306         cq_size = ALIGN(cq_size, PAGE_SIZE);
 2307 
 2308         /* Create RQ */
 2309         memset(&spec, 0, sizeof(spec));
 2310         spec.type = GDMA_RQ;
 2311         spec.monitor_avl_buf = true;
 2312         spec.queue_size = rq_size;
 2313         err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
 2314         if (err)
 2315                 goto out;
 2316 
 2317         /* Create RQ's CQ */
 2318         cq = &rxq->rx_cq;
 2319         cq->type = MANA_CQ_TYPE_RX;
 2320         cq->rxq = rxq;
 2321 
 2322         memset(&spec, 0, sizeof(spec));
 2323         spec.type = GDMA_CQ;
 2324         spec.monitor_avl_buf = false;
 2325         spec.queue_size = cq_size;
 2326         spec.cq.callback = mana_schedule_task;
 2327         spec.cq.parent_eq = eq->eq;
 2328         spec.cq.context = cq;
 2329         err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
 2330         if (err)
 2331                 goto out;
 2332 
 2333         memset(&wq_spec, 0, sizeof(wq_spec));
 2334         memset(&cq_spec, 0, sizeof(cq_spec));
 2335         wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
 2336         wq_spec.queue_size = rxq->gdma_rq->queue_size;
 2337 
 2338         cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 2339         cq_spec.queue_size = cq->gdma_cq->queue_size;
 2340         cq_spec.modr_ctx_id = 0;
 2341         cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
 2342 
 2343         err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
 2344             &wq_spec, &cq_spec, &rxq->rxobj);
 2345         if (err)
 2346                 goto out;
 2347 
 2348         rxq->gdma_rq->id = wq_spec.queue_index;
 2349         cq->gdma_cq->id = cq_spec.queue_index;
 2350 
 2351         rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 2352         cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 2353 
 2354         rxq->gdma_id = rxq->gdma_rq->id;
 2355         cq->gdma_id = cq->gdma_cq->id;
 2356 
 2357         err = mana_push_wqe(rxq);
 2358         if (err)
 2359                 goto out;
 2360 
 2361         if (cq->gdma_id >= gc->max_num_cqs) {
 2362                 err = EINVAL;
 2363                 goto out;
 2364         }
 2365 
 2366         gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 2367 
 2368         /* Allocate and start the cleanup task on CQ */
 2369         cq->do_not_ring_db = false;
 2370 
 2371         NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
 2372         cq->cleanup_tq =
 2373             taskqueue_create_fast("mana rx cq cleanup",
 2374             M_WAITOK, taskqueue_thread_enqueue,
 2375             &cq->cleanup_tq);
 2376 
 2377         if (apc->last_rx_cq_bind_cpu < 0)
 2378                 apc->last_rx_cq_bind_cpu = CPU_FIRST();
 2379         cq->cpu = apc->last_rx_cq_bind_cpu;
 2380         apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
 2381 
 2382         if (apc->bind_cleanup_thread_cpu) {
 2383                 cpuset_t cpu_mask;
 2384                 CPU_SETOF(cq->cpu, &cpu_mask);
 2385                 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
 2386                     1, PI_NET, &cpu_mask,
 2387                     "mana cq p%u-rx%u-cpu%d",
 2388                     apc->port_idx, rxq->rxq_idx, cq->cpu);
 2389         } else {
 2390                 taskqueue_start_threads(&cq->cleanup_tq, 1,
 2391                     PI_NET, "mana cq p%u-rx%u",
 2392                     apc->port_idx, rxq->rxq_idx);
 2393         }
 2394 
 2395         mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
 2396 out:
 2397         if (!err)
 2398                 return rxq;
 2399 
 2400         if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
 2401 
 2402         mana_destroy_rxq(apc, rxq, false);
 2403 
 2404         if (cq)
 2405                 mana_deinit_cq(apc, cq);
 2406 
 2407         return NULL;
 2408 }
 2409 
 2410 static int
 2411 mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
 2412 {
 2413         struct mana_context *ac = apc->ac;
 2414         struct mana_rxq *rxq;
 2415         int err = 0;
 2416         int i;
 2417 
 2418         for (i = 0; i < apc->num_queues; i++) {
 2419                 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
 2420                 if (!rxq) {
 2421                         err = ENOMEM;
 2422                         goto out;
 2423                 }
 2424 
 2425                 apc->rxqs[i] = rxq;
 2426         }
 2427 
 2428         apc->default_rxobj = apc->rxqs[0]->rxobj;
 2429 out:
 2430         return err;
 2431 }
 2432 
 2433 static void
 2434 mana_destroy_vport(struct mana_port_context *apc)
 2435 {
 2436         struct mana_rxq *rxq;
 2437         uint32_t rxq_idx;
 2438 
 2439         for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
 2440                 rxq = apc->rxqs[rxq_idx];
 2441                 if (!rxq)
 2442                         continue;
 2443 
 2444                 mana_destroy_rxq(apc, rxq, true);
 2445                 apc->rxqs[rxq_idx] = NULL;
 2446         }
 2447 
 2448         mana_destroy_txq(apc);
 2449 
 2450         mana_uncfg_vport(apc);
 2451 }
 2452 
 2453 static int
 2454 mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
 2455 {
 2456         struct gdma_dev *gd = apc->ac->gdma_dev;
 2457         int err;
 2458 
 2459         apc->default_rxobj = INVALID_MANA_HANDLE;
 2460 
 2461         err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
 2462         if (err)
 2463                 return err;
 2464 
 2465         return mana_create_txq(apc, net);
 2466 }
 2467 
 2468 
 2469 static void mana_rss_table_init(struct mana_port_context *apc)
 2470 {
 2471         int i;
 2472 
 2473         for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
 2474                 apc->indir_table[i] = i % apc->num_queues;
 2475 }
 2476 
 2477 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 2478                     bool update_hash, bool update_tab)
 2479 {
 2480         uint32_t queue_idx;
 2481         int err;
 2482         int i;
 2483 
 2484         if (update_tab) {
 2485                 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
 2486                         queue_idx = apc->indir_table[i];
 2487                         apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
 2488                 }
 2489         }
 2490 
 2491         err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
 2492         if (err)
 2493                 return err;
 2494 
 2495         mana_fence_rqs(apc);
 2496 
 2497         return 0;
 2498 }
 2499 
 2500 static int
 2501 mana_init_port(struct ifnet *ndev)
 2502 {
 2503         struct mana_port_context *apc = if_getsoftc(ndev);
 2504         uint32_t max_txq, max_rxq, max_queues;
 2505         int port_idx = apc->port_idx;
 2506         uint32_t num_indirect_entries;
 2507         int err;
 2508 
 2509         err = mana_init_port_context(apc);
 2510         if (err)
 2511                 return err;
 2512 
 2513         err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
 2514             &num_indirect_entries);
 2515         if (err) {
 2516                 if_printf(ndev, "Failed to query info for vPort %d\n",
 2517                     port_idx);
 2518                 goto reset_apc;
 2519         }
 2520 
 2521         max_queues = min_t(uint32_t, max_txq, max_rxq);
 2522         if (apc->max_queues > max_queues)
 2523                 apc->max_queues = max_queues;
 2524 
 2525         if (apc->num_queues > apc->max_queues)
 2526                 apc->num_queues = apc->max_queues;
 2527 
 2528         return 0;
 2529 
 2530 reset_apc:
 2531         bus_dma_tag_destroy(apc->rx_buf_tag);
 2532         apc->rx_buf_tag = NULL;
 2533         free(apc->rxqs, M_DEVBUF);
 2534         apc->rxqs = NULL;
 2535         return err;
 2536 }
 2537 
 2538 int
 2539 mana_alloc_queues(struct ifnet *ndev)
 2540 {
 2541         struct mana_port_context *apc = if_getsoftc(ndev);
 2542         int err;
 2543 
 2544         err = mana_create_vport(apc, ndev);
 2545         if (err)
 2546                 return err;
 2547 
 2548         err = mana_add_rx_queues(apc, ndev);
 2549         if (err)
 2550                 goto destroy_vport;
 2551 
 2552         apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
 2553 
 2554         mana_rss_table_init(apc);
 2555 
 2556         err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 2557         if (err)
 2558                 goto destroy_vport;
 2559 
 2560         return 0;
 2561 
 2562 destroy_vport:
 2563         mana_destroy_vport(apc);
 2564         return err;
 2565 }
 2566 
 2567 static int
 2568 mana_up(struct mana_port_context *apc)
 2569 {
 2570         int err;
 2571 
 2572         mana_dbg(NULL, "mana_up called\n");
 2573 
 2574         err = mana_alloc_queues(apc->ndev);
 2575         if (err) {
 2576                 mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
 2577                 return err;
 2578         }
 2579 
 2580         /* Add queue specific sysctl */
 2581         mana_sysctl_add_queues(apc);
 2582 
 2583         apc->port_is_up = true;
 2584 
 2585         /* Ensure port state updated before txq state */
 2586         wmb();
 2587 
 2588         if_link_state_change(apc->ndev, LINK_STATE_UP);
 2589         if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 2590 
 2591         return 0;
 2592 }
 2593 
 2594 
 2595 static void
 2596 mana_init(void *arg)
 2597 {
 2598         struct mana_port_context *apc = (struct mana_port_context *)arg;
 2599 
 2600         MANA_APC_LOCK_LOCK(apc);
 2601         if (!apc->port_is_up) {
 2602                 mana_up(apc);
 2603         }
 2604         MANA_APC_LOCK_UNLOCK(apc);
 2605 }
 2606 
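      /*
       * Bring the data path down (the port must already be marked not up):
       * suppress CQ doorbells, wait for in-flight TX packets to drain,
       * disable RSS steering, and destroy the vPort queues.
       */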
 2607 static int
 2608 mana_dealloc_queues(struct ifnet *ndev)
 2609 {
 2610         struct mana_port_context *apc = if_getsoftc(ndev);
 2611         struct mana_txq *txq;
 2612         int i, err;
 2613 
 2614         if (apc->port_is_up)
 2615                 return EINVAL;
 2616 
 2617         /* No packet can be transmitted now since apc->port_is_up is false.
 2618          * There is still a tiny chance that mana_poll_tx_cq() can re-enable
 2619          * a txq because it may not see apc->port_is_up being cleared to
 2620          * false in time, but that does not matter because mana_start_xmit()
 2621          * drops any new packets while apc->port_is_up is false.
 2622          *
 2623          * Drain all the in-flight TX packets.
 2624          */
 2625         for (i = 0; i < apc->num_queues; i++) {
 2626                 txq = &apc->tx_qp[i].txq;
 2627 
 2628                 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
 2629                 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
 2630 
 2631                 tx_cq->do_not_ring_db = true;
 2632                 rx_cq->do_not_ring_db = true;
 2633 
 2634                 /* Schedule a cleanup task */
 2635                 taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
 2636 
 2637                 while (atomic_read(&txq->pending_sends) > 0)
 2638                         usleep_range(1000, 2000);
 2639         }
 2640 
 2641         /* At this point the queues can no longer be woken up, because
 2642          * mana_poll_tx_cq() can no longer be running.
 2643          */
 2644 
 2645         apc->rss_state = TRI_STATE_FALSE;
 2646         err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
 2647         if (err) {
 2648                 if_printf(ndev, "Failed to disable vPort: %d\n", err);
 2649                 return err;
 2650         }
 2651 
 2652         mana_destroy_vport(apc);
 2653 
 2654         return 0;
 2655 }
 2656 
 2657 static int
 2658 mana_down(struct mana_port_context *apc)
 2659 {
 2660         int err = 0;
 2661 
 2662         apc->port_st_save = apc->port_is_up;
 2663         apc->port_is_up = false;
 2664 
 2665         /* Ensure port state updated before txq state */
 2666         wmb();
 2667 
 2668         if (apc->port_st_save) {
 2669                 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
 2670                     IFF_DRV_RUNNING);
 2671                 if_link_state_change(apc->ndev, LINK_STATE_DOWN);
 2672 
 2673                 mana_sysctl_free_queues(apc);
 2674 
 2675                 err = mana_dealloc_queues(apc->ndev);
 2676                 if (err) {
 2677                         if_printf(apc->ndev,
 2678                             "Failed to bring down mana interface: %d\n", err);
 2679                 }
 2680         }
 2681 
 2682         return err;
 2683 }
 2684 
 2685 int
 2686 mana_detach(struct ifnet *ndev)
 2687 {
 2688         struct mana_port_context *apc = if_getsoftc(ndev);
 2689         int err;
 2690 
 2691         ether_ifdetach(ndev);
 2692 
 2693         if (!apc)
 2694                 return 0;
 2695 
 2696         MANA_APC_LOCK_LOCK(apc);
 2697         err = mana_down(apc);
 2698         MANA_APC_LOCK_UNLOCK(apc);
 2699 
 2700         mana_cleanup_port_context(apc);
 2701 
 2702         MANA_APC_LOCK_DESTROY(apc);
 2703 
 2704         free(apc, M_DEVBUF);
 2705 
 2706         return err;
 2707 }
 2708 
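      /*
       * Create and attach the ifnet for one vPort: allocate the port
       * context, set the interface methods and capabilities (checksum
       * offload, TSO, LRO), query the vPort configuration, and attach to
       * the network stack with the port marked inactive until mana_init()
       * brings it up.
       */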
 2709 static int
 2710 mana_probe_port(struct mana_context *ac, int port_idx,
 2711     struct ifnet **ndev_storage)
 2712 {
 2713         struct gdma_context *gc = ac->gdma_dev->gdma_context;
 2714         struct mana_port_context *apc;
 2715         struct ifnet *ndev;
 2716         int err;
 2717 
 2718         ndev = if_alloc_dev(IFT_ETHER, gc->dev);
 2719         if (!ndev) {
 2720                 mana_err(NULL, "Failed to allocate ifnet struct\n");
 2721                 return ENOMEM;
 2722         }
 2723 
 2724         *ndev_storage = ndev;
 2725 
 2726         apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
 2727         if (!apc) {
 2728                 mana_err(NULL, "Failed to allocate port context\n");
 2729                 err = ENOMEM;
 2730                 goto free_net;
 2731         }
 2732 
 2733         apc->ac = ac;
 2734         apc->ndev = ndev;
 2735         apc->max_queues = gc->max_num_queues;
 2736         apc->num_queues = min_t(unsigned int,
 2737             gc->max_num_queues, MANA_MAX_NUM_QUEUES);
 2738         apc->port_handle = INVALID_MANA_HANDLE;
 2739         apc->port_idx = port_idx;
 2740         apc->frame_size = DEFAULT_FRAME_SIZE;
 2741         apc->last_tx_cq_bind_cpu = -1;
 2742         apc->last_rx_cq_bind_cpu = -1;
 2743         apc->vport_use_count = 0;
 2744 
 2745         MANA_APC_LOCK_INIT(apc);
 2746 
 2747         if_initname(ndev, device_get_name(gc->dev), port_idx);
 2748         if_setdev(ndev, gc->dev);
 2749         if_setsoftc(ndev, apc);
 2750 
 2751         if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
 2752         if_setinitfn(ndev, mana_init);
 2753         if_settransmitfn(ndev, mana_start_xmit);
 2754         if_setqflushfn(ndev, mana_qflush);
 2755         if_setioctlfn(ndev, mana_ioctl);
 2756         if_setgetcounterfn(ndev, mana_get_counter);
 2757 
 2758         if_setmtu(ndev, ETHERMTU);
 2759         if_setbaudrate(ndev, IF_Gbps(100));
 2760 
 2761         mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
 2762 
 2763         err = mana_init_port(ndev);
 2764         if (err)
 2765                 goto reset_apc;
 2766 
 2767         ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
 2768         ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
 2769         ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
 2770 
 2771         ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;
 2772 
 2773         /* Enable all available capabilities by default. */
 2774         ndev->if_capenable = ndev->if_capabilities;
 2775 
 2776         /* TSO parameters */
 2777         ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
 2778             (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
 2779         ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
 2780         ndev->if_hw_tsomaxsegsize = PAGE_SIZE;
 2781 
 2782         ifmedia_init(&apc->media, IFM_IMASK,
 2783             mana_ifmedia_change, mana_ifmedia_status);
 2784         ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 2785         ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
 2786 
 2787         ether_ifattach(ndev, apc->mac_addr);
 2788 
 2789         /* Initialize statistics */
 2790         mana_alloc_counters((counter_u64_t *)&apc->port_stats,
 2791             sizeof(struct mana_port_stats));
 2792         mana_sysctl_add_port(apc);
 2793 
 2794         /* Tell the stack that the interface is not active */
 2795         if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
 2796 
 2797         return 0;
 2798 
 2799 reset_apc:
 2800         free(apc, M_DEVBUF);
 2801 free_net:
 2802         *ndev_storage = NULL;
 2803         if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
 2804         if_free(ndev);
 2805         return err;
 2806 }
 2807 
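      /*
       * Device-level probe: register with the GDMA layer, create the EQs,
       * query the device configuration for the number of ports, and probe
       * each port; on failure the partially initialized state is torn down
       * via mana_remove().
       */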
 2808 int mana_probe(struct gdma_dev *gd)
 2809 {
 2810         struct gdma_context *gc = gd->gdma_context;
 2811         device_t dev = gc->dev;
 2812         struct mana_context *ac;
 2813         int err;
 2814         int i;
 2815 
 2816         device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
 2817                  MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
 2818 
 2819         err = mana_gd_register_device(gd);
 2820         if (err)
 2821                 return err;
 2822 
 2823         ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
 2824         if (!ac)
 2825                 return ENOMEM;
 2826 
 2827         ac->gdma_dev = gd;
 2828         ac->num_ports = 1;
 2829         gd->driver_data = ac;
 2830 
 2831         err = mana_create_eq(ac);
 2832         if (err)
 2833                 goto out;
 2834 
 2835         err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
 2836             MANA_MICRO_VERSION, &ac->num_ports);
 2837         if (err)
 2838                 goto out;
 2839 
 2840         if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
 2841                 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
 2842 
 2843         for (i = 0; i < ac->num_ports; i++) {
 2844                 err = mana_probe_port(ac, i, &ac->ports[i]);
 2845                 if (err) {
 2846                         device_printf(dev,
 2847                             "Failed to probe mana port %d\n", i);
 2848                         break;
 2849                 }
 2850         }
 2851 
 2852 out:
 2853         if (err)
 2854                 mana_remove(gd);
 2855 
 2856         return err;
 2857 }
 2858 
 2859 void
 2860 mana_remove(struct gdma_dev *gd)
 2861 {
 2862         struct gdma_context *gc = gd->gdma_context;
 2863         struct mana_context *ac = gd->driver_data;
 2864         device_t dev = gc->dev;
 2865         struct ifnet *ndev;
 2866         int i;
 2867 
 2868         for (i = 0; i < ac->num_ports; i++) {
 2869                 ndev = ac->ports[i];
 2870                 if (!ndev) {
 2871                         if (i == 0)
 2872                                 device_printf(dev, "No net device to remove\n");
 2873                         goto out;
 2874                 }
 2875 
 2876                 mana_detach(ndev);
 2877 
 2878                 if_free(ndev);
 2879         }
 2880 
 2881         mana_destroy_eq(ac);
 2882 
 2883 out:
 2884         mana_gd_deregister_device(gd);
 2885         gd->driver_data = NULL;
 2886         gd->gdma_context = NULL;
 2887         free(ac, M_DEVBUF);
 2888 }
