FreeBSD/Linux Kernel Cross Reference
sys/dev/bnxt/bnxt_txrx.c


    1 /*-
    2  * Broadcom NetXtreme-C/E network driver.
    3  *
    4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
    5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   26  * THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/types.h>
   33 #include <sys/socket.h>
   34 #include <sys/endian.h>
   35 #include <net/if.h>
   36 #include <net/if_var.h>
   37 #include <net/ethernet.h>
   38 #include <net/iflib.h>
   39 
   40 #include "opt_inet.h"
   41 #include "opt_inet6.h"
   42 #include "opt_rss.h"
   43 
   44 #include "bnxt.h"
   45 
   46 /*
   47  * Function prototypes
   48  */
   49 
   50 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
   51 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
   52 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
   53 
   54 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
   55 
   56 /*                              uint16_t rxqid, uint8_t flid,
   57     uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
   58     uint16_t buf_size);
   59 */
   60 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
   61     qidx_t pidx);
   62 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
   63     qidx_t budget);
   64 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
   65 
   66 static int bnxt_intr(void *sc);
   67 
   68 struct if_txrx bnxt_txrx  = {
   69         .ift_txd_encap = bnxt_isc_txd_encap,
   70         .ift_txd_flush = bnxt_isc_txd_flush,
   71         .ift_txd_credits_update = bnxt_isc_txd_credits_update,
   72         .ift_rxd_available = bnxt_isc_rxd_available,
   73         .ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
   74         .ift_rxd_refill = bnxt_isc_rxd_refill,
   75         .ift_rxd_flush = bnxt_isc_rxd_flush,
   76         .ift_legacy_intr = bnxt_intr
   77 };
   78 
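iflib never touches the NIC rings directly; everything in this file is reached through the function pointers in the table above, once per queue set, so the table has to be handed to iflib during attach. A hedged sketch of that hookup follows, modeled on other iflib drivers (e.g. if_em): the wrapper name is hypothetical, while if_softc_ctx_t, iflib_get_softc_ctx() and the isc_txrx field are the interfaces those drivers use for this purpose. It is written as a fragment for the driver context (net/iflib.h is already included above), not as standalone code.

/*
 * Sketch only: how an iflib driver typically publishes its if_txrx table
 * from its attach-pre path.  Not taken from this file.
 */
static int
bnxt_attach_pre_sketch(if_ctx_t ctx)            /* hypothetical name */
{
        if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

        /* Hand iflib the per-queue descriptor handlers defined below. */
        scctx->isc_txrx = &bnxt_txrx;
        return (0);
}
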
   79 /*
   80  * Device Dependent Packet Transmit and Receive Functions
   81  */
   82 
   83 static const uint16_t bnxt_tx_lhint[] = {
   84         TX_BD_SHORT_FLAGS_LHINT_LT512,
   85         TX_BD_SHORT_FLAGS_LHINT_LT1K,
   86         TX_BD_SHORT_FLAGS_LHINT_LT2K,
   87         TX_BD_SHORT_FLAGS_LHINT_LT2K,
   88         TX_BD_SHORT_FLAGS_LHINT_GTE2K,
   89 };
   90 
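The table above is indexed by packet length divided by 512 (len >> 9) in bnxt_isc_txd_encap() below; lengths of 2KB or more take the GTE2K hint directly, so the last entry is only a defensive fallback. A standalone illustration of the bucketing, using stand-in values in place of the hardware TX_BD_SHORT_FLAGS_LHINT_* constants and invented sample lengths:

#include <stdio.h>

/* Stand-ins for the TX_BD_SHORT_FLAGS_LHINT_* values. */
enum { LHINT_LT512, LHINT_LT1K, LHINT_LT2K, LHINT_GTE2K };

static const int lhint_table[] = {
        LHINT_LT512,    /* 0-511 bytes   */
        LHINT_LT1K,     /* 512-1023      */
        LHINT_LT2K,     /* 1024-1535     */
        LHINT_LT2K,     /* 1536-2047     */
        LHINT_GTE2K,    /* defensive; >= 2048 is special-cased */
};

int
main(void)
{
        const unsigned lens[] = { 64, 700, 1500, 2047, 9000 };

        for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned len = lens[i];
                int hint = (len >= 2048) ? LHINT_GTE2K : lhint_table[len >> 9];

                printf("len %4u -> length hint %d\n", len, hint);
        }
        return (0);
}
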
   91 static int
   92 bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
   93 {
   94         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
   95         struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
   96         struct tx_bd_long *tbd;
   97         struct tx_bd_long_hi *tbdh;
   98         bool need_hi = false;
   99         uint16_t flags_type;
  100         uint16_t lflags;
  101         uint32_t cfa_meta;
  102         int seg = 0;
  103 
  104         /* If we have offloads enabled, we need to use two BDs. */
  105         if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
  106             pi->ipi_mflags & M_VLANTAG)
  107                 need_hi = true;
  108 
  109         /* TODO: Devices before Cu+B1 need to not mix long and short BDs */
  110         need_hi = true;
  111 
  112         pi->ipi_new_pidx = pi->ipi_pidx;
  113         tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
  114         pi->ipi_ndescs = 0;
  115         /* No need to byte-swap the opaque value */
  116         tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
  117         tbd->len = htole16(pi->ipi_segs[seg].ds_len);
  118         tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
  119         flags_type = ((pi->ipi_nsegs + need_hi) <<
  120             TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
  121         if (pi->ipi_len >= 2048)
  122                 flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
  123         else
  124                 flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];
  125 
  126         if (need_hi) {
  127                 flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
  128 
  129                 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
  130                 tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
  131                 tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
  132                 tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
  133                     pi->ipi_tcp_hlen) >> 1);
  134                 tbdh->cfa_action = 0;
  135                 lflags = 0;
  136                 cfa_meta = 0;
  137                 if (pi->ipi_mflags & M_VLANTAG) {
  138                         /* TODO: Do we need to byte-swap the vtag here? */
  139                         cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
  140                             pi->ipi_vtag;
  141                         cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
  142                 }
  143                 tbdh->cfa_meta = htole32(cfa_meta);
  144                 if (pi->ipi_csum_flags & CSUM_TSO) {
  145                         lflags |= TX_BD_LONG_LFLAGS_LSO |
  146                             TX_BD_LONG_LFLAGS_T_IPID;
  147                 }
  148                 else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
  149                         lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
  150                             TX_BD_LONG_LFLAGS_IP_CHKSUM;
  151                 }
  152                 else if(pi->ipi_csum_flags & CSUM_IP) {
  153                         lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
  154                 }
  155                 tbdh->lflags = htole16(lflags);
  156         }
  157         else {
  158                 flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
  159         }
  160 
  161         for (; seg < pi->ipi_nsegs; seg++) {
  162                 tbd->flags_type = htole16(flags_type);
  163                 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
  164                 tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
  165                 tbd->len = htole16(pi->ipi_segs[seg].ds_len);
  166                 tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
  167                 flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
  168         }
  169         flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
  170         tbd->flags_type = htole16(flags_type);
  171         pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
  172 
  173         return 0;
  174 }
  175 
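The opaque field written near the top of bnxt_isc_txd_encap() packs the total BD count for the packet (DMA segments plus the optional long_hi BD) into the top byte and the producer index of the first BD into the low bits. The hardware echoes the value back in the TX completion, which is how bnxt_isc_txd_credits_update() below reclaims descriptors with a single shift. A standalone round-trip with invented sample values:

#include <stdio.h>

int
main(void)
{
        unsigned nsegs = 3;          /* DMA segments for the packet    */
        unsigned need_hi = 1;        /* extra tx_bd_long_hi descriptor */
        unsigned pidx = 0x01a4;      /* producer index of the first BD */

        unsigned opaque = ((nsegs + need_hi) << 24) | pidx;

        printf("opaque         = 0x%08x\n", opaque);
        printf("BD count back  = %u\n", opaque >> 24);          /* 4     */
        printf("first BD index = 0x%06x\n", opaque & 0xffffff); /* 0x1a4 */
        return (0);
}
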
  176 static void
  177 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
  178 {
  179         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  180         struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
  181 
  182         /* pidx is what we last set ipi_new_pidx to */
  183         softc->db_ops.bnxt_db_tx(tx_ring, pidx);
  184         return;
  185 }
  186 
  187 static int
  188 bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
  189 {
  190         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  191         struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
  192         struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
  193         int avail = 0;
  194         uint32_t cons = cpr->cons;
  195         bool v_bit = cpr->v_bit;
  196         bool last_v_bit;
  197         uint32_t last_cons;
  198         uint16_t type;
  199         uint16_t err;
  200 
  201         for (;;) {
  202                 last_cons = cons;
  203                 last_v_bit = v_bit;
  204                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  205                 CMPL_PREFETCH_NEXT(cpr, cons);
  206 
  207                 if (!CMP_VALID(&cmpl[cons], v_bit))
  208                         goto done;
  209 
  210                 type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
  211                 switch (type) {
  212                 case TX_CMPL_TYPE_TX_L2:
  213                         err = (le16toh(cmpl[cons].errors_v) &
  214                             TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
  215                             TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
  216                         if (err)
  217                                 device_printf(softc->dev,
  218                                     "TX completion error %u\n", err);
  219                         /* No need to byte-swap the opaque value */
  220                         avail += cmpl[cons].opaque >> 24;
  221                         /*
  222                          * If we're not clearing, iflib only cares if there's
  223                          * at least one buffer.  Don't scan the whole ring in
  224                          * this case.
  225                          */
  226                         if (!clear)
  227                                 goto done;
  228                         break;
  229                 default:
  230                         if (type & 1) {
  231                                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  232                                 if (!CMP_VALID(&cmpl[cons], v_bit))
  233                                         goto done;
  234                         }
  235                         device_printf(softc->dev,
  236                             "Unhandled TX completion type %u\n", type);
  237                         break;
  238                 }
  239         }
  240 done:
  241 
  242         if (clear && avail) {
  243                 cpr->cons = last_cons;
  244                 cpr->v_bit = last_v_bit;
  245                 softc->db_ops.bnxt_db_tx_cq(cpr, 0);
  246         }
  247 
  248         return avail;
  249 }
  250 
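The completion ring exposes no producer index the host can read; instead each entry carries a valid ("V") bit whose sense flips every time the hardware laps the ring. The driver mirrors that by keeping its own expected phase (cpr->v_bit) and toggling it whenever its consumer index wraps, which is what NEXT_CP_CONS_V does, while CMP_VALID compares an entry's V flag against the expected phase. A standalone sketch of that bookkeeping, with an invented ring size and loop:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8

/* Advance the consumer index, flipping the expected phase on wrap. */
static void
next_cons_v(unsigned *cons, bool *v_bit)
{
        if (++(*cons) == DEMO_RING_SIZE) {
                *cons = 0;
                *v_bit = !*v_bit;       /* new lap, new expected phase */
        }
}

int
main(void)
{
        unsigned cons = 0;
        bool v_bit = true;

        for (int i = 0; i < 2 * DEMO_RING_SIZE; i++) {
                next_cons_v(&cons, &v_bit);
                printf("cons=%u expected_v=%d\n", cons, v_bit);
        }
        return (0);
}
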
  251 static void
  252 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
  253 {
  254         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  255         struct bnxt_ring *rx_ring;
  256         struct rx_prod_pkt_bd *rxbd;
  257         uint16_t type;
  258         uint16_t i;
  259         uint16_t rxqid;
  260         uint16_t count;
  261         uint32_t pidx;
  262         uint8_t flid;
  263         uint64_t *paddrs;
  264         qidx_t  *frag_idxs;
  265 
  266         rxqid = iru->iru_qsidx;
  267         count = iru->iru_count;
  268         pidx = iru->iru_pidx;
  269         flid = iru->iru_flidx;
  270         paddrs = iru->iru_paddrs;
  271         frag_idxs = iru->iru_idxs;
  272 
  273         if (flid == 0) {
  274                 rx_ring = &softc->rx_rings[rxqid];
  275                 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
  276         }
  277         else {
  278                 rx_ring = &softc->ag_rings[rxqid];
  279                 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
  280         }
  281         rxbd = (void *)rx_ring->vaddr;
  282 
  283         for (i=0; i<count; i++) {
  284                 rxbd[pidx].flags_type = htole16(type);
  285                 rxbd[pidx].len = htole16(softc->rx_buf_size);
  286                 /* No need to byte-swap the opaque value */
  287                 rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
  288                     | (frag_idxs[i]));
  289                 rxbd[pidx].addr = htole64(paddrs[i]);
  290                 if (++pidx == rx_ring->ring_size)
  291                         pidx = 0;
  292         }
  293         return;
  294 }
  295 
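The RX opaque value written above packs the queue id into the top byte, the free-list id into the next byte, and the iflib fragment index into the low 16 bits; bnxt_pkt_get_l2() and bnxt_pkt_get_tpa() later unpack the low 24 bits to tell iflib which buffer each completion describes. A standalone round-trip with invented sample values:

#include <stdio.h>

int
main(void)
{
        unsigned rxqid = 2, flid = 1, frag_idx = 0x0123;

        unsigned opaque = ((rxqid & 0xff) << 24) | (flid << 16) | frag_idx;

        printf("opaque = 0x%08x\n", opaque);
        printf("flid   = %u\n", (opaque >> 16) & 0xff);   /* 1      */
        printf("idx    = 0x%04x\n", opaque & 0xffff);     /* 0x0123 */
        return (0);
}
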
  296 static void
  297 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
  298     qidx_t pidx)
  299 {
  300         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  301         struct bnxt_ring *rx_ring;
  302 
  303         if (flid == 0)
  304                 rx_ring = &softc->rx_rings[rxqid];
  305         else
  306                 rx_ring = &softc->ag_rings[rxqid];
  307 
  308         /*
  309          * We *must* update the completion ring before updating the RX ring
  310          * or we will overrun the completion ring and the device will wedge for
  311          * RX.
  312          */
  313         softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
  314         softc->db_ops.bnxt_db_rx(rx_ring, pidx);
  315         return;
  316 }
  317 
  318 static int
  319 bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
  320 {
  321         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  322         struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
  323         struct rx_pkt_cmpl *rcp;
  324         struct rx_tpa_end_cmpl *rtpae;
  325         struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
  326         int avail = 0;
  327         uint32_t cons = cpr->cons;
  328         bool v_bit = cpr->v_bit;
  329         uint8_t ags;
  330         int i;
  331         uint16_t type;
  332 
  333         for (;;) {
  334                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  335                 CMPL_PREFETCH_NEXT(cpr, cons);
  336 
  337                 if (!CMP_VALID(&cmp[cons], v_bit))
  338                         goto cmpl_invalid;
  339 
  340                 type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
  341                 switch (type) {
  342                 case CMPL_BASE_TYPE_RX_L2:
  343                         rcp = (void *)&cmp[cons];
  344                         ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
  345                             RX_PKT_CMPL_AGG_BUFS_SFT;
  346                         NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  347                         CMPL_PREFETCH_NEXT(cpr, cons);
  348 
  349                         if (!CMP_VALID(&cmp[cons], v_bit))
  350                                 goto cmpl_invalid;
  351 
  352                         /* Now account for all the AG completions */
  353                         for (i=0; i<ags; i++) {
  354                                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  355                                 CMPL_PREFETCH_NEXT(cpr, cons);
  356                                 if (!CMP_VALID(&cmp[cons], v_bit))
  357                                         goto cmpl_invalid;
  358                         }
  359                         avail++;
  360                         break;
  361                 case CMPL_BASE_TYPE_RX_TPA_END:
  362                         rtpae = (void *)&cmp[cons];
  363                         ags = (rtpae->agg_bufs_v1 &
  364                             RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
  365                             RX_TPA_END_CMPL_AGG_BUFS_SFT;
  366                         NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  367                         CMPL_PREFETCH_NEXT(cpr, cons);
  368 
  369                         if (!CMP_VALID(&cmp[cons], v_bit))
  370                                 goto cmpl_invalid;
  371                         /* Now account for all the AG completions */
  372                         for (i=0; i<ags; i++) {
  373                                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  374                                 CMPL_PREFETCH_NEXT(cpr, cons);
  375                                 if (!CMP_VALID(&cmp[cons], v_bit))
  376                                         goto cmpl_invalid;
  377                         }
  378                         avail++;
  379                         break;
  380                 case CMPL_BASE_TYPE_RX_TPA_START:
  381                         NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  382                         CMPL_PREFETCH_NEXT(cpr, cons);
  383 
  384                         if (!CMP_VALID(&cmp[cons], v_bit))
  385                                 goto cmpl_invalid;
  386                         break;
  387                 case CMPL_BASE_TYPE_RX_AGG:
  388                         break;
  389                 default:
  390                         device_printf(softc->dev,
  391                             "Unhandled completion type %d on RXQ %d\n",
  392                             type, rxqid);
  393 
  394                         /* Odd completion types use two completions */
  395                         if (type & 1) {
  396                                 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
  397                                 CMPL_PREFETCH_NEXT(cpr, cons);
  398 
  399                                 if (!CMP_VALID(&cmp[cons], v_bit))
  400                                         goto cmpl_invalid;
  401                         }
  402                         break;
  403                 }
  404                 if (avail > budget)
  405                         break;
  406         }
  407 cmpl_invalid:
  408 
  409         return avail;
  410 }
  411 
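Each ready packet occupies a variable number of completion entries: a base entry, a second "hi" entry, and one entry per aggregation buffer. The scan above stops as soon as more packets than the budget are known to be complete rather than walking the whole ring. A standalone sketch of that accounting over an invented run of L2 completions:

#include <stdio.h>

int
main(void)
{
        /* Aggregation-buffer counts for a made-up series of packets. */
        const int ag_bufs[] = { 0, 2, 0, 5, 1, 0 };
        const int budget = 3;
        int avail = 0, entries = 0;

        for (unsigned i = 0; i < sizeof(ag_bufs) / sizeof(ag_bufs[0]); i++) {
                entries += 2 + ag_bufs[i];      /* base + hi + AG entries  */
                avail++;
                if (avail > budget)             /* same early-out as above */
                        break;
        }
        printf("%d packets available (spanning %d completion entries)\n",
            avail, entries);
        return (0);
}
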
  412 static void
  413 bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
  414 {
  415         uint8_t rss_profile_id;
  416 
  417         rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
  418         switch (rss_profile_id) {
  419         case BNXT_RSS_HASH_TYPE_TCPV4:
  420                 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
  421                 break;
  422         case BNXT_RSS_HASH_TYPE_UDPV4:
  423                 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
  424                 break;
  425         case BNXT_RSS_HASH_TYPE_IPV4:
  426                 ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
  427                 break;
  428         case BNXT_RSS_HASH_TYPE_TCPV6:
  429                 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
  430                 break;
  431         case BNXT_RSS_HASH_TYPE_UDPV6:
  432                 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
  433                 break;
  434         case BNXT_RSS_HASH_TYPE_IPV6:
  435                 ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
  436                 break;
  437         default:
  438                 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
  439                 break;
  440         }
  441 }
  442 
  443 static int
  444 bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
  445     struct bnxt_cp_ring *cpr, uint16_t flags_type)
  446 {
  447         struct rx_pkt_cmpl *rcp;
  448         struct rx_pkt_cmpl_hi *rcph;
  449         struct rx_abuf_cmpl *acp;
  450         uint32_t flags2;
  451         uint32_t errors;
  452         uint8_t ags;
  453         int i;
  454 
  455         rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];
  456 
  457         /* Extract from the first 16-byte BD */
  458         if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
  459                 ri->iri_flowid = le32toh(rcp->rss_hash);
  460                 bnxt_set_rsstype(ri, rcp->rss_hash_type);
  461         }
  462         else {
  463                 ri->iri_rsstype = M_HASHTYPE_NONE;
  464         }
  465         ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
  466             RX_PKT_CMPL_AGG_BUFS_SFT;
  467         ri->iri_nfrags = ags + 1;
  468         /* No need to byte-swap the opaque value */
  469         ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
  470         ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
  471         ri->iri_frags[0].irf_len = le16toh(rcp->len);
  472         ri->iri_len = le16toh(rcp->len);
  473 
  474         /* Now the second 16-byte BD */
  475         NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  476         ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  477         rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];
  478 
  479         flags2 = le32toh(rcph->flags2);
  480         errors = le16toh(rcph->errors_v2);
  481         if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
  482             RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
  483                 ri->iri_flags |= M_VLANTAG;
  484                 /* TODO: Should this be the entire 16-bits? */
  485                 ri->iri_vtag = le32toh(rcph->metadata) &
  486                     (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
  487                     RX_PKT_CMPL_METADATA_PRI_MASK);
  488         }
  489         if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
  490                 ri->iri_csum_flags |= CSUM_IP_CHECKED;
  491                 if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
  492                         ri->iri_csum_flags |= CSUM_IP_VALID;
  493         }
  494         if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
  495                       RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
  496                 ri->iri_csum_flags |= CSUM_L4_CALC;
  497                 if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
  498                                 RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
  499                         ri->iri_csum_flags |= CSUM_L4_VALID;
  500                         ri->iri_csum_data = 0xffff;
  501                 }
  502         }
  503 
  504         /* And finally the ag ring stuff. */
  505         for (i=1; i < ri->iri_nfrags; i++) {
  506                 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  507                 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  508                 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
  509 
  510                 /* No need to byte-swap the opaque value */
  511                 ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
  512                 ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
  513                 ri->iri_frags[i].irf_len = le16toh(acp->len);
  514                 ri->iri_len += le16toh(acp->len);
  515         }
  516 
  517         return 0;
  518 }
  519 
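The VLAN path above keeps the VID, DE and PRI fields of the completion metadata in iri_vtag, which amounts to the low 16 bits if, as the layout of those masks suggests, they form a standard 802.1Q TCI. A standalone decode under that assumption, using the generic TCI layout rather than the driver's RX_PKT_CMPL_METADATA_* constants, and an invented metadata word:

#include <stdio.h>

int
main(void)
{
        unsigned metadata = 0x8100a064;         /* invented example        */
        unsigned tci = metadata & 0xffff;       /* what iri_vtag would get */

        printf("vtag = 0x%04x\n", tci);
        printf("pcp  = %u\n", (tci >> 13) & 0x7);  /* priority code point */
        printf("dei  = %u\n", (tci >> 12) & 0x1);  /* drop eligible       */
        printf("vid  = %u\n", tci & 0xfff);        /* VLAN ID             */
        return (0);
}
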
  520 static int
  521 bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
  522     struct bnxt_cp_ring *cpr, uint16_t flags_type)
  523 {
  524         struct rx_tpa_end_cmpl *agend =
  525             &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
  526         struct rx_abuf_cmpl *acp;
  527         struct bnxt_full_tpa_start *tpas;
  528         uint32_t flags2;
  529         uint8_t ags;
  530         uint8_t agg_id;
  531         int i;
  532 
  533         /* Get the agg_id */
  534         agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
  535             RX_TPA_END_CMPL_AGG_ID_SFT;
  536         tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);
  537 
  538         /* Extract from the first 16-byte BD */
  539         if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
  540                 ri->iri_flowid = le32toh(tpas->low.rss_hash);
  541                 bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
  542         }
  543         else {
  544                 ri->iri_rsstype = M_HASHTYPE_NONE;
  545         }
  546         ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
  547             RX_TPA_END_CMPL_AGG_BUFS_SFT;
  548         ri->iri_nfrags = ags + 1;
  549         /* No need to byte-swap the opaque value */
  550         ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
  551         ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
  552         ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
  553         ri->iri_len = le16toh(tpas->low.len);
  554 
  555         /* Now the second 16-byte BD */
  556         NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  557         ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  558 
  559         flags2 = le32toh(tpas->high.flags2);
  560         if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
  561             RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
  562                 ri->iri_flags |= M_VLANTAG;
  563                 /* TODO: Should this be the entire 16-bits? */
  564                 ri->iri_vtag = le32toh(tpas->high.metadata) &
  565                     (RX_TPA_START_CMPL_METADATA_VID_MASK |
  566                     RX_TPA_START_CMPL_METADATA_DE |
  567                     RX_TPA_START_CMPL_METADATA_PRI_MASK);
  568         }
  569         if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
  570                 ri->iri_csum_flags |= CSUM_IP_CHECKED;
  571                 ri->iri_csum_flags |= CSUM_IP_VALID;
  572         }
  573         if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
  574                 ri->iri_csum_flags |= CSUM_L4_CALC;
  575                 ri->iri_csum_flags |= CSUM_L4_VALID;
  576                 ri->iri_csum_data = 0xffff;
  577         }
  578 
  579         /* Now the ag ring stuff. */
  580         for (i=1; i < ri->iri_nfrags; i++) {
  581                 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  582                 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  583                 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
  584 
  585                 /* No need to byte-swap the opaque value */
  586                 ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
  587                 ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
  588                 ri->iri_frags[i].irf_len = le16toh(acp->len);
  589                 ri->iri_len += le16toh(acp->len);
  590         }
  591 
  592         /* And finally, the empty BD at the end... */
  593         ri->iri_nfrags++;
  594         /* No need to byte-swap the opaque value */
  595         ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
  596         ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
  597         ri->iri_frags[i].irf_len = le16toh(agend->len);
  598         ri->iri_len += le16toh(agend->len);
  599 
  600         return 0;
  601 }
  602 
  603 /* If we return anything but zero, iflib will assert... */
  604 static int
  605 bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
  606 {
  607         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  608         struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
  609         struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
  610         struct cmpl_base *cmp;
  611         struct rx_tpa_start_cmpl *rtpa;
  612         uint16_t flags_type;
  613         uint16_t type;
  614         uint8_t agg_id;
  615 
  616         for (;;) {
  617                 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  618                 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  619                 CMPL_PREFETCH_NEXT(cpr, cpr->cons);
  620                 cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];
  621 
  622                 flags_type = le16toh(cmp->type);
  623                 type = flags_type & CMPL_BASE_TYPE_MASK;
  624 
  625                 switch (type) {
  626                 case CMPL_BASE_TYPE_RX_L2:
  627                         return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
  628                 case CMPL_BASE_TYPE_RX_TPA_END:
  629                         return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
  630                 case CMPL_BASE_TYPE_RX_TPA_START:
  631                         rtpa = (void *)&cmp_q[cpr->cons];
  632                         agg_id = (rtpa->agg_id &
  633                             RX_TPA_START_CMPL_AGG_ID_MASK) >>
  634                             RX_TPA_START_CMPL_AGG_ID_SFT;
  635                         softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;
  636 
  637                         NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
  638                         ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
  639                         CMPL_PREFETCH_NEXT(cpr, cpr->cons);
  640 
  641                         softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
  642                             ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
  643                         break;
  644                 default:
  645                         device_printf(softc->dev,
  646                             "Unhandled completion type %d on RXQ %d get\n",
  647                             type, ri->iri_qsidx);
  648                         if (type & 1) {
  649                                 NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
  650                                     cpr->v_bit);
  651                                 ri->iri_cidx = RING_NEXT(&cpr->ring,
  652                                     ri->iri_cidx);
  653                                 CMPL_PREFETCH_NEXT(cpr, cpr->cons);
  654                         }
  655                         break;
  656                 }
  657         }
  658 
  659         return 0;
  660 }
  661 
  662 static int
  663 bnxt_intr(void *sc)
  664 {
  665         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
  666 
  667         device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
  668         return ENOSYS;
  669 }
