The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ixgbe/ix_txrx.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /******************************************************************************
    2 
    3   Copyright (c) 2001-2017, Intel Corporation
    4   All rights reserved.
    5 
    6   Redistribution and use in source and binary forms, with or without
    7   modification, are permitted provided that the following conditions are met:
    8 
    9    1. Redistributions of source code must retain the above copyright notice,
   10       this list of conditions and the following disclaimer.
   11 
   12    2. Redistributions in binary form must reproduce the above copyright
   13       notice, this list of conditions and the following disclaimer in the
   14       documentation and/or other materials provided with the distribution.
   15 
   16    3. Neither the name of the Intel Corporation nor the names of its
   17       contributors may be used to endorse or promote products derived from
   18       this software without specific prior written permission.
   19 
   20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30   POSSIBILITY OF SUCH DAMAGE.
   31 
   32 ******************************************************************************/
   33 /*$FreeBSD$*/
   34 
   35 #ifndef IXGBE_STANDALONE_BUILD
   36 #include "opt_inet.h"
   37 #include "opt_inet6.h"
   38 #include "opt_rss.h"
   39 #endif
   40 
   41 #include "ixgbe.h"
   42 
   43 /************************************************************************
   44  * Local Function prototypes
   45  ************************************************************************/
   46 static int ixgbe_isc_txd_encap(void *, if_pkt_info_t);
   47 static void ixgbe_isc_txd_flush(void *, uint16_t, qidx_t);
   48 static int ixgbe_isc_txd_credits_update(void *, uint16_t, bool);
   49 
   50 static void ixgbe_isc_rxd_refill(void *, if_rxd_update_t);
   51 static void ixgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
   52 static int ixgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
   53 static int ixgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
   54 
   55 static void ixgbe_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
   56 static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *,
   57     if_pkt_info_t);
   58 
   59 extern void ixgbe_if_enable_intr(if_ctx_t ctx);
   60 static int ixgbe_determine_rsstype(uint16_t pkt_info);
   61 
   62 struct if_txrx ixgbe_txrx  = {
   63         .ift_txd_encap = ixgbe_isc_txd_encap,
   64         .ift_txd_flush = ixgbe_isc_txd_flush,
   65         .ift_txd_credits_update = ixgbe_isc_txd_credits_update,
   66         .ift_rxd_available = ixgbe_isc_rxd_available,
   67         .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
   68         .ift_rxd_refill = ixgbe_isc_rxd_refill,
   69         .ift_rxd_flush = ixgbe_isc_rxd_flush,
   70         .ift_legacy_intr = NULL
   71 };
   72 
   73 /************************************************************************
   74  * ixgbe_tx_ctx_setup
   75  *
   76  *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
   77  *
   78  ************************************************************************/
   79 static int
   80 ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
   81 {
   82         uint32_t vlan_macip_lens, type_tucmd_mlhl;
   83         uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
   84         u8  ehdrlen;
   85 
   86         offload = true;
   87         olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
   88         /* VLAN MACLEN IPLEN */
   89         vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);
   90 
   91         /*
   92          * Some of our VF devices need a context descriptor for every
   93          * packet.  That means the ehdrlen needs to be non-zero in order
   94          * for the host driver not to flag a malicious event. The stack
   95          * will most likely populate this for all other reasons of why
   96          * this function was called.
   97          */
   98         if (pi->ipi_ehdrlen == 0) {
   99                 ehdrlen = ETHER_HDR_LEN;
  100                 ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
  101         } else
  102                 ehdrlen = pi->ipi_ehdrlen;
  103         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
  104 
  105         pktlen = pi->ipi_len;
  106         /* First check if TSO is to be used */
  107         if (pi->ipi_csum_flags & CSUM_TSO) {
  108                 /* This is used in the transmit desc in encap */
  109                 pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
  110                 mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
  111                 mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
  112         }
  113 
  114         olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
  115 
  116         if (pi->ipi_flags & IPI_TX_IPV4) {
  117                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
  118                 /* Tell transmit desc to also do IPv4 checksum. */
  119                 if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
  120                         olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
  121         } else if (pi->ipi_flags & IPI_TX_IPV6)
  122                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
  123         else
  124                 offload = false;
  125 
  126         vlan_macip_lens |= pi->ipi_ip_hlen;
  127 
  128         switch (pi->ipi_ipproto) {
  129         case IPPROTO_TCP:
  130                 if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
  131                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
  132                 else
  133                         offload = false;
  134                 break;
  135         case IPPROTO_UDP:
  136                 if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
  137                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
  138                 else
  139                         offload = false;
  140                 break;
  141         case IPPROTO_SCTP:
  142                 if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
  143                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
  144                 else
  145                         offload = false;
  146                 break;
  147         default:
  148                 offload = false;
  149                 break;
  150         }
  151         /* Insert L4 checksum into data descriptors */
  152         if (offload)
  153                 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
  154 
  155         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
  156 
  157         /* Now copy bits into descriptor */
  158         TXD->vlan_macip_lens = htole32(vlan_macip_lens);
  159         TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
  160         TXD->seqnum_seed = htole32(0);
  161         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
  162 
  163         return (olinfo_status);
  164 } /* ixgbe_tx_ctx_setup */
  165 
  166 /************************************************************************
  167  * ixgbe_isc_txd_encap
  168  ************************************************************************/
  169 static int
  170 ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
  171 {
  172         struct ixgbe_softc               *sc = arg;
  173         if_softc_ctx_t                   scctx = sc->shared;
  174         struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
  175         struct tx_ring                   *txr = &que->txr;
  176         int                              nsegs = pi->ipi_nsegs;
  177         bus_dma_segment_t                *segs = pi->ipi_segs;
  178         union ixgbe_adv_tx_desc          *txd = NULL;
  179         struct ixgbe_adv_tx_context_desc *TXD;
  180         int                              i, j, first, pidx_last;
  181         uint32_t                         olinfo_status, cmd, flags;
  182         qidx_t                           ntxd;
  183 
  184         cmd =  (IXGBE_ADVTXD_DTYP_DATA |
  185                 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
  186 
  187         if (pi->ipi_mflags & M_VLANTAG)
  188                 cmd |= IXGBE_ADVTXD_DCMD_VLE;
  189 
  190         i = first = pi->ipi_pidx;
  191         flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
  192         ntxd = scctx->isc_ntxd[0];
  193 
  194         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
  195         if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
  196             (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
  197             pi->ipi_vtag) {
  198                 /*********************************************
  199                  * Set up the appropriate offload context
  200                  * this will consume the first descriptor
  201                  *********************************************/
  202                 olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
  203                 if (pi->ipi_csum_flags & CSUM_TSO) {
  204                         cmd |= IXGBE_ADVTXD_DCMD_TSE;
  205                         ++txr->tso_tx;
  206                 }
  207 
  208                 if (++i == scctx->isc_ntxd[0])
  209                         i = 0;
  210         } else {
  211                 /* Indicate the whole packet as payload when not doing TSO */
  212                 olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
  213         }
  214 
  215         olinfo_status |= IXGBE_ADVTXD_CC;
  216         pidx_last = 0;
  217         for (j = 0; j < nsegs; j++) {
  218                 bus_size_t seglen;
  219 
  220                 txd = &txr->tx_base[i];
  221                 seglen = segs[j].ds_len;
  222 
  223                 txd->read.buffer_addr = htole64(segs[j].ds_addr);
  224                 txd->read.cmd_type_len = htole32(cmd | seglen);
  225                 txd->read.olinfo_status = htole32(olinfo_status);
  226 
  227                 pidx_last = i;
  228                 if (++i == scctx->isc_ntxd[0]) {
  229                         i = 0;
  230                 }
  231         }
  232 
  233         if (flags) {
  234                 txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
  235                 txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
  236         }
  237         txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);
  238 
  239         txr->bytes += pi->ipi_len;
  240         pi->ipi_new_pidx = i;
  241 
  242         ++txr->total_packets;
  243 
  244         return (0);
  245 } /* ixgbe_isc_txd_encap */
  246 
  247 /************************************************************************
  248  * ixgbe_isc_txd_flush
  249  ************************************************************************/
  250 static void
  251 ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
  252 {
  253         struct ixgbe_softc     *sc = arg;
  254         struct ix_tx_queue *que = &sc->tx_queues[txqid];
  255         struct tx_ring     *txr = &que->txr;
  256 
  257         IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
  258 } /* ixgbe_isc_txd_flush */
  259 
  260 /************************************************************************
  261  * ixgbe_isc_txd_credits_update
  262  ************************************************************************/
  263 static int
  264 ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
  265 {
  266         struct ixgbe_softc *sc = arg;
  267         if_softc_ctx_t     scctx = sc->shared;
  268         struct ix_tx_queue *que = &sc->tx_queues[txqid];
  269         struct tx_ring     *txr = &que->txr;
  270         qidx_t             processed = 0;
  271         int                updated;
  272         qidx_t             cur, prev, ntxd, rs_cidx;
  273         int32_t            delta;
  274         uint8_t            status;
  275 
  276         rs_cidx = txr->tx_rs_cidx;
  277         if (rs_cidx == txr->tx_rs_pidx)
  278                 return (0);
  279 
  280         cur = txr->tx_rsq[rs_cidx];
  281         status = txr->tx_base[cur].wb.status;
  282         updated = !!(status & IXGBE_TXD_STAT_DD);
  283 
  284         if (!updated)
  285                 return (0);
  286 
  287         /* If clear is false just let caller know that there
  288          * are descriptors to reclaim */
  289         if (!clear)
  290                 return (1);
  291 
  292         prev = txr->tx_cidx_processed;
  293         ntxd = scctx->isc_ntxd[0];
  294         do {
  295                 MPASS(prev != cur);
  296                 delta = (int32_t)cur - (int32_t)prev;
  297                 if (delta < 0)
  298                         delta += ntxd;
  299                 MPASS(delta > 0);
  300 
  301                 processed += delta;
  302                 prev = cur;
  303                 rs_cidx = (rs_cidx + 1) & (ntxd - 1);
  304                 if (rs_cidx == txr->tx_rs_pidx)
  305                         break;
  306 
  307                 cur = txr->tx_rsq[rs_cidx];
  308                 status = txr->tx_base[cur].wb.status;
  309         } while ((status & IXGBE_TXD_STAT_DD));
  310 
  311         txr->tx_rs_cidx = rs_cidx;
  312         txr->tx_cidx_processed = prev;
  313 
  314         return (processed);
  315 } /* ixgbe_isc_txd_credits_update */
  316 
  317 /************************************************************************
  318  * ixgbe_isc_rxd_refill
  319  ************************************************************************/
  320 static void
  321 ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
  322 {
  323         struct ixgbe_softc *sc   = arg;
  324         struct ix_rx_queue *que  = &sc->rx_queues[iru->iru_qsidx];
  325         struct rx_ring *rxr      = &que->rxr;
  326         uint64_t *paddrs;
  327         int i;
  328         uint32_t next_pidx, pidx;
  329         uint16_t count;
  330 
  331         paddrs = iru->iru_paddrs;
  332         pidx = iru->iru_pidx;
  333         count = iru->iru_count;
  334 
  335         for (i = 0, next_pidx = pidx; i < count; i++) {
  336                 rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
  337                 if (++next_pidx == sc->shared->isc_nrxd[0])
  338                         next_pidx = 0;
  339         }
  340 } /* ixgbe_isc_rxd_refill */
  341 
  342 /************************************************************************
  343  * ixgbe_isc_rxd_flush
  344  ************************************************************************/
  345 static void
  346 ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
  347 {
  348         struct ixgbe_softc *sc  = arg;
  349         struct ix_rx_queue *que = &sc->rx_queues[qsidx];
  350         struct rx_ring     *rxr = &que->rxr;
  351 
  352         IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
  353 } /* ixgbe_isc_rxd_flush */
  354 
  355 /************************************************************************
  356  * ixgbe_isc_rxd_available
  357  ************************************************************************/
  358 static int
  359 ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
  360 {
  361         struct ixgbe_softc      *sc = arg;
  362         struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
  363         struct rx_ring          *rxr = &que->rxr;
  364         union ixgbe_adv_rx_desc *rxd;
  365         uint32_t                 staterr;
  366         int                      cnt, i, nrxd;
  367 
  368         nrxd = sc->shared->isc_nrxd[0];
  369         for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
  370                 rxd = &rxr->rx_base[i];
  371                 staterr = le32toh(rxd->wb.upper.status_error);
  372 
  373                 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
  374                         break;
  375                 if (++i == nrxd)
  376                         i = 0;
  377                 if (staterr & IXGBE_RXD_STAT_EOP)
  378                         cnt++;
  379         }
  380         return (cnt);
  381 } /* ixgbe_isc_rxd_available */
  382 
  383 /************************************************************************
  384  * ixgbe_isc_rxd_pkt_get
  385  *
  386  *   Routine sends data which has been dma'ed into host memory
  387  *   to upper layer. Initialize ri structure.
  388  *
  389  *   Returns 0 upon success, errno on failure
  390  ************************************************************************/
  391 
  392 static int
  393 ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
  394 {
  395         struct ixgbe_softc       *sc = arg;
  396         if_softc_ctx_t           scctx = sc->shared;
  397         struct ix_rx_queue       *que = &sc->rx_queues[ri->iri_qsidx];
  398         struct rx_ring           *rxr = &que->rxr;
  399         union ixgbe_adv_rx_desc  *rxd;
  400 
  401         uint16_t                  pkt_info, len, cidx, i;
  402         uint32_t                  ptype;
  403         uint32_t                  staterr = 0;
  404         bool                      eop;
  405 
  406         i = 0;
  407         cidx = ri->iri_cidx;
  408         do {
  409                 rxd = &rxr->rx_base[cidx];
  410                 staterr = le32toh(rxd->wb.upper.status_error);
  411                 pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
  412 
  413                 /* Error Checking then decrement count */
  414                 MPASS ((staterr & IXGBE_RXD_STAT_DD) != 0);
  415 
  416                 len = le16toh(rxd->wb.upper.length);
  417                 ptype = le32toh(rxd->wb.lower.lo_dword.data) &
  418                         IXGBE_RXDADV_PKTTYPE_MASK;
  419 
  420                 ri->iri_len += len;
  421                 rxr->bytes += len;
  422 
  423                 rxd->wb.upper.status_error = 0;
  424                 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
  425 
  426                 /* Make sure bad packets are discarded */
  427                 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
  428                         if (sc->feat_en & IXGBE_FEATURE_VF)
  429                                 if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS, 1);
  430 
  431                         rxr->rx_discarded++;
  432                         return (EBADMSG);
  433                 }
  434                 ri->iri_frags[i].irf_flid = 0;
  435                 ri->iri_frags[i].irf_idx = cidx;
  436                 ri->iri_frags[i].irf_len = len;
  437                 if (++cidx == sc->shared->isc_nrxd[0])
  438                         cidx = 0;
  439                 i++;
  440                 /* even a 16K packet shouldn't consume more than 8 clusters */
  441                 MPASS(i < 9);
  442         } while (!eop);
  443 
  444         rxr->rx_packets++;
  445         rxr->packets++;
  446         rxr->rx_bytes += ri->iri_len;
  447 
  448         if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
  449                 ixgbe_rx_checksum(staterr, ri,  ptype);
  450 
  451         ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
  452         ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
  453         if ((sc->feat_en & IXGBE_FEATURE_RSS) == 0) {
  454                 if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
  455                         ri->iri_rsstype = M_HASHTYPE_NONE;
  456                 else
  457                         ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
  458         }
  459         if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) {
  460                 ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
  461                 ri->iri_flags |= M_VLANTAG;
  462         }
  463 
  464         ri->iri_nfrags = i;
  465         return (0);
  466 } /* ixgbe_isc_rxd_pkt_get */
  467 
  468 /************************************************************************
  469  * ixgbe_rx_checksum
  470  *
  471  *   Verify that the hardware indicated that the checksum is valid.
  472  *   Inform the stack about the status of checksum so that stack
  473  *   doesn't spend time verifying the checksum.
  474  ************************************************************************/
  475 static void
  476 ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
  477 {
  478         uint16_t status = (uint16_t)staterr;
  479         uint8_t errors = (uint8_t)(staterr >> 24);
  480 
  481         /* If there is a layer 3 or 4 error we are done */
  482         if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
  483                 return;
  484 
  485         /* IP Checksum Good */
  486         if (status & IXGBE_RXD_STAT_IPCS)
  487                 ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
  488 
  489         /* Valid L4E checksum */
  490         if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) {
  491                 /* SCTP header present. */
  492                 if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
  493                     (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
  494                         ri->iri_csum_flags |= CSUM_SCTP_VALID;
  495                 } else {
  496                         ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
  497                         ri->iri_csum_data = htons(0xffff);
  498                 }
  499         }
  500 } /* ixgbe_rx_checksum */
  501 
  502 /************************************************************************
  503  * ixgbe_determine_rsstype
  504  *
  505  *   Parse the packet type to determine the appropriate hash
  506  ************************************************************************/
  507 static int
  508 ixgbe_determine_rsstype(uint16_t pkt_info)
  509 {
  510         switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
  511         case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
  512                 return M_HASHTYPE_RSS_TCP_IPV4;
  513         case IXGBE_RXDADV_RSSTYPE_IPV4:
  514                 return M_HASHTYPE_RSS_IPV4;
  515         case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
  516                 return M_HASHTYPE_RSS_TCP_IPV6;
  517         case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
  518                 return M_HASHTYPE_RSS_IPV6_EX;
  519         case IXGBE_RXDADV_RSSTYPE_IPV6:
  520                 return M_HASHTYPE_RSS_IPV6;
  521         case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
  522                 return M_HASHTYPE_RSS_TCP_IPV6_EX;
  523         case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
  524                 return M_HASHTYPE_RSS_UDP_IPV4;
  525         case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
  526                 return M_HASHTYPE_RSS_UDP_IPV6;
  527         case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
  528                 return M_HASHTYPE_RSS_UDP_IPV6_EX;
  529         default:
  530                 return M_HASHTYPE_OPAQUE;
  531         }
  532 } /* ixgbe_determine_rsstype */

Cache object: 7eda34b99fc074abbfd17c174cf6abe6


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.