
FreeBSD/Linux Kernel Cross Reference
sys/dev/e1000/igb_txrx.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);
static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);

static void igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
static int igb_determine_rsstype(uint16_t pkt_info);

extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

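/*
 * iflib transmit/receive operations vector for igb-class (82575 and newer)
 * adapters; iflib invokes these handlers for descriptor encap, transmit
 * completion accounting, and receive-ring maintenance.
 */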
struct if_txrx igb_txrx = {
        .ift_txd_encap = igb_isc_txd_encap,
        .ift_txd_flush = igb_isc_txd_flush,
        .ift_txd_credits_update = igb_isc_txd_credits_update,
        .ift_rxd_available = igb_isc_rxd_available,
        .ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
        .ift_rxd_refill = igb_isc_rxd_refill,
        .ift_rxd_flush = igb_isc_rxd_flush,
        .ift_legacy_intr = em_intr
};

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
        struct e1000_adv_tx_context_desc *TXD;
        struct e1000_softc *sc = txr->sc;
        uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
        uint32_t mss_l4len_idx = 0;
        uint32_t paylen;

        switch (pi->ipi_etype) {
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
                break;
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
                break;
        default:
                panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
                      __func__, ntohs(pi->ipi_etype));
                break;
        }

        TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /* TSO payload length: total length minus L2, L3, and L4 headers. */
        paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

        /* VLAN MACLEN IPLEN */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
        }

        vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= pi->ipi_ip_hlen;
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);

        /* ADV DTYPE TUCMD */
        type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

        /* MSS L4LEN IDX */
        mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
        /* 82575 needs the queue index added */
        if (sc->hw.mac.type == e1000_82575)
                mss_l4len_idx |= txr->me << 4;
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        TXD->u.seqnum_seed = htole32(0);
        *cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
        *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

        /* One context descriptor was consumed. */
        return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 *  Returns the number of descriptors consumed: 1 if a context
 *  descriptor was written, 0 if none was needed.
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
        struct e1000_adv_tx_context_desc *TXD;
        struct e1000_softc *sc = txr->sc;
        uint32_t vlan_macip_lens, type_tucmd_mlhl;
        uint32_t mss_l4len_idx;

        mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

        /* First check if TSO is to be used */
        if (pi->ipi_csum_flags & CSUM_TSO)
                return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

        /* Indicate the whole packet as payload when not doing TSO */
        *olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

        /* Now ready a context descriptor */
        TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];

        /*
         * In advanced descriptors the vlan tag must
         * be placed into the context descriptor. Hence
         * we need to make one even if not doing offloads.
         */
        if (pi->ipi_mflags & M_VLANTAG) {
                vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
        } else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
                return (0);
        }

        /* Set the ether header length */
        vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

        switch (pi->ipi_etype) {
        case ETHERTYPE_IP:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
                break;
        case ETHERTYPE_IPV6:
                type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
                break;
        default:
                break;
        }

        vlan_macip_lens |= pi->ipi_ip_hlen;
        type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

        switch (pi->ipi_ipproto) {
        case IPPROTO_TCP:
                if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
                        *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
                }
                break;
        case IPPROTO_UDP:
                if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
                        *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
                }
                break;
        case IPPROTO_SCTP:
                if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
                        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                        *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
                }
                break;
        default:
                break;
        }

        /* 82575 needs the queue index added */
        if (sc->hw.mac.type == e1000_82575)
                mss_l4len_idx = txr->me << 4;

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->u.seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        return (1);
}

static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct e1000_softc *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        int nsegs = pi->ipi_nsegs;
        bus_dma_segment_t *segs = pi->ipi_segs;
        union e1000_adv_tx_desc *txd = NULL;
        int i, j, pidx_last;
        uint32_t olinfo_status, cmd_type_len, txd_flags;
        qidx_t ntxd;

        pidx_last = olinfo_status = 0;
        /* Basic descriptor defines */
        cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
                        E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

        if (pi->ipi_mflags & M_VLANTAG)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

        i = pi->ipi_pidx;
        ntxd = scctx->isc_ntxd[0];
        txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
        /* Consume the first descriptor */
        i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
        if (i == scctx->isc_ntxd[0])
                i = 0;

        /* 82575 needs the queue index added */
        if (sc->hw.mac.type == e1000_82575)
                olinfo_status |= txr->me << 4;

        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
                seglen = segs[j].ds_len;
                segaddr = htole64(segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
                    cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);
                pidx_last = i;
                if (++i == scctx->isc_ntxd[0]) {
                        i = 0;
                }
        }
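        /*
         * If the caller requested a completion interrupt, record the
         * last descriptor index of this packet in the report-status
         * queue; igb_isc_txd_credits_update() walks that queue to
         * reclaim completed descriptors.
         */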
        if (txd_flags) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
                MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
        }

        txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
        pi->ipi_new_pidx = i;

        return (0);
}

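/*
 * Transmit doorbell: publish the new producer index to the transmit
 * descriptor tail (TDT) register so the hardware begins fetching the
 * freshly written descriptors.
 */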
static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct e1000_softc *sc = arg;
        struct em_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct e1000_softc *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        qidx_t processed = 0;
        int updated;
        qidx_t cur, prev, ntxd, rs_cidx;
        int32_t delta;
        uint8_t status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);
        cur = txr->tx_rsq[rs_cidx];
        status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
        updated = !!(status & E1000_TXD_STAT_DD);

        if (!updated)
                return (0);

        /*
         * If clear is false, just let the caller know that there
         * are descriptors to reclaim.
         */
        if (!clear)
                return (1);

        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
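        /*
         * Walk the report-status queue while the DD (descriptor done)
         * bit is set, summing the ring distance covered by each
         * completed packet; the total is the number of descriptors
         * that can be reclaimed.
         */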
        do {
                MPASS(prev != cur);
                delta = (int32_t)cur - (int32_t)prev;
                if (delta < 0)
                        delta += ntxd;
                MPASS(delta > 0);

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd - 1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;
                cur = txr->tx_rsq[rs_cidx];
                status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
        } while ((status & E1000_TXD_STAT_DD));

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;
        return (processed);
}

static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct e1000_softc *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        uint16_t rxqid = iru->iru_qsidx;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        union e1000_adv_rx_desc *rxd;
        struct rx_ring *rxr = &que->rxr;
        uint64_t *paddrs;
        uint32_t next_pidx, pidx;
        uint16_t count;
        int i;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

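        /*
         * Post the supplied physical buffer addresses into the receive
         * ring, wrapping at the end of the ring; the hardware only sees
         * them once igb_isc_rxd_flush() advances the tail register.
         */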
        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

                rxd->read.pkt_addr = htole64(paddrs[i]);
                if (++next_pidx == scctx->isc_nrxd[0])
                        next_pidx = 0;
        }
}

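/*
 * Receive doorbell: advance the receive descriptor tail (RDT) register,
 * handing ownership of the refilled buffers to the hardware.
 */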
static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
        struct e1000_softc *sc = arg;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;

        E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct e1000_softc *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        union e1000_adv_rx_desc *rxd;
        uint32_t staterr = 0;
        int cnt, i;

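        /*
         * Scan forward from idx while the DD (descriptor done) bit is
         * set, counting only EOP descriptors so that cnt reflects
         * complete packets, up to the caller's budget.
         */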
        for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & E1000_RXD_STAT_DD) == 0)
                        break;
                if (++i == scctx->isc_nrxd[0])
                        i = 0;
                if (staterr & E1000_RXD_STAT_EOP)
                        cnt++;
        }
        return (cnt);
}

/****************************************************************
 * This routine passes data that has been DMA'd into host memory
 * up to the network stack, initializing the if_rxd_info structure
 * as it goes.
 *
 * Returns 0 upon success, errno on failure.
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct e1000_softc *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        union e1000_adv_rx_desc *rxd;

        uint16_t pkt_info, len;
        uint32_t ptype, staterr;
        int i, cidx;
        bool eop;

        staterr = i = 0;
        cidx = ri->iri_cidx;

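        /*
         * Gather every descriptor belonging to this packet; the
         * hardware sets EOP on the final fragment. Each fragment is
         * recorded in ri->iri_frags for iflib to reassemble.
         */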
        do {
                rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

                MPASS((staterr & E1000_RXD_STAT_DD) != 0);

                len = le16toh(rxd->wb.upper.length);
                ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

                ri->iri_len += len;

                rxd->wb.upper.status_error = 0;
                eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

                /* Make sure bad packets are discarded */
                if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
                        sc->dropped_pkts++;
                        ++rxr->rx_discarded;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;

                if (++cidx == scctx->isc_nrxd[0])
                        cidx = 0;
#ifdef notyet
                if (rxr->hdr_split == true) {
                        ri->iri_frags[i].irf_flid = 1;
                        ri->iri_frags[i].irf_idx = cidx;
                        if (++cidx == scctx->isc_nrxd[0])
                                cidx = 0;
                }
#endif
                i++;
        } while (!eop);

        rxr->rx_packets++;
        /* Count the packet's bytes once, now that all fragments are in. */
        rxr->rx_bytes += ri->iri_len;

        if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
                igb_rx_checksum(staterr, ri, ptype);

        if (staterr & E1000_RXD_STAT_VP) {
                if (((sc->hw.mac.type == e1000_i350) ||
                    (sc->hw.mac.type == e1000_i354)) &&
                    (staterr & E1000_RXDEXT_STATERR_LB))
                        ri->iri_vtag = be16toh(rxd->wb.upper.vlan);
                else
                        ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
                ri->iri_flags |= M_VLANTAG;
        }

        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = igb_determine_rsstype(pkt_info);
        ri->iri_nfrags = i;

        return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it again.
 *
 *********************************************************************/
static void
igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
        uint16_t status = (uint16_t)staterr;
        uint8_t errors = (uint8_t)(staterr >> 24);

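        /* IXSM means the hardware did not compute checksums for this frame. */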
        if (__predict_false(status & E1000_RXD_STAT_IXSM))
                return;

        /* If there is a layer 3 or 4 error we are done */
        if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
                return;

        /* IP Checksum Good */
        if (status & E1000_RXD_STAT_IPCS)
                ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

        /* Valid L4 checksum */
        if (__predict_true(status &
            (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
                /* SCTP header present */
                if (__predict_false((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
                    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
                        ri->iri_csum_flags |= CSUM_SCTP_VALID;
                } else {
                        ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        ri->iri_csum_data = htons(0xffff);
                }
        }
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate RSS hash type
 *
 ******************************************************************/
static int
igb_determine_rsstype(uint16_t pkt_info)
{
        switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
        case E1000_RXDADV_RSSTYPE_IPV4_TCP:
                return M_HASHTYPE_RSS_TCP_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV4:
                return M_HASHTYPE_RSS_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP:
                return M_HASHTYPE_RSS_TCP_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_EX:
                return M_HASHTYPE_RSS_IPV6_EX;
        case E1000_RXDADV_RSSTYPE_IPV6:
                return M_HASHTYPE_RSS_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
        default:
                return M_HASHTYPE_OPAQUE;
        }
}
