The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/netmap/ixgbe_netmap.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   23  * SUCH DAMAGE.
   24  */
   25 
   26 /*
   27  * $FreeBSD: releng/9.1/sys/dev/netmap/ixgbe_netmap.h 235549 2012-05-17 15:02:51Z luigi $
   28  * $Id: ixgbe_netmap.h 10627 2012-02-23 19:37:15Z luigi $
   29  *
   30  * netmap modifications for ixgbe
   31  *
   32  * This file is meant to be a reference on how to implement
   33  * netmap support for a network driver.
   34  * This file contains code but only static or inline functions
   35  * that are used by a single driver. To avoid replication of
   36  * code we just #include it near the beginning of the
   37  * standard driver.
   38  */
   39 
   40 #include <net/netmap.h>
   41 #include <sys/selinfo.h>
   42 /*
   43  * Some drivers may need the following headers. Others
   44  * already include them by default
   45 
   46 #include <vm/vm.h>
   47 #include <vm/pmap.h>
   48 
   49  */
   50 #include <dev/netmap/netmap_kern.h>
   51 
   52 /*
   53  * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
   54  *      During regular operations the CRC is stripped, but on some
   55  *      hardware reception of frames not multiple of 64 is slower,
   56  *      so using crcstrip=0 helps in benchmarks.
   57  *
   58  * ix_rx_miss, ix_rx_miss_bufs:
   59  *      count packets that might be missed due to lost interrupts.
   60  *
   61  * ix_use_dd
   62  *      use the dd bit for completed tx transmissions.
   63  *      This is tricky, much better to use TDH for now.
   64  */
SYSCTL_DECL(_dev_netmap);
/* ix_write_len: debugging knob; when nonzero, log the length of each rx frame. */
static int ix_write_len;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_write_len,
    CTLFLAG_RW, &ix_write_len, 0, "write rx len");
/* Tunables and counters described in the block comment above. */
static int ix_rx_miss, ix_rx_miss_bufs, ix_use_dd, ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
    CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_use_dd,
    CTLFLAG_RW, &ix_use_dd, 0, "use dd instead of tdh to detect tx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
    CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
    CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
   78 
   79 /*
   80  * wrapper to export locks to the generic netmap code.
   81  */
   82 static void
   83 ixgbe_netmap_lock_wrapper(struct ifnet *_a, int what, u_int queueid)
   84 {
   85         struct adapter *adapter = _a->if_softc;
   86 
   87         ASSERT(queueid < adapter->num_queues);
   88         switch (what) {
   89         case NETMAP_CORE_LOCK:
   90                 IXGBE_CORE_LOCK(adapter);
   91                 break;
   92         case NETMAP_CORE_UNLOCK:
   93                 IXGBE_CORE_UNLOCK(adapter);
   94                 break;
   95         case NETMAP_TX_LOCK:
   96                 IXGBE_TX_LOCK(&adapter->tx_rings[queueid]);
   97                 break;
   98         case NETMAP_TX_UNLOCK:
   99                 IXGBE_TX_UNLOCK(&adapter->tx_rings[queueid]);
  100                 break;
  101         case NETMAP_RX_LOCK:
  102                 IXGBE_RX_LOCK(&adapter->rx_rings[queueid]);
  103                 break;
  104         case NETMAP_RX_UNLOCK:
  105                 IXGBE_RX_UNLOCK(&adapter->rx_rings[queueid]);
  106                 break;
  107         }
  108 }
  109 
  110 
  111 static void
  112 set_crcstrip(struct ixgbe_hw *hw, int onoff)
  113 {
  114         /* crc stripping is set in two places:
  115          * IXGBE_HLREG0 (modified on init_locked and hw reset)
  116          * IXGBE_RDRXCTL (set by the original driver in
  117          *      ixgbe_setup_hw_rsc() called in init_locked.
  118          *      We disable the setting when netmap is compiled in).
  119          * We update the values here, but also in ixgbe.c because
  120          * init_locked sometimes is called outside our control.
  121          */
  122         uint32_t hl, rxc;
  123 
  124         hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  125         rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  126         if (netmap_verbose)
  127                 D("%s read  HLREG 0x%x rxc 0x%x",
  128                         onoff ? "enter" : "exit", hl, rxc);
  129         /* hw requirements ... */
  130         rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
  131         rxc |= IXGBE_RDRXCTL_RSCACKC;
  132         if (onoff && !ix_crcstrip) {
  133                 /* keep the crc. Fast rx */
  134                 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
  135                 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
  136         } else {
  137                 /* reset default mode */
  138                 hl |= IXGBE_HLREG0_RXCRCSTRP;
  139                 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
  140         }
  141         if (netmap_verbose)
  142                 D("%s write HLREG 0x%x rxc 0x%x",
  143                         onoff ? "enter" : "exit", hl, rxc);
  144         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
  145         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
  146 }
  147 
  148 /*
  149  * Register/unregister. We are already under core lock.
  150  * Only called on the first register or the last unregister.
  151  */
static int
ixgbe_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL; /* no netmap support here */

	/* quiesce the device before reconfiguring the rings */
	ixgbe_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	set_crcstrip(&adapter->hw, onoff);
	if (onoff) { /* enable netmap mode */
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit and replace with our routine */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		/*
		 * reinitialize the adapter, now with netmap flag set,
		 * so the rings will be set accordingly.
		 */
		ixgbe_init_locked(adapter);
		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			/* init failed: fall through into the 'else' arm to
			 * undo the netmap setup and restore standard mode.
			 */
			error = ENOMEM;
			goto fail;
		}
	} else { /* reset normal mode (explicit request or netmap failed) */
fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		/* initialize the card, this time in standard mode */
		ixgbe_init_locked(adapter);	/* also enables intr */
	}
	/* crcstrip must be re-applied after init_locked reset the registers */
	set_crcstrip(&adapter->hw, onoff);
	return (error);
}
  195 
  196 
  197 /*
  198  * Reconcile kernel and user view of the transmit ring.
  199  * This routine might be called frequently so it must be efficient.
  200  *
  201  * Userspace has filled tx slots up to ring->cur (excluded).
  202  * The last unused slot previously known to the kernel was kring->nkr_hwcur,
  203  * and the last interrupt reported kring->nr_hwavail slots available.
  204  *
  205  * This function runs under lock (acquired from the caller or internally).
  206  * It must first update ring->avail to what the kernel knows,
  207  * subtract the newly used slots (ring->cur - kring->nkr_hwcur)
  208  * from both avail and nr_hwavail, and set ring->nkr_hwcur = ring->cur
  209  * issuing a dmamap_sync on all slots.
  210  *
  211  * Since ring comes from userspace, its content must be read only once,
  212  * and validated before being used to update the kernel's structures.
  213  * (this is also true for every use of ring in the kernel).
  214  *
  215  * ring->avail is never used, only checked for bogus values.
  216  *
  217  * do_lock is set iff the function is called from the ioctl handler.
  218  * In this case, grab a lock around the body, and also reclaim transmitted
  219  * buffers irrespective of interrupt mitigation.
  220  */
static int
ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k = ring->cur, l, n = 0, lim = kring->nkr_num_slots - 1;

	/*
	 * ixgbe can generate an interrupt on every tx packet, but it
	 * seems very expensive, so we interrupt once every half ring,
	 * or when requested with NS_REPORT
	 */
	int report_frequency = kring->nkr_num_slots >> 1;

	/* ring->cur comes from userspace: validate before any other use */
	if (k > lim)
		return netmap_ring_reinit(kring);
	if (do_lock)
		IXGBE_TX_LOCK(txr);

	/* pick up any completion status written back by the NIC */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Process new packets to send. j is the current index in the
	 * netmap ring, l is the corresponding index in the NIC ring.
	 * The two numbers differ because upon a *_init() we reset
	 * the NIC ring but leave the netmap ring unchanged.
	 * For the transmit ring, we have
	 *
	 *		j = kring->nr_hwcur
	 *		l = IXGBE_TDT (not tracked in the driver)
	 * and
	 *		j == (l + kring->nkr_hwofs) % ring_size
	 *
	 * In this driver kring->nkr_hwofs >= 0, but for other
	 * drivers it might be negative as well.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		prefetch(&ring->slot[j]);
		l = netmap_idx_k2n(kring, j); /* NIC index */
		prefetch(&txr->tx_buffers[l]);
		for (n = 0; j != k; n++) {
			/*
			 * Collect per-slot info.
			 * Note that txbuf and curr are indexed by l.
			 *
			 * In this driver we collect the buffer address
			 * (using the PNMB() macro) because we always
			 * need to rewrite it into the NIC ring.
			 * Many other drivers preserve the address, so
			 * we only need to access it if NS_BUF_CHANGED
			 * is set.
			 * XXX note, on this device the dmamap* calls are
			 * not necessary because tag is 0, however just accessing
			 * the per-packet tag kills 1Mpps at 900 MHz.
			 */
			struct netmap_slot *slot = &ring->slot[j];
			union ixgbe_adv_tx_desc *curr = &txr->tx_base[l];
			struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[l];
			uint64_t paddr;
			// XXX type for flags and len ?
			/* request a write-back report on slot 0, on the
			 * report_frequency slot, or when NS_REPORT is set */
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					IXGBE_TXD_CMD_RS : 0;
			u_int len = slot->len;
			void *addr = PNMB(slot, &paddr);

			/* advance both indexes, then prefetch the next slots */
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
			prefetch(&ring->slot[j]);
			prefetch(&txr->tx_buffers[l]);

			/*
			 * Quick check for valid addr and len.
			 * NMB() returns netmap_buffer_base for invalid
			 * buffer indexes (but the address is still a
			 * valid one to be used in a ring). slot->len is
			 * unsigned so no need to check for negative values.
			 */
			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
ring_reset:
				if (do_lock)
					IXGBE_TX_UNLOCK(txr);
				return netmap_ring_reinit(kring);
			}

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, unload and reload map */
				netmap_reload_map(txr->txtag, txbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			/*
			 * Fill the slot in the NIC ring.
			 * In this driver we need to rewrite the buffer
			 * address in the NIC ring. Other drivers do not
			 * need this.
			 * Use legacy descriptor, it is faster.
			 */
			curr->read.buffer_addr = htole64(paddr);
			curr->read.olinfo_status = 0;
			curr->read.cmd_type_len = htole32(len | flags |
				IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE);
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		/* decrease avail by number of packets  sent */
		kring->nr_hwavail -= n;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* (re)start the transmitter up to slot l (excluded) */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), l);
	}

	/*
	 * Reclaim buffers for completed transmissions.
	 * Because this is expensive (we read a NIC register etc.)
	 * we only do it in specific cases (see below).
	 * In all cases kring->nr_kflags indicates which slot will be
	 * checked upon a tx interrupt (nkr_num_slots means none).
	 */
	if (do_lock) {
		j = 1; /* forced reclaim, ignore interrupts */
		kring->nr_kflags = kring->nkr_num_slots;
	} else if (kring->nr_hwavail > 0) {
		j = 0; /* buffers still available: no reclaim, ignore intr. */
		kring->nr_kflags = kring->nkr_num_slots;
	} else {
		/*
		 * no buffers available, locate a slot for which we request
		 * ReportStatus (approximately half ring after next_to_clean)
		 * and record it in kring->nr_kflags.
		 * If the slot has DD set, do the reclaim looking at TDH,
		 * otherwise we go to sleep (in netmap_poll()) and will be
		 * woken up when slot nr_kflags will be ready.
		 */
		struct ixgbe_legacy_tx_desc *txd =
		    (struct ixgbe_legacy_tx_desc *)txr->tx_base;

		j = txr->next_to_clean + kring->nkr_num_slots/2;
		if (j >= kring->nkr_num_slots)
			j -= kring->nkr_num_slots;
		// round to the closest with dd set
		j= (j < kring->nkr_num_slots / 4 || j >= kring->nkr_num_slots*3/4) ?
			0 : report_frequency;
		kring->nr_kflags = j; /* the slot to check */
		j = txd[j].upper.fields.status & IXGBE_TXD_STAT_DD;	// XXX cpu_to_le32 ?
	}
	if (j) {
		int delta;

		/*
		 * Record completed transmissions.
		 * We (re)use the driver's txr->next_to_clean to keep
		 * track of the most recently completed transmission.
		 *
		 * The datasheet discourages the use of TDH to find out the
		 * number of sent packets. We should rather check the DD
		 * status bit in a packet descriptor. However, we only set
		 * the "report status" bit for some descriptors (a kind of
		 * interrupt mitigation), so we can only check on those.
		 * For the time being we use TDH, as we do it infrequently
		 * enough not to pose performance problems.
		 */
	    if (ix_use_dd) {
		/* walk descriptors from next_to_clean counting DD bits */
		struct ixgbe_legacy_tx_desc *txd =
		    (struct ixgbe_legacy_tx_desc *)txr->tx_base;

		l = txr->next_to_clean;
		k = netmap_idx_k2n(kring, kring->nr_hwcur);
		delta = 0;
		while (l != k &&
		    txd[l].upper.fields.status & IXGBE_TXD_STAT_DD) {
		    delta++;
		    l = (l == lim) ? 0 : l + 1;
		}
	    } else {
		/* default path: read the hardware head pointer */
		l = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(ring_nr));
		if (l >= kring->nkr_num_slots) { /* XXX can happen */
			D("TDH wrap %d", l);
			l -= kring->nkr_num_slots;
		}
		/* may be negative if the head wrapped; fixed up below */
		delta = l - txr->next_to_clean;
	    }
		if (delta) {
			/* some tx completed, increment avail */
			if (delta < 0)
				delta += kring->nkr_num_slots;
			txr->next_to_clean = l;
			kring->nr_hwavail += delta;
			if (kring->nr_hwavail > lim)
				goto ring_reset;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		IXGBE_TX_UNLOCK(txr);
	return 0;
}
  430 
  431 
  432 /*
  433  * Reconcile kernel and user view of the receive ring.
  434  * Same as for the txsync, this routine must be efficient and
  435  * avoid races in accessing the shared regions.
  436  *
  437  * When called, userspace has read data from slots kring->nr_hwcur
  438  * up to ring->cur (excluded).
  439  *
  440  * The last interrupt reported kring->nr_hwavail slots available
  441  * after kring->nr_hwcur.
  442  * We must subtract the newly consumed slots (cur - nr_hwcur)
  443  * from nr_hwavail, make the descriptors available for the next reads,
  444  * and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail.
  445  *
  446  * do_lock has a special meaning: please refer to txsync.
  447  */
static int
ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, l, n, lim = kring->nkr_num_slots - 1;
	/* always refresh state on an ioctl, or when an interrupt is pending */
	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
	u_int k = ring->cur, resvd = ring->reserved;

	/* ring->cur comes from userspace: validate before any other use */
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		IXGBE_RX_LOCK(rxr);
	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part, import newly received packets into the netmap ring.
	 *
	 * j is the index of the next free slot in the netmap ring,
	 * and l is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size
	 *	l = rxr->next_to_check;
	 * and
	 *	j == (l + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_to_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		/* XXX apparently the length field in advanced descriptors
		 * does not include the CRC irrespective of the setting
		 * of CRCSTRIP. The data sheets say differently.
		 * Very strange.
		 */
		int crclen = ix_crcstrip ? 0 : 4;
		l = rxr->next_to_check;
		j = netmap_idx_n2k(kring, l);

		/* scan descriptors until one without DD (still owned by hw) */
		for (n = 0; ; n++) {
			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & IXGBE_RXD_STAT_DD) == 0)
				break;
			ring->slot[j].len = le16toh(curr->wb.upper.length) - crclen;
			if (ix_write_len)
				D("rx[%d] len %d", j, ring->slot[j].len);
			bus_dmamap_sync(rxr->ptag,
			    rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				ix_rx_miss ++;
				ix_rx_miss_bufs += n;
			}
			rxr->next_to_check = l;
			kring->nr_hwavail += n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Skip past packets that userspace has released
	 * (from kring->nr_hwcur to ring->cur - ring->reserved excluded),
	 * and make the buffers available for reception.
	 * As usual j is the index in the netmap ring, l is the index
	 * in the NIC ring, and j == (l + kring->nkr_hwofs) % ring_size
	 */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		/* step k back by resvd slots, with wrap-around */
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* collect per-slot info, with similar validations
			 * and flag handling as in the txsync code.
			 *
			 * NOTE curr and rxbuf are indexed by l.
			 * Also, this driver needs to update the physical
			 * address in the NIC ring, but other drivers
			 * may not have this requirement.
			 */
			struct netmap_slot *slot = &ring->slot[j];
			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
			struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[l];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* clear DD so the hw can reuse the descriptor */
			curr->wb.upper.status_error = 0;
			curr->read.pkt_addr = htole64(paddr);
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* IMPORTANT: we must leave one free slot in the ring,
		 * so move l back by one unit
		 */
		l = (l == 0) ? lim : l - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), l);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;

	if (do_lock)
		IXGBE_RX_UNLOCK(rxr);
	return 0;

ring_reset:
	if (do_lock)
		IXGBE_RX_UNLOCK(rxr);
	return netmap_ring_reinit(kring);
}
  588 
  589 
  590 /*
  591  * The attach routine, called near the end of ixgbe_attach(),
  592  * fills the parameters for netmap_attach() and calls it.
  593  * It cannot fail, in the worst case (such as no memory)
  594  * netmap mode will be disabled and the driver will only
  595  * operate in standard mode.
  596  */
  597 static void
  598 ixgbe_netmap_attach(struct adapter *adapter)
  599 {
  600         struct netmap_adapter na;
  601 
  602         bzero(&na, sizeof(na));
  603 
  604         na.ifp = adapter->ifp;
  605         na.separate_locks = 1;  /* this card has separate rx/tx locks */
  606         na.num_tx_desc = adapter->num_tx_desc;
  607         na.num_rx_desc = adapter->num_rx_desc;
  608         na.nm_txsync = ixgbe_netmap_txsync;
  609         na.nm_rxsync = ixgbe_netmap_rxsync;
  610         na.nm_lock = ixgbe_netmap_lock_wrapper;
  611         na.nm_register = ixgbe_netmap_reg;
  612         netmap_attach(&na, adapter->num_queues);
  613 }       
  614 
  615 /* end of file */

Cache object: 30ca7952bcd455e05cedc140fb247d3a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.