FreeBSD/Linux Kernel Cross Reference
sys/dev/ena/ena_netmap.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES          1
#define ENA_NETMAP_NO_MORE_FRAMES       0
#define ENA_MAX_FRAMES                  16384

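/*
 * Per-call context shared by the Tx/Rx sync paths: it bundles the netmap
 * kring and its slot array together with the matching driver ring and the
 * ena_com I/O queues, plus the current netmap index (nm_i), the driver's
 * next-to-use/next-to-clean index (nt) and the ring size mask (lim).
 */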
struct ena_netmap_ctx {
        struct netmap_kring *kring;
        struct ena_adapter *adapter;
        struct netmap_adapter *na;
        struct netmap_slot *slots;
        struct ena_ring *ring;
        struct ena_com_io_cq *io_cq;
        struct ena_com_io_sq *io_sq;
        u_int nm_i;
        uint16_t nt;
        uint16_t lim;
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *, struct netmap_slot *,
    u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *, struct ena_netmap_ctx *,
    uint16_t);

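/*
 * Register the adapter with netmap: describe the ring geometry and hook up
 * the txsync/rxsync/register callbacks. NAF_MOREFRAG advertises support
 * for multi-slot (fragmented) packets.
 */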
int
ena_netmap_attach(struct ena_adapter *adapter)
{
        struct netmap_adapter na;

        ena_log_nm(adapter->pdev, INFO, "netmap attach\n");

        bzero(&na, sizeof(na));
        na.na_flags = NAF_MOREFRAG;
        na.ifp = adapter->ifp;
        na.num_tx_desc = adapter->requested_tx_ring_size;
        na.num_rx_desc = adapter->requested_rx_ring_size;
        na.num_tx_rings = adapter->num_io_queues;
        na.num_rx_rings = adapter->num_io_queues;
        na.rx_buf_maxsize = adapter->buf_ring_size;
        na.nm_txsync = ena_netmap_txsync;
        na.nm_rxsync = ena_netmap_rxsync;
        na.nm_register = ena_netmap_reg;

        return (netmap_attach(&na));
}

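/*
 * Attach a free netmap buffer to the given Rx descriptor: take the slot at
 * nr_hwcur, DMA-map its buffer, steal its buf_idx into rx_info and advance
 * nr_hwcur. Returns ENOMEM when the netmap ring has no free slots.
 */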
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_kring *kring;
        struct netmap_ring *ring;
        struct netmap_slot *slot;
        void *addr;
        uint64_t paddr;
        int nm_i, qid, head, lim, rc;

        /* Do nothing if a previously allocated buffer is still unused. */
        if (unlikely(rx_info->netmap_buf_idx != 0))
                return (0);

        qid = rx_ring->qid;
        kring = na->rx_rings[qid];
        nm_i = kring->nr_hwcur;
        head = kring->rhead;

        ena_log_nm(adapter->pdev, DBG,
            "nr_hwcur: %d, nr_hwtail: %d, rhead: %d, rcur: %d, rtail: %d\n",
            kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur,
            kring->rtail);

        if ((nm_i == head) && rx_ring->initialized) {
                ena_log_nm(adapter->pdev, ERR,
                    "No free slots in netmap ring\n");
                return (ENOMEM);
        }

        ring = kring->ring;
        if (ring == NULL) {
                ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
                return (EFAULT);
        }
        slot = &ring->slot[nm_i];

        addr = PNMB(na, slot, &paddr);
        if (addr == NETMAP_BUF_BASE(na)) {
                ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
                return (EFAULT);
        }

        rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
        if (rc != 0) {
                ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
                return (rc);
        }
        bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

        rx_info->ena_buf.paddr = paddr;
        rx_info->ena_buf.len = ring->nr_buf_size;
        rx_info->mbuf = NULL;
        rx_info->netmap_buf_idx = slot->buf_idx;

        slot->buf_idx = 0;

        lim = kring->nkr_num_slots - 1;
        kring->nr_hwcur = nm_next(nm_i, lim);

        return (0);
}

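/*
 * Give the netmap buffer held by rx_info back to the netmap ring: unmap
 * the DMA buffer, restore the saved buf_idx into the slot right before
 * nr_hwcur and step nr_hwcur back, undoing ena_netmap_alloc_rx_slot().
 */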
void
ena_netmap_free_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
        struct netmap_adapter *na;
        struct netmap_kring *kring;
        struct netmap_slot *slot;
        int nm_i, qid, lim;

        na = NA(adapter->ifp);
        if (na == NULL) {
                ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
                return;
        }

        if (na->rx_rings == NULL) {
                ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
                return;
        }

        qid = rx_ring->qid;
        kring = na->rx_rings[qid];
        if (kring == NULL) {
                ena_log_nm(adapter->pdev, ERR,
                    "netmap kernel ring %d is NULL\n", qid);
                return;
        }

        lim = kring->nkr_num_slots - 1;
        nm_i = nm_prev(kring->nr_hwcur, lim);

        if (kring->nr_mode != NKR_NETMAP_ON)
                return;

        bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
            BUS_DMASYNC_POSTREAD);
        netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

        KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

        slot = &kring->ring->slot[nm_i];

        ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
        slot->buf_idx = rx_info->netmap_buf_idx;
        slot->flags = NS_BUF_CHANGED;

        rx_info->netmap_buf_idx = 0;
        kring->nr_hwcur = nm_i;
}

static bool
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
        struct netmap_adapter *na;
        struct netmap_kring *kring;

        if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
                na = NA(adapter->ifp);
                kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
                if (kring->nr_mode == NKR_NETMAP_ON)
                        return true;
        }
        return false;
}

bool
ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
        return ena_ring_in_netmap(adapter, qid, NR_TX);
}

bool
ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
        return ena_ring_in_netmap(adapter, qid, NR_RX);
}

static void
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
        if (!ena_ring_in_netmap(adapter, qid, x))
                return;

        netmap_reset(NA(adapter->ifp), x, qid, 0);
        ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
            (x == NR_TX) ? "Tx" : "Rx", qid);
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
        ena_netmap_reset_ring(adapter, qid, NR_RX);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
        ena_netmap_reset_ring(adapter, qid, NR_TX);
}

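/*
 * netmap register/unregister callback. The device is brought down, each
 * kring with a pending mode change is switched on or off, the native-mode
 * flags are updated and the device is brought back up. If that fails, the
 * device is destroyed and restored from scratch.
 */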
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
        if_t ifp = na->ifp;
        struct ena_adapter *adapter = if_getsoftc(ifp);
        device_t pdev = adapter->pdev;
        struct netmap_kring *kring;
        enum txrx t;
        int rc, i;

        ENA_LOCK_LOCK();
        ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
        ena_down(adapter);

        if (onoff) {
                ena_log_nm(pdev, INFO, "netmap on\n");
                for_rx_tx(t) {
                        for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                kring = NMR(na, t)[i];
                                if (nm_kring_pending_on(kring)) {
                                        kring->nr_mode = NKR_NETMAP_ON;
                                }
                        }
                }
                nm_set_native_flags(na);
        } else {
                ena_log_nm(pdev, INFO, "netmap off\n");
                nm_clear_native_flags(na);
                for_rx_tx(t) {
                        for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                kring = NMR(na, t)[i];
                                if (nm_kring_pending_off(kring)) {
                                        kring->nr_mode = NKR_NETMAP_OFF;
                                }
                        }
                }
        }

        rc = ena_up(adapter);
        if (rc != 0) {
                ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
                adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
                nm_clear_native_flags(na);
                ena_destroy_device(adapter, false);
                ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
                rc = ena_restore_device(adapter);
        }
        ENA_LOCK_UNLOCK();

        return (rc);
}

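/*
 * netmap txsync callback: under the Tx ring lock, transmit frames between
 * nr_hwcur and rhead, then reclaim completed descriptors. Skipped entirely
 * when the device or the link is down.
 */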
static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct ena_netmap_ctx ctx;
        int rc = 0;

        ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
        ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

        ENA_RING_MTX_LOCK(ctx.ring);
        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
                goto txsync_end;

        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
                goto txsync_end;

        rc = ena_netmap_tx_frames(&ctx);
        ena_netmap_tx_cleanup(&ctx);

txsync_end:
        ENA_RING_MTX_UNLOCK(ctx.ring);
        return (rc);
}

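/*
 * Send out all pending netmap slots. The loop stops either when rhead is
 * reached or when the device runs out of Tx descriptors; the doorbell is
 * rung once at the end if anything was queued.
 */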
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
        struct ena_ring *tx_ring = ctx->ring;
        int rc = 0;

        ctx->nm_i = ctx->kring->nr_hwcur;
        ctx->nt = ctx->ring->next_to_use;

        __builtin_prefetch(&ctx->slots[ctx->nm_i]);

        while (ctx->nm_i != ctx->kring->rhead) {
                if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
                        /*
                         * A full Tx ring still causes an error to be
                         * returned here. Do not pass it on to netmap: the
                         * application can see the ring state from the
                         * netmap ring pointers, and returning an error for
                         * the common ring-full case could make it exit.
                         */
                        if (rc == ENA_COM_NO_MEM)
                                rc = 0;
                        break;
                }
                tx_ring->acum_pkts++;
        }

        /* If any packet was sent... */
        if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
                /* ...send the doorbell to the device. */
                ena_ring_tx_doorbell(tx_ring);

                ctx->ring->next_to_use = ctx->nt;
                ctx->kring->nr_hwcur = ctx->nm_i;
        }

        return (rc);
}

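/*
 * Build and issue a single Tx request from one or more netmap slots: map
 * the slots for DMA (handling the LLQ pushed header if needed), fill the
 * ena_com_tx_ctx and hand it to ena_com_prepare_tx().
 */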
static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
        struct ena_com_tx_ctx ena_tx_ctx;
        struct ena_adapter *adapter;
        struct ena_ring *tx_ring;
        struct ena_tx_buffer *tx_info;
        uint16_t req_id;
        uint16_t header_len;
        uint16_t packet_len;
        int nb_hw_desc;
        int rc;
        void *push_hdr;

        adapter = ctx->adapter;
        if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
                ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
                return (EINVAL);
        }

        tx_ring = ctx->ring;

        req_id = tx_ring->free_tx_ids[ctx->nt];
        tx_info = &tx_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;
        tx_info->nm_info.sockets_used = 0;

        rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
            &packet_len);
        if (unlikely(rc != 0)) {
                ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
                return (rc);
        }

        bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
        ena_tx_ctx.ena_bufs = tx_info->bufs;
        ena_tx_ctx.push_header = push_hdr;
        ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
        ena_tx_ctx.req_id = req_id;
        ena_tx_ctx.header_len = header_len;
        ena_tx_ctx.meta_valid = adapter->disable_meta_caching;

        /* There are no offloads, as netmap does not support them. */

        if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
            ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx))
                ena_ring_tx_doorbell(tx_ring);

        rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
        if (unlikely(rc != 0)) {
                if (likely(rc == ENA_COM_NO_MEM)) {
                        ena_log_nm(adapter->pdev, DBG,
                            "Tx ring[%d] is out of space\n", tx_ring->que->id);
                } else {
                        ena_log_nm(adapter->pdev, ERR,
                            "Failed to prepare Tx bufs\n");
                        ena_trigger_reset(adapter,
                            ENA_REGS_RESET_DRIVER_INVALID_STATE);
                }
                counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

                ena_netmap_unmap_last_socket_chain(ctx, tx_info);
                return (rc);
        }

        counter_enter();
        counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
        counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
        counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
        counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
        counter_exit();

        tx_info->tx_descs = nb_hw_desc;

        ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

        for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
                bus_dmamap_sync(adapter->tx_buf_tag,
                    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

        return (0);
}

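/*
 * Count how many slots the frame starting at nm_i spans by following the
 * NS_MOREFRAG chain; a frame always occupies at least one slot.
 */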
static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
        uint16_t slots = 1;
        uint16_t nm = ctx->nm_i;

        while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
                slots++;
                nm = nm_next(nm, ctx->lim);
        }

        return slots;
}

static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
        struct netmap_slot *nm_slot;
        uint16_t packet_size = 0;

        do {
                nm_slot = &slots[slot_index];
                packet_size += nm_slot->len;
                slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0);

        return packet_size;
}

static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
        struct netmap_slot *nm_slot;
        void *slot_vaddr;
        uint16_t data_amount;

        do {
                nm_slot = &slots[slot_index];
                slot_vaddr = NMB(na, nm_slot);
                if (unlikely(slot_vaddr == NULL))
                        return (EINVAL);

                data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
                memcpy(destination, slot_vaddr, data_amount);
                bytes_to_copy -= data_amount;
                /* Advance the destination past the bytes just copied. */
                destination = (char *)destination + data_amount;

                slot_index = nm_next(slot_index, limit);
        } while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

        return (0);
}

static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
        device_t pdev;
        int rc;

        pdev = ((struct ena_adapter *)if_getsoftc(na->ifp))->pdev;

        *vaddr = PNMB(na, slot, paddr);
        if (unlikely(*vaddr == NULL)) {
                ena_log_nm(pdev, ERR, "Slot address is NULL\n");
                return (EINVAL);
        }

        rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
        if (unlikely(rc != 0)) {
                ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
                    slot->buf_idx);
                return (EINVAL);
        }

        return (0);
}

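/*
 * DMA-map all slots belonging to one frame and fill tx_info->bufs. In LLQ
 * mode the first tx_max_header_size bytes are returned through push_hdr
 * (linearized into an intermediate buffer when they span several slots);
 * otherwise push_hdr is NULL and header_len is 0 so the device parses the
 * header itself. Each consumed slot's buf_idx is saved so it can be given
 * back to userspace on completion.
 */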
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
        struct netmap_slot *slot;
        struct ena_com_buf *ena_buf;
        struct ena_adapter *adapter;
        struct ena_ring *tx_ring;
        struct ena_netmap_tx_info *nm_info;
        bus_dmamap_t *nm_maps;
        void *vaddr;
        uint64_t paddr;
        uint32_t *nm_buf_idx;
        uint32_t slot_head_len;
        uint32_t frag_len;
        uint32_t remaining_len;
        uint16_t push_len;
        uint16_t delta;
        int rc;

        adapter = ctx->adapter;
        tx_ring = ctx->ring;
        ena_buf = tx_info->bufs;
        nm_info = &tx_info->nm_info;
        nm_maps = nm_info->map_seg;
        nm_buf_idx = nm_info->socket_buf_idx;
        slot = &ctx->slots[ctx->nm_i];

        slot_head_len = slot->len;
        *packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
        remaining_len = *packet_len;
        delta = 0;

        __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
        if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /*
                 * When the device is in LLQ mode, the driver copies the
                 * header into the device memory space. The ena_com layer
                 * assumes that the header is in linear memory, which may
                 * not hold since part of the header can live in the
                 * fragmented buffers. First, check whether the header fits
                 * in the first slot; if not, copy it into a separate buffer
                 * holding the linearized data.
                 */
                push_len = min_t(uint32_t, *packet_len,
                    tx_ring->tx_max_header_size);
                *header_len = push_len;
                /* If header is in linear space, just point to socket's data. */
                if (likely(push_len <= slot_head_len)) {
                        *push_hdr = NMB(ctx->na, slot);
                        if (unlikely(*push_hdr == NULL)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "Slot vaddress is NULL\n");
                                return (EINVAL);
                        }
                /*
                 * Otherwise, copy the whole header from multiple slots
                 * into the intermediate buffer.
                 */
                } else {
                        rc = ena_netmap_copy_data(ctx->na, ctx->slots,
                            ctx->nm_i, ctx->lim, push_len,
                            tx_ring->push_buf_intermediate_buf);
                        if (unlikely(rc)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "Failed to copy data from slots to push_buf\n");
                                return (EINVAL);
                        }

                        *push_hdr = tx_ring->push_buf_intermediate_buf;
                        counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

                        delta = push_len - slot_head_len;
                }

                ena_log_nm(adapter->pdev, DBG,
                    "slot: %d header_buf->vaddr: %p push_len: %d\n",
                    slot->buf_idx, *push_hdr, push_len);

                /*
                 * If the header was in linear memory space, map the rest of
                 * the data from the first slot of the chain for DMA.
                 */
                if (slot_head_len > push_len) {
                        rc = ena_netmap_map_single_slot(ctx->na, slot,
                            adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
                        if (unlikely(rc != 0)) {
                                ena_log_nm(adapter->pdev, ERR,
                                    "DMA mapping error\n");
                                return (rc);
                        }
                        nm_maps++;

                        ena_buf->paddr = paddr + push_len;
                        ena_buf->len = slot->len - push_len;
                        ena_buf++;

                        tx_info->num_of_bufs++;
                }

                remaining_len -= slot->len;

                /* Save buf idx before advancing */
                *nm_buf_idx = slot->buf_idx;
                nm_buf_idx++;
                slot->buf_idx = 0;

                /* Advance to the next socket */
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                slot = &ctx->slots[ctx->nm_i];
                nm_info->sockets_used++;

                /*
                 * If the header is in non-linear space (delta > 0), skip
                 * the slots containing only header data and map the last
                 * one, which holds both header and packet data.
                 * The first segment is already counted in.
                 */
                while (delta > 0) {
                        __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
                        frag_len = slot->len;

                        /*
                         * If the whole segment contains only header data,
                         * just move to the next one and reduce delta.
                         */
                        if (unlikely(delta >= frag_len)) {
                                delta -= frag_len;
                        } else {
                                /*
                                 * Map the data, skipping the header bytes
                                 * via the delta offset.
                                 */
                                rc = ena_netmap_map_single_slot(ctx->na, slot,
                                    adapter->tx_buf_tag, *nm_maps, &vaddr,
                                    &paddr);
                                if (unlikely(rc != 0)) {
                                        ena_log_nm(adapter->pdev, ERR,
                                            "DMA mapping error\n");
                                        goto error_map;
                                }
                                nm_maps++;

                                ena_buf->paddr = paddr + delta;
                                ena_buf->len = slot->len - delta;
                                ena_buf++;

                                tx_info->num_of_bufs++;
                                delta = 0;
                        }

                        remaining_len -= slot->len;

                        /* Save buf idx before advancing */
                        *nm_buf_idx = slot->buf_idx;
                        nm_buf_idx++;
                        slot->buf_idx = 0;

                        /* Advance to the next socket */
                        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                        slot = &ctx->slots[ctx->nm_i];
                        nm_info->sockets_used++;
                }
        } else {
                *push_hdr = NULL;
                /*
                 * header_len is just a hint for the device. Netmap gives us
                 * no information about the packet header length, and it is
                 * not guaranteed that the whole header lies in the first
                 * slot, so setting header_len to 0 makes the device ignore
                 * the value and resolve the header on its own.
                 */
                *header_len = 0;
        }

        /* Map all remaining data (regular routine for non-LLQ mode) */
        while (remaining_len > 0) {
                __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);

                rc = ena_netmap_map_single_slot(ctx->na, slot,
                    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
                if (unlikely(rc != 0)) {
                        ena_log_nm(adapter->pdev, ERR, "DMA mapping error\n");
                        goto error_map;
                }
                nm_maps++;

                ena_buf->paddr = paddr;
                ena_buf->len = slot->len;
                ena_buf++;

                tx_info->num_of_bufs++;

                remaining_len -= slot->len;

                /* Save buf idx before advancing */
                *nm_buf_idx = slot->buf_idx;
                nm_buf_idx++;
                slot->buf_idx = 0;

                /* Advance to the next socket */
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                slot = &ctx->slots[ctx->nm_i];
                nm_info->sockets_used++;
        }

        return (0);

error_map:
        ena_netmap_unmap_last_socket_chain(ctx, tx_info);

        return (rc);
}

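/*
 * Roll back a partially mapped frame: unload any DMA maps taken so far and
 * hand the already consumed slots back to the netmap ring, walking nm_i
 * backwards.
 */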
static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
        struct ena_netmap_tx_info *nm_info;
        int n;

        nm_info = &tx_info->nm_info;

        /*
         * In LLQ mode the number of used sockets need not equal the number
         * of DMA buffers, so the two must be treated separately.
         * First, unmap the DMA maps.
         */
        n = tx_info->num_of_bufs;
        while (n--) {
                netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
                    nm_info->map_seg[n]);
        }
        tx_info->num_of_bufs = 0;

        /* Next, return the sockets back to the userspace */
        n = nm_info->sockets_used;
        while (n--) {
                ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
                ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
                nm_info->socket_buf_idx[n] = 0;
                ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
        }
        nm_info->sockets_used = 0;
}

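/*
 * Reclaim descriptors for all transmissions the device has completed:
 * collect completed req_ids from the completion queue, return their slots
 * to userspace and advance nr_hwtail and next_to_clean accordingly.
 */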
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
        uint16_t req_id;
        uint16_t total_tx_descs = 0;

        ctx->nm_i = ctx->kring->nr_hwtail;
        ctx->nt = ctx->ring->next_to_clean;

        /* Reclaim buffers for completed transmissions */
        while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
                if (validate_tx_req_id(ctx->ring, req_id) != 0)
                        break;
                total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
        }

        ctx->kring->nr_hwtail = ctx->nm_i;

        if (total_tx_descs > 0) {
                /* acknowledge completion of sent packets */
                ctx->ring->next_to_clean = ctx->nt;
                ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
                ena_com_update_dev_comp_head(ctx->ring->ena_com_io_cq);
        }
}

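/*
 * Clean a single completed Tx request: unmap its DMA segments, give its
 * netmap buffers back to the ring past nr_hwtail and recycle the req_id.
 */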
static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
        struct ena_tx_buffer *tx_info;
        struct ena_netmap_tx_info *nm_info;
        int n;

        tx_info = &ctx->ring->tx_buffer_info[req_id];
        nm_info = &tx_info->nm_info;

        /*
         * In LLQ mode the number of used sockets need not equal the number
         * of DMA buffers, so the two must be treated separately.
         * First, unmap the DMA maps.
         */
        for (n = 0; n < tx_info->num_of_bufs; n++) {
                netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
                    nm_info->map_seg[n]);
        }
        tx_info->num_of_bufs = 0;

        /* Next, return the sockets back to the userspace */
        for (n = 0; n < nm_info->sockets_used; n++) {
                ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
                ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
                    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
                ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
                ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
                nm_info->socket_buf_idx[n] = 0;
        }
        nm_info->sockets_used = 0;

        ctx->ring->free_tx_ids[ctx->nt] = req_id;
        ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);

        return tx_info->tx_descs;
}

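/*
 * Sanity-check a req_id reported by the device; an out-of-range value
 * triggers a device reset.
 */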
static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
        struct ena_adapter *adapter = tx_ring->adapter;

        if (likely(req_id < tx_ring->ring_size))
                return (0);

        ena_log_nm(adapter->pdev, WARN, "Invalid req_id %hu in qid %hu\n",
            req_id, tx_ring->qid);
        counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

        ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

        return (EFAULT);
}

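/*
 * netmap rxsync callback: receive completed frames into the netmap ring
 * and then refill the device with buffers released by userspace. Does
 * nothing when the interface is not running or the link is down.
 */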
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct ena_netmap_ctx ctx;
        int rc;

        ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
        ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

        if (ctx.kring->rhead > ctx.lim) {
                /* Probably not needed to release slots from RX ring. */
                return (netmap_ring_reinit(ctx.kring));
        }

        if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
                return (0);

        if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
                return (0);

        if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
                return (rc);

        ena_netmap_rx_cleanup(&ctx);

        return (0);
}

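/*
 * Receive frames until the device reports an empty completion queue, with
 * a safety cap of ENA_MAX_FRAMES per call to avoid spinning forever.
 */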
static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
        int rc = 0;
        int frames_counter = 0;

        ctx->nt = ctx->ring->next_to_clean;
        ctx->nm_i = ctx->kring->nr_hwtail;

        while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
                frames_counter++;
                /* ENA_NETMAP_MORE_FRAMES is not an error, so clear rc. */
                rc = 0;
                if (frames_counter > ENA_MAX_FRAMES) {
                        ena_log_nm(ctx->adapter->pdev, ERR,
                            "Driver is stuck in the Rx loop\n");
                        break;
                }
        }

        ctx->kring->nr_hwtail = ctx->nm_i;
        ctx->kring->nr_kflags &= ~NKR_PENDINTR;
        ctx->ring->next_to_clean = ctx->nt;

        return (rc);
}

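/*
 * Pull one received packet from the device and load its descriptors into
 * consecutive netmap slots. Returns ENA_NETMAP_MORE_FRAMES while packets
 * remain, ENA_NETMAP_NO_MORE_FRAMES when the queue is drained, or an
 * error after triggering a device reset.
 */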
static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
        struct ena_com_rx_ctx ena_rx_ctx;
        enum ena_regs_reset_reason_types reset_reason;
        int rc, len = 0;
        uint16_t buf, nm;

        ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
        ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
        bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
            ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

        rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
        if (unlikely(rc != 0)) {
                ena_log_nm(ctx->adapter->pdev, ERR,
                    "Failed to read pkt from the device with error: %d\n", rc);
                if (rc == ENA_COM_NO_SPACE) {
                        counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
                        reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
                } else {
                        counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
                        reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
                }
                ena_trigger_reset(ctx->adapter, reset_reason);
                return (rc);
        }
        if (unlikely(ena_rx_ctx.descs == 0))
                return (ENA_NETMAP_NO_MORE_FRAMES);

        ena_log_nm(ctx->adapter->pdev, DBG,
            "Rx: q %d got packet from ena. descs #:"
            " %d l3 proto %d l4 proto %d hash: %x\n",
            ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
            ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

        for (buf = 0; buf < ena_rx_ctx.descs; buf++)
                if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
                        break;
        /*
         * ena_netmap_rx_load_desc does not know the number of descriptors,
         * so it sets NS_MOREFRAG on every slot; the flag on the last slot
         * is cleared here.
         */
        ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

        if (rc != 0) {
                goto rx_clear_desc;
        }

        bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
            ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

        counter_enter();
        counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
        counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
        counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
        counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
        counter_exit();

        return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
        nm = ctx->nm_i;

        /* Remove failed packet from ring */
        while (buf--) {
                ctx->slots[nm].flags = 0;
                ctx->slots[nm].len = 0;
                nm = nm_prev(nm, ctx->lim);
        }

        return (rc);
}

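/*
 * Hand the netmap buffer of one completed Rx descriptor over to the slot
 * at nm_i: unmap it, move its buf_idx into the slot, set the slot length
 * and accumulate the packet length into *len.
 */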
static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
        struct ena_rx_buffer *rx_info;
        uint16_t req_id;

        req_id = ctx->ring->ena_bufs[buf].req_id;
        rx_info = &ctx->ring->rx_buffer_info[req_id];
        bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
            BUS_DMASYNC_POSTREAD);
        netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

        ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
            "Rx idx is not 0.\n");

        ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
        rx_info->netmap_buf_idx = 0;
        /*
         * Set NS_MOREFRAG on every slot; ena_netmap_rx_frame clears it
         * on the last one.
         */
        ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
        ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
        *len += ctx->slots[ctx->nm_i].len;
        ctx->ring->free_rx_ids[ctx->nt] = req_id;
        ena_log_nm(ctx->adapter->pdev, DBG,
            "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info,
            ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
            ctx->nm_i);

        ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
        ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

        return (0);
}

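/*
 * Refill the device with as many Rx buffers as userspace has released
 * (the distance from nr_hwcur to rhead, modulo the ring size).
 */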
static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
        int refill_required;

        refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
        if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
                refill_required -= 1;

        if (refill_required == 0)
                return;
        else if (refill_required < 0)
                refill_required += ctx->kring->nkr_num_slots;

        ena_refill_rx_bufs(ctx->ring, refill_required);
}

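/*
 * Populate the per-call context from the kring and the ena_com queue pair
 * identified by ena_qid; ctx->ring must still be set by the caller.
 */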
static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
        ctx->kring = kring;
        ctx->na = kring->na;
        ctx->adapter = if_getsoftc(ctx->na->ifp);
        ctx->lim = kring->nkr_num_slots - 1;
        ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
        ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
        ctx->slots = kring->ring->slot;
}

void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
        struct netmap_adapter *na = NA(adapter->ifp);

        netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */
