/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef DEV_NETMAP

#include "ena.h"
#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES		1
#define ENA_NETMAP_NO_MORE_FRAMES	0
#define ENA_MAX_FRAMES			16384

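/*
 * Per-call context shared by the Tx/Rx sync paths. It bundles the netmap
 * kring and slot array together with the matching driver ring and ena_com
 * queues, plus the current netmap (nm_i) and NIC (nt) ring indices.
 */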
struct ena_netmap_ctx {
	struct netmap_kring *kring;
	struct ena_adapter *adapter;
	struct netmap_adapter *na;
	struct netmap_slot *slots;
	struct ena_ring *ring;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	u_int nm_i;
	uint16_t nt;
	uint16_t lim;
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *,
    struct netmap_slot *, u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *,
    uint16_t);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t,
    int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *,
    struct ena_netmap_ctx *, uint16_t);

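/* Register the adapter's rings and callbacks with the netmap framework. */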
int
ena_netmap_attach(struct ena_adapter *adapter)
{
	struct netmap_adapter na;

	ena_log_nm(adapter->pdev, INFO, "netmap attach\n");

	bzero(&na, sizeof(na));
	na.na_flags = NAF_MOREFRAG;
	na.ifp = adapter->ifp;
	na.num_tx_desc = adapter->requested_tx_ring_size;
	na.num_rx_desc = adapter->requested_rx_ring_size;
	na.num_tx_rings = adapter->num_io_queues;
	na.num_rx_rings = adapter->num_io_queues;
	na.rx_buf_maxsize = adapter->buf_ring_size;
	na.nm_txsync = ena_netmap_txsync;
	na.nm_rxsync = ena_netmap_rxsync;
	na.nm_register = ena_netmap_reg;

	return (netmap_attach(&na));
}

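/*
 * Attach a netmap buffer from the Rx kring to the given rx_info slot and
 * DMA-map it, so it can be posted to the device instead of an mbuf.
 */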
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	void *addr;
	uint64_t paddr;
	int nm_i, qid, head, lim, rc;

	/* If a previously allocated buffer is still unused, keep it. */
	if (unlikely(rx_info->netmap_buf_idx != 0))
		return (0);

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	nm_i = kring->nr_hwcur;
	head = kring->rhead;

	ena_log_nm(adapter->pdev, DBG, "nr_hwcur: %d, nr_hwtail: %d, "
	    "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur,
	    kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail);

	if ((nm_i == head) && rx_ring->initialized) {
		ena_log_nm(adapter->pdev, ERR, "No free slots in netmap ring\n");
		return (ENOMEM);
	}

	ring = kring->ring;
	if (ring == NULL) {
		ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
		return (EFAULT);
	}
	slot = &ring->slot[nm_i];

	addr = PNMB(na, slot, &paddr);
	if (addr == NETMAP_BUF_BASE(na)) {
		ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
		return (EFAULT);
	}

	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
	if (rc != 0) {
		ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
		return (rc);
	}
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	rx_info->ena_buf.paddr = paddr;
	rx_info->ena_buf.len = ring->nr_buf_size;
	rx_info->mbuf = NULL;
	rx_info->netmap_buf_idx = slot->buf_idx;

	slot->buf_idx = 0;

	lim = kring->nkr_num_slots - 1;
	kring->nr_hwcur = nm_next(nm_i, lim);

	return (0);
}

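/*
 * Give the netmap buffer held by rx_info back to the Rx kring and unmap it
 * from DMA, reversing ena_netmap_alloc_rx_slot().
 */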
void
ena_netmap_free_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int nm_i, qid, lim;

	na = NA(adapter->ifp);
	if (na == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
		return;
	}

	if (na->rx_rings == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
		return;
	}

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	if (kring == NULL) {
		ena_log_nm(adapter->pdev, ERR,
		    "netmap kernel ring %d is NULL\n", qid);
		return;
	}

	lim = kring->nkr_num_slots - 1;
	nm_i = nm_prev(kring->nr_hwcur, lim);

	if (kring->nr_mode != NKR_NETMAP_ON)
		return;

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

	KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

	slot = &kring->ring->slot[nm_i];

	ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
	slot->buf_idx = rx_info->netmap_buf_idx;
	slot->flags = NS_BUF_CHANGED;

	rx_info->netmap_buf_idx = 0;
	kring->nr_hwcur = nm_i;
}

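/* Check whether the given Tx or Rx queue is currently in netmap mode. */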
static bool
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;

	if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
		na = NA(adapter->ifp);
		kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
		if (kring->nr_mode == NKR_NETMAP_ON)
			return (true);
	}
	return (false);
}

bool
ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return (ena_ring_in_netmap(adapter, qid, NR_TX));
}

bool
ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return (ena_ring_in_netmap(adapter, qid, NR_RX));
}

static void
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
	if (!ena_ring_in_netmap(adapter, qid, x))
		return;

	netmap_reset(NA(adapter->ifp), x, qid, 0);
	ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
	    (x == NR_TX) ? "Tx" : "Rx", qid);
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_RX);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_TX);
}

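/*
 * Netmap register callback: bring the interface down, switch the krings
 * into or out of netmap mode and bring it back up again.
 */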
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ena_adapter *adapter = ifp->if_softc;
	device_t pdev = adapter->pdev;
	struct netmap_kring *kring;
	enum txrx t;
	int rc, i;

	ENA_LOCK_LOCK();
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ena_down(adapter);

	if (onoff) {
		ena_log_nm(pdev, INFO, "netmap on\n");
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_on(kring)) {
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		nm_set_native_flags(na);
	} else {
		ena_log_nm(pdev, INFO, "netmap off\n");
		nm_clear_native_flags(na);
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
				}
			}
		}
	}

	rc = ena_up(adapter);
	if (rc != 0) {
		ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
		nm_clear_native_flags(na);
		ena_destroy_device(adapter, false);
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
		rc = ena_restore_device(adapter);
	}
	ENA_LOCK_UNLOCK();

	return (rc);
}

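/* Netmap txsync callback: transmit new frames and reclaim completed ones. */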
static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc = 0;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

	ENA_RING_MTX_LOCK(ctx.ring);
	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
		goto txsync_end;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		goto txsync_end;

	rc = ena_netmap_tx_frames(&ctx);
	ena_netmap_tx_cleanup(&ctx);

txsync_end:
	ENA_RING_MTX_UNLOCK(ctx.ring);
	return (rc);
}

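/*
 * Send all frames between nr_hwcur and rhead to the device, ringing the
 * doorbell once at the end if anything was queued.
 */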
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
	struct ena_ring *tx_ring = ctx->ring;
	int rc = 0;

	ctx->nm_i = ctx->kring->nr_hwcur;
	ctx->nt = ctx->ring->next_to_use;

	__builtin_prefetch(&ctx->slots[ctx->nm_i]);

	while (ctx->nm_i != ctx->kring->rhead) {
		if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
			/*
			 * When the Tx ring is full, an error is still
			 * returned here. It should not be passed on to
			 * netmap, as the application knows the current ring
			 * state from the netmap ring pointers. Returning an
			 * error there could cause the application to exit,
			 * while a full Tx ring is a common situation.
			 */
			if (rc == ENA_COM_NO_MEM)
				rc = 0;
			break;
		}
		tx_ring->acum_pkts++;
	}

	/* If any packet was sent... */
	if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
		/* ...send the doorbell to the device. */
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(ctx->ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;

		ctx->ring->next_to_use = ctx->nt;
		ctx->kring->nr_hwcur = ctx->nm_i;
	}

	return (rc);
}

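/* Map and enqueue a single (possibly multi-slot) frame on the Tx SQ. */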
static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_tx_buffer *tx_info;
	uint16_t req_id;
	uint16_t header_len;
	uint16_t packet_len;
	int nb_hw_desc;
	int rc;
	void *push_hdr;

	adapter = ctx->adapter;
	if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
		ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
		return (EINVAL);
	}

	tx_ring = ctx->ring;

	req_id = tx_ring->free_tx_ids[ctx->nt];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	tx_info->nm_info.sockets_used = 0;

	rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
	    &packet_len);
	if (unlikely(rc != 0)) {
		ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
		return (rc);
	}

	bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;
	ena_tx_ctx.meta_valid = adapter->disable_meta_caching;

	/* There are no offloads, as netmap does not support them. */

	if (tx_ring->acum_pkts == DB_THRESHOLD ||
	    ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx)) {
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;
	}

	rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc != 0)) {
		if (likely(rc == ENA_COM_NO_MEM)) {
			ena_log_nm(adapter->pdev, DBG,
			    "Tx ring[%d] is out of space\n", tx_ring->que->id);
		} else {
			ena_log_nm(adapter->pdev, ERR,
			    "Failed to prepare Tx bufs\n");
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_DRIVER_INVALID_STATE);
		}
		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

		ena_netmap_unmap_last_socket_chain(ctx, tx_info);
		return (rc);
	}

	counter_enter();
	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
	counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
	counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
	counter_exit();

	tx_info->tx_descs = nb_hw_desc;

	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

	return (0);
}

static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
	uint16_t slots = 1;
	uint16_t nm = ctx->nm_i;

	while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
		slots++;
		nm = nm_next(nm, ctx->lim);
	}

	return (slots);
}

static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
	struct netmap_slot *nm_slot;
	uint16_t packet_size = 0;

	do {
		nm_slot = &slots[slot_index];
		packet_size += nm_slot->len;
		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0);

	return (packet_size);
}

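/*
 * Copy up to bytes_to_copy bytes from a chain of netmap slots into a
 * linear destination buffer.
 */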
static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
	struct netmap_slot *nm_slot;
	void *slot_vaddr;
	uint16_t data_amount;

	do {
		nm_slot = &slots[slot_index];
		slot_vaddr = NMB(na, nm_slot);
		if (unlikely(slot_vaddr == NULL))
			return (EINVAL);

		data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
		memcpy(destination, slot_vaddr, data_amount);
		bytes_to_copy -= data_amount;
		/* Advance past the bytes already copied. */
		destination = (char *)destination + data_amount;

		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

	return (0);
}

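/* DMA-map the buffer of a single netmap slot and return its addresses. */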
static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
	device_t pdev;
	int rc;

	pdev = ((struct ena_adapter *)na->ifp->if_softc)->pdev;

	*vaddr = PNMB(na, slot, paddr);
	if (unlikely(*vaddr == NULL)) {
		ena_log_nm(pdev, ERR, "Slot address is NULL\n");
		return (EINVAL);
	}

	rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
	if (unlikely(rc != 0)) {
		ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
		    slot->buf_idx);
		return (EINVAL);
	}

	return (0);
}

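/*
 * Walk the slots of one frame, prepare the push header (for LLQ mode) and
 * fill tx_info with DMA-mapped buffer descriptors for the device.
 */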
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
	struct netmap_slot *slot;
	struct ena_com_buf *ena_buf;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_netmap_tx_info *nm_info;
	bus_dmamap_t *nm_maps;
	void *vaddr;
	uint64_t paddr;
	uint32_t *nm_buf_idx;
	uint32_t slot_head_len;
	uint32_t frag_len;
	uint32_t remaining_len;
	uint16_t push_len;
	uint16_t delta;
	int rc;

	adapter = ctx->adapter;
	tx_ring = ctx->ring;
	ena_buf = tx_info->bufs;
	nm_info = &tx_info->nm_info;
	nm_maps = nm_info->map_seg;
	nm_buf_idx = nm_info->socket_buf_idx;
	slot = &ctx->slots[ctx->nm_i];

	slot_head_len = slot->len;
	*packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
	remaining_len = *packet_len;
	delta = 0;

	__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/*
		 * When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes that the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * First, check if the header fits in the first slot. If not,
		 * copy it to a separate buffer that will hold the linearized
		 * data.
		 */
		push_len = min_t(uint32_t, *packet_len,
		    tx_ring->tx_max_header_size);
		*header_len = push_len;
		/* If header is in linear space, just point to socket's data. */
		if (likely(push_len <= slot_head_len)) {
			*push_hdr = NMB(ctx->na, slot);
			if (unlikely(*push_hdr == NULL)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Slot vaddress is NULL\n");
				return (EINVAL);
			}
		/*
		 * Otherwise, copy the whole header from multiple slots
		 * to the intermediate buffer.
		 */
		} else {
			rc = ena_netmap_copy_data(ctx->na,
			    ctx->slots,
			    ctx->nm_i,
			    ctx->lim,
			    push_len,
			    tx_ring->push_buf_intermediate_buf);
			if (unlikely(rc)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Failed to copy data from slots to push_buf\n");
				return (EINVAL);
			}

			*push_hdr = tx_ring->push_buf_intermediate_buf;
			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

			delta = push_len - slot_head_len;
		}

		ena_log_nm(adapter->pdev, DBG,
		    "slot: %d header_buf->vaddr: %p push_len: %d\n",
		    slot->buf_idx, *push_hdr, push_len);

		/*
		 * If the header was in linear memory space, map the rest of
		 * the data in the first slot for DMA.
		 */
		if (slot_head_len > push_len) {
			rc = ena_netmap_map_single_slot(ctx->na,
			    slot,
			    adapter->tx_buf_tag,
			    *nm_maps,
			    &vaddr,
			    &paddr);
			if (unlikely(rc != 0)) {
				ena_log_nm(adapter->pdev, ERR,
				    "DMA mapping error\n");
				return (rc);
			}
			nm_maps++;

			ena_buf->paddr = paddr + push_len;
			ena_buf->len = slot->len - push_len;
			ena_buf++;

			tx_info->num_of_bufs++;
		}

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;

		/*
		 * If the header is in non-linear space (delta > 0), then skip
		 * the slots containing only header data and map the last one,
		 * which holds both the header remainder and the packet data.
		 * The first segment is already counted in.
		 */
		while (delta > 0) {
			__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
			frag_len = slot->len;

			/*
			 * If the whole segment contains header data only,
			 * move to the next one and reduce delta.
			 */
			if (unlikely(delta >= frag_len)) {
				delta -= frag_len;
			} else {
				/*
				 * Map the data and then assign it with the
				 * offsets
				 */
				rc = ena_netmap_map_single_slot(ctx->na,
				    slot,
				    adapter->tx_buf_tag,
				    *nm_maps,
				    &vaddr,
				    &paddr);
				if (unlikely(rc != 0)) {
					ena_log_nm(adapter->pdev, ERR,
					    "DMA mapping error\n");
					goto error_map;
				}
				nm_maps++;

				ena_buf->paddr = paddr + delta;
				ena_buf->len = slot->len - delta;
				ena_buf++;

				tx_info->num_of_bufs++;
				delta = 0;
			}

			remaining_len -= slot->len;

			/* Save buf idx before advancing */
			*nm_buf_idx = slot->buf_idx;
			nm_buf_idx++;
			slot->buf_idx = 0;

			/* Advance to the next socket */
			ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
			slot = &ctx->slots[ctx->nm_i];
			nm_info->sockets_used++;
		}
	} else {
		*push_hdr = NULL;
		/*
		 * header_len is just a hint for the device. Because netmap is
		 * not giving us any information about the packet header
		 * length and it is not guaranteed that the whole header will
		 * be in the first slot, setting header_len to 0 makes the
		 * device ignore this value and parse the header on its own.
		 */
		*header_len = 0;
	}

	/* Map all remaining data (regular routine for non-LLQ mode) */
	while (remaining_len > 0) {
		__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);

		rc = ena_netmap_map_single_slot(ctx->na,
		    slot,
		    adapter->tx_buf_tag,
		    *nm_maps,
		    &vaddr,
		    &paddr);
		if (unlikely(rc != 0)) {
			ena_log_nm(adapter->pdev, ERR,
			    "DMA mapping error\n");
			goto error_map;
		}
		nm_maps++;

		ena_buf->paddr = paddr;
		ena_buf->len = slot->len;
		ena_buf++;

		tx_info->num_of_bufs++;

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;
	}

	return (0);

error_map:
	ena_netmap_unmap_last_socket_chain(ctx, tx_info);

	return (rc);
}

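/*
 * Undo ena_netmap_tx_map_slots() after a failure: unmap the DMA buffers
 * and hand the already consumed slots back to the kring.
 */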
static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
	struct ena_netmap_tx_info *nm_info;
	int n;

	nm_info = &tx_info->nm_info;

	/**
	 * In LLQ mode the number of used netmap slots (sockets) may differ
	 * from the number of DMA buffers, so they must be handled separately.
	 * First, unmap the DMA maps.
	 */
	n = tx_info->num_of_bufs;
	while (n--) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the slots back to the userspace */
	n = nm_info->sockets_used;
	while (n--) {
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
		ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
	}
	nm_info->sockets_used = 0;
}

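/* Reclaim Tx completions from the CQ and return their slots to netmap. */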
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
	uint16_t req_id;
	uint16_t total_tx_descs = 0;

	ctx->nm_i = ctx->kring->nr_hwtail;
	ctx->nt = ctx->ring->next_to_clean;

	/* Reclaim buffers for completed transmissions */
	while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
		if (validate_tx_req_id(ctx->ring, req_id) != 0)
			break;
		total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
	}

	ctx->kring->nr_hwtail = ctx->nm_i;

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ctx->ring->next_to_clean = ctx->nt;
		ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
		ena_com_update_dev_comp_head(ctx->ring->ena_com_io_cq);
	}
}

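/*
 * Clean a single completed Tx request: unmap its buffers, give the slots
 * back to userspace and recycle the request id.
 */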
static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
	struct ena_tx_buffer *tx_info;
	struct ena_netmap_tx_info *nm_info;
	int n;

	tx_info = &ctx->ring->tx_buffer_info[req_id];
	nm_info = &tx_info->nm_info;

	/**
	 * In LLQ mode the number of used netmap slots (sockets) may differ
	 * from the number of DMA buffers, so they must be handled separately.
	 * First, unmap the DMA maps.
	 */
	for (n = 0; n < tx_info->num_of_bufs; n++) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the slots back to the userspace */
	for (n = 0; n < nm_info->sockets_used; n++) {
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
		    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
	}
	nm_info->sockets_used = 0;

	ctx->ring->free_tx_ids[ctx->nt] = req_id;
	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (tx_info->tx_descs);
}

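/* Sanity-check a Tx completion request id; trigger a reset if it is bogus. */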
static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
	struct ena_adapter *adapter = tx_ring->adapter;

	if (likely(req_id < tx_ring->ring_size))
		return (0);

	ena_log_nm(adapter->pdev, WARN, "Invalid req_id: %hu\n", req_id);
	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

	ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return (EFAULT);
}

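/* Netmap rxsync callback: receive new frames and refill the Rx ring. */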
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

	if (ctx.kring->rhead > ctx.lim) {
		/* Probably not needed to release slots from RX ring. */
		return (netmap_ring_reinit(ctx.kring));
	}

	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
		return (0);

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		return (0);

	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
		return (rc);

	ena_netmap_rx_cleanup(&ctx);

	return (0);
}

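/* Drain received frames from the Rx CQ until it is empty. */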
static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
	int rc = 0;
	int frames_counter = 0;

	ctx->nt = ctx->ring->next_to_clean;
	ctx->nm_i = ctx->kring->nr_hwtail;

	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
		frames_counter++;
		/* In case of multiple frames, it is not an error. */
		rc = 0;
		if (frames_counter > ENA_MAX_FRAMES) {
			ena_log_nm(ctx->adapter->pdev, ERR,
			    "Driver is stuck in the Rx loop\n");
			break;
		}
	}

	ctx->kring->nr_hwtail = ctx->nm_i;
	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
	ctx->ring->next_to_clean = ctx->nt;

	return (rc);
}

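/*
 * Fetch one packet descriptor set from the device and load its buffers
 * into consecutive netmap slots.
 */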
static inline int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	enum ena_regs_reset_reason_types reset_reason;
	int rc, len = 0;
	uint16_t buf, nm;

	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
	if (unlikely(rc != 0)) {
		ena_log_nm(ctx->adapter->pdev, ERR,
		    "Failed to read pkt from the device with error: %d\n", rc);
		if (rc == ENA_COM_NO_SPACE) {
			counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
			reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
		} else {
			counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
			reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
		}
		ena_trigger_reset(ctx->adapter, reset_reason);
		return (rc);
	}
	if (unlikely(ena_rx_ctx.descs == 0))
		return (ENA_NETMAP_NO_MORE_FRAMES);

	ena_log_nm(ctx->adapter->pdev, DBG,
	    "Rx: q %d got packet from ena. descs #:"
	    " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid,
	    ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto,
	    ena_rx_ctx.hash);

	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
			break;
	/*
	 * ena_netmap_rx_load_desc doesn't know the total number of
	 * descriptors. It just sets the NS_MOREFRAG flag on every slot, so
	 * the flag on the last slot is cleared here.
	 */
	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

	if (rc != 0) {
		goto rx_clear_desc;
	}

	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

	counter_enter();
	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
	counter_exit();

	return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
	nm = ctx->nm_i;

	/* Remove failed packet from ring */
	while (buf--) {
		ctx->slots[nm].flags = 0;
		ctx->slots[nm].len = 0;
		nm = nm_prev(nm, ctx->lim);
	}

	return (rc);
}

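/*
 * Move a single received buffer from rx_buffer_info into the current
 * netmap slot and recycle its request id.
 */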
static inline int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
	struct ena_rx_buffer *rx_info;
	uint16_t req_id;

	req_id = ctx->ring->ena_bufs[buf].req_id;
	rx_info = &ctx->ring->rx_buffer_info[req_id];
	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

	ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
	    "Rx idx is not 0.\n");

	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
	rx_info->netmap_buf_idx = 0;
	/*
	 * Set NS_MOREFRAG on every slot.
	 * ena_netmap_rx_frame then clears it on the last one.
	 */
	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
	*len += ctx->slots[ctx->nm_i].len;
	ctx->ring->free_rx_ids[ctx->nt] = req_id;
	ena_log_nm(ctx->adapter->pdev, DBG,
	    "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info,
	    ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
	    ctx->nm_i);

	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (0);
}

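/* Refill the Rx ring with buffers released by the application. */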
static inline void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
	int refill_required;

	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
		refill_required -= 1;

	if (refill_required == 0)
		return;
	else if (refill_required < 0)
		refill_required += ctx->kring->nkr_num_slots;

	ena_refill_rx_bufs(ctx->ring, refill_required);
}

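/* Initialize the common fields of the sync context from the kring. */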
static inline void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
	ctx->kring = kring;
	ctx->na = kring->na;
	ctx->adapter = ctx->na->ifp->if_softc;
	ctx->lim = kring->nkr_num_slots - 1;
	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
	ctx->slots = kring->ring->slot;
}

void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
	struct netmap_adapter *na = NA(adapter->ifp);

	netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */