sys/dev/mana/mana.h
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef _MANA_H
#define _MANA_H

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/counter.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/tcp_lro.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

#define DRV_MODULE_NAME "mana"

#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION \
    __XSTRING(MANA_MAJOR_VERSION) "." \
    __XSTRING(MANA_MINOR_VERSION) "." \
    __XSTRING(MANA_MICRO_VERSION)
#endif
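
/*
 * With the version numbers defined above, DRV_MODULE_VERSION expands,
 * via the __XSTRING() stringification macro from <sys/cdefs.h>, to the
 * string literal "0.1.1".
 */
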
#define DEVICE_NAME "Microsoft Azure Network Adapter (MANA)"
#define DEVICE_DESC "MANA adapter"

/*
 * Supported PCI vendor and device IDs
 */
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif

#define PCI_DEV_ID_MANA_VF 0x00ba

typedef struct _mana_vendor_id_t {
        uint16_t vendor_id;
        uint16_t device_id;
} mana_vendor_id_t;

typedef uint64_t mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
        TRI_STATE_UNKNOWN = -1,
        TRI_STATE_FALSE = 0,
        TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
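
/*
 * A minimal sketch (not part of the original header; the helper name is
 * hypothetical) of how the mask above is typically used: the low bits of a
 * packet's Toeplitz hash index into the indirection table to pick an RX
 * queue.
 */
static inline uint32_t
mana_example_pick_rxq(const uint32_t *indir_table, uint32_t pkt_hash)
{
        return (indir_table[pkt_hash & MANA_INDIRECT_TABLE_MASK]);
}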

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
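
/*
 * Illustrative compile-time check (not in the original header): the key
 * size above satisfies the multiple-of-8 requirement.
 */
_Static_assert(MANA_HASH_KEY_SIZE % 8 == 0,
    "Toeplitz hash key size must be a multiple of 8 bytes");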

#define COMP_ENTRY_SIZE 64

#define MIN_FRAME_SIZE 146
#define ADAPTER_MTU_SIZE 1500
#define DEFAULT_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
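/* The 14 above is ETHER_HDR_LEN, the Ethernet header length. */
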
#define MAX_FRAME_SIZE 4096

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 8

struct mana_send_buf_info {
        struct mbuf *mbuf;
        bus_dmamap_t dma_map;

        /* Required to store the result of mana_gd_post_work_request.
         * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
         * work queue when the WQE is consumed.
         */
        struct gdma_posted_wqe_info wqe_inf;
};

struct mana_stats {
        counter_u64_t packets;          /* rx, tx */
        counter_u64_t bytes;            /* rx, tx */
        counter_u64_t stop;             /* tx */
        counter_u64_t wakeup;           /* tx */
        counter_u64_t collapse;         /* tx */
        counter_u64_t collapse_err;     /* tx */
        counter_u64_t dma_mapping_err;  /* rx, tx */
        counter_u64_t mbuf_alloc_fail;  /* rx */
        counter_u64_t alt_chg;          /* tx */
        counter_u64_t alt_reset;        /* tx */
};
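
/*
 * Usage sketch (hypothetical helper, not in the original header): each
 * counter_u64_t must be allocated with counter_u64_alloc() (see counter(9))
 * before use; updates are lockless per-CPU adds.
 */
static inline void
mana_example_count_tx(struct mana_stats *stats, uint64_t bytes)
{
        counter_u64_add(stats->packets, 1);
        counter_u64_add(stats->bytes, bytes);
}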

struct mana_txq {
        struct gdma_queue *gdma_sq;

        union {
                uint32_t gdma_txq_id;
                struct {
                        uint32_t reserved1      :10;
                        uint32_t vsq_frame      :14;
                        uint32_t reserved2      :8;
                };
        };

        uint16_t vp_offset;

        struct ifnet *ndev;
        /* Store index to the array of tx_qp in port structure */
        int idx;
        /* The alternative txq idx when this txq is under heavy load */
        int alt_txq_idx;

        /* The mbufs are sent to the HW and we are waiting for the CQEs. */
        struct mana_send_buf_info *tx_buf_info;
        uint16_t next_to_use;
        uint16_t next_to_complete;

        atomic_t pending_sends;

        struct buf_ring *txq_br;
        struct mtx txq_mtx;
        char txq_mtx_name[16];

        struct task enqueue_task;
        struct taskqueue *enqueue_tq;

        struct mana_stats stats;
};
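
/*
 * Illustrative accessor (hypothetical, and it assumes the little-endian
 * bitfield layout used on supported platforms): the anonymous union in
 * struct mana_txq overlays vsq_frame on bits 10..23 of gdma_txq_id.
 */
static inline uint32_t
mana_example_vsq_frame(const struct mana_txq *txq)
{
        return ((txq->gdma_txq_id >> 10) & 0x3fff);
}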

/*
 * Max WQE size is 512B. The first 8B is for the GDMA Out of Band (OOB);
 * next is the Client OOB, which can be either 8B or 24B. Thus, with an 8B
 * Client OOB, the max space for SGL entries in a single WQE is
 * 512 - 8 - 8 = 496B. Since each SGL entry is 16B in size, the max number
 * of SGLs in a WQE is 496/16 = 31. Save one for emergency use, and set
 * MAX_MBUF_FRAGS to 30.
 */
#define MAX_MBUF_FRAGS 30
#define MANA_TSO_MAXSEG_SZ PAGE_SIZE
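
/*
 * Illustrative compile-time check of the arithmetic above (not in the
 * original header): 31 SGL entries fit in the 496B, and MAX_MBUF_FRAGS
 * keeps one of them in reserve.
 */
_Static_assert(MAX_MBUF_FRAGS + 1 == (512 - 8 - 8) / 16,
    "MAX_MBUF_FRAGS must leave exactly one spare SGL entry");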

/* mbuf data and frags dma mappings */
struct mana_mbuf_head {
        bus_addr_t dma_handle[MAX_MBUF_FRAGS + 1];

        uint32_t size[MAX_MBUF_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_mbuf_head)

enum mana_tx_pkt_format {
        MANA_SHORT_PKT_FMT = 0,
        MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
        uint32_t pkt_fmt                :2;
        uint32_t is_outer_ipv4          :1;
        uint32_t is_outer_ipv6          :1;
        uint32_t comp_iphdr_csum        :1;
        uint32_t comp_tcp_csum          :1;
        uint32_t comp_udp_csum          :1;
        uint32_t supress_txcqe_gen      :1;
        uint32_t vcq_num                :24;

        uint32_t trans_off              :10; /* Transport header offset */
        uint32_t vsq_frame              :14;
        uint32_t short_vp_offset        :8;
}; /* HW DATA */

struct mana_tx_long_oob {
        uint32_t is_encap               :1;
        uint32_t inner_is_ipv6          :1;
        uint32_t inner_tcp_opt          :1;
        uint32_t inject_vlan_pri_tag    :1;
        uint32_t reserved1              :12;
        uint32_t pcp                    :3;  /* 802.1Q */
        uint32_t dei                    :1;  /* 802.1Q */
        uint32_t vlan_id                :12; /* 802.1Q */

        uint32_t inner_frame_offset     :10;
        uint32_t inner_ip_rel_offset    :6;
        uint32_t long_vp_offset         :12;
        uint32_t reserved2              :4;

        uint32_t reserved3;
        uint32_t reserved4;
}; /* HW DATA */

struct mana_tx_oob {
        struct mana_tx_short_oob s_oob;
        struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
        MANA_CQ_TYPE_RX,
        MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
        CQE_INVALID                     = 0,
        CQE_RX_OKAY                     = 1,
        CQE_RX_COALESCED_4              = 2,
        CQE_RX_OBJECT_FENCE             = 3,
        CQE_RX_TRUNCATED                = 4,

        CQE_TX_OKAY                     = 32,
        CQE_TX_SA_DROP                  = 33,
        CQE_TX_MTU_DROP                 = 34,
        CQE_TX_INVALID_OOB              = 35,
        CQE_TX_INVALID_ETH_TYPE         = 36,
        CQE_TX_HDR_PROCESSING_ERROR     = 37,
        CQE_TX_VF_DISABLED              = 38,
        CQE_TX_VPORT_IDX_OUT_OF_RANGE   = 39,
        CQE_TX_VPORT_DISABLED           = 40,
        CQE_TX_VLAN_TAGGING_VIOLATION   = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
        uint32_t cqe_type       :6;
        uint32_t client_type    :2;
        uint32_t vendor_err     :24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4          BIT(0)
#define NDIS_HASH_TCP_IPV4      BIT(1)
#define NDIS_HASH_UDP_IPV4      BIT(2)
#define NDIS_HASH_IPV6          BIT(3)
#define NDIS_HASH_TCP_IPV6      BIT(4)
#define NDIS_HASH_UDP_IPV6      BIT(5)
#define NDIS_HASH_IPV6_EX       BIT(6)
#define NDIS_HASH_TCP_IPV6_EX   BIT(7)
#define NDIS_HASH_UDP_IPV6_EX   BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
    (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
    NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

#define NDIS_HASH_IPV4_L3_MASK (NDIS_HASH_IPV4)
#define NDIS_HASH_IPV4_L4_MASK (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4)
#define NDIS_HASH_IPV6_L3_MASK (NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define NDIS_HASH_IPV6_L4_MASK \
    (NDIS_HASH_TCP_IPV6 | NDIS_HASH_UDP_IPV6 | \
    NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
#define NDIS_HASH_IPV4_MASK \
    (NDIS_HASH_IPV4_L3_MASK | NDIS_HASH_IPV4_L4_MASK)
#define NDIS_HASH_IPV6_MASK \
    (NDIS_HASH_IPV6_L3_MASK | NDIS_HASH_IPV6_L4_MASK)
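
/*
 * A minimal sketch (hypothetical helpers, not part of this header) of how
 * the masks above classify the 9-bit rx_hashtype reported in an RX
 * completion, e.g. when choosing an mbuf hash type.
 */
static inline bool
mana_example_hash_is_l4(uint32_t hashtype)
{
        return ((hashtype & MANA_HASH_L4) != 0);
}

static inline bool
mana_example_hash_is_l3(uint32_t hashtype)
{
        return ((hashtype & MANA_HASH_L3) != 0);
}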

struct mana_rxcomp_perpkt_info {
        uint32_t pkt_len        :16;
        uint32_t reserved1      :16;
        uint32_t reserved2;
        uint32_t pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
        struct mana_cqe_header cqe_hdr;

        uint32_t rx_vlan_id                     :12;
        uint32_t rx_vlantag_present             :1;
        uint32_t rx_outer_iphdr_csum_succeed    :1;
        uint32_t rx_outer_iphdr_csum_fail       :1;
        uint32_t reserved1                      :1;
        uint32_t rx_hashtype                    :9;
        uint32_t rx_iphdr_csum_succeed          :1;
        uint32_t rx_iphdr_csum_fail             :1;
        uint32_t rx_tcp_csum_succeed            :1;
        uint32_t rx_tcp_csum_fail               :1;
        uint32_t rx_udp_csum_succeed            :1;
        uint32_t rx_udp_csum_fail               :1;
        uint32_t reserved2                      :1;

        struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

        uint32_t rx_wqe_offset;
}; /* HW DATA */
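
/*
 * Simplified sketch (hypothetical helper) of how the checksum bits above
 * might be combined; a real handler would also distinguish TCP from UDP
 * and set the matching mbuf CSUM flags.
 */
static inline bool
mana_example_rx_csum_ok(const struct mana_rxcomp_oob *oob)
{
        return (oob->rx_iphdr_csum_succeed &&
            !oob->rx_tcp_csum_fail && !oob->rx_udp_csum_fail);
}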

struct mana_tx_comp_oob {
        struct mana_cqe_header cqe_hdr;

        uint32_t tx_data_offset;

        uint32_t tx_sgl_offset  :5;
        uint32_t tx_wqe_offset  :27;

        uint32_t reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
        struct gdma_queue *gdma_cq;

        /* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
        uint32_t gdma_id;

        /* Type of the CQ: TX or RX */
        enum mana_cq_type type;

        /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
         * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
         */
        struct mana_rxq *rxq;

        /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
         * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
         */
        struct mana_txq *txq;

        /* Taskqueue and related structs */
        struct task cleanup_task;
        struct taskqueue *cleanup_tq;
        int cpu;
        bool do_not_ring_db;

        /* Budget for one cleanup task */
        int work_done;
        int budget;

        /* Buffer into which the CQ handler can copy the CQEs. */
        struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
};
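
/*
 * Sketch (hypothetical helper) of the budget convention above: a cleanup
 * pass records how many CQEs it processed in work_done, and a fully spent
 * budget suggests more completions may still be pending.
 */
static inline bool
mana_example_cq_budget_spent(const struct mana_cq *cq)
{
        return (cq->work_done >= cq->budget);
}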

struct mana_recv_buf_oob {
        /* A valid GDMA work request representing the data buffer. */
        struct gdma_wqe_request wqe_req;

        struct mbuf *mbuf;
        bus_dmamap_t dma_map;

        /* SGL of the buffer to be sent as part of the work request. */
        uint32_t num_sge;
        struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

        /* Required to store the result of mana_gd_post_work_request.
         * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
         * work queue when the WQE is consumed.
         */
        struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
        struct gdma_queue *gdma_rq;
        /* Cache the gdma receive queue id */
        uint32_t gdma_id;

        /* Index of RQ in the vPort, not gdma receive queue id */
        uint32_t rxq_idx;

        uint32_t datasize;

        mana_handle_t rxobj;

        struct completion fence_event;

        struct mana_cq rx_cq;

        struct ifnet *ndev;
        struct lro_ctrl lro;

        /* Total number of receive buffers to be allocated */
        uint32_t num_rx_buf;

        uint32_t buf_index;

        struct mana_stats stats;

        /* MUST BE THE LAST MEMBER:
         * Each receive buffer has an associated mana_recv_buf_oob.
         */
        struct mana_recv_buf_oob rx_oobs[];
};
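
/*
 * Allocation sketch (hypothetical helper): because rx_oobs[] is a flexible
 * array member, a queue and its per-buffer OOBs are sized as one
 * contiguous block.
 */
static inline size_t
mana_example_rxq_alloc_size(uint32_t num_rx_buf)
{
        return (sizeof(struct mana_rxq) +
            num_rx_buf * sizeof(struct mana_recv_buf_oob));
}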

struct mana_tx_qp {
        struct mana_txq txq;

        struct mana_cq tx_cq;

        mana_handle_t tx_object;
};

struct mana_port_stats {
        counter_u64_t rx_packets;
        counter_u64_t tx_packets;

        counter_u64_t rx_bytes;
        counter_u64_t tx_bytes;

        counter_u64_t rx_drops;
        counter_u64_t tx_drops;

        counter_u64_t stop_queue;
        counter_u64_t wake_queue;
};

struct mana_context {
        struct gdma_dev *gdma_dev;

        uint16_t num_ports;

        struct mana_eq *eqs;

        struct ifnet *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
        struct mana_context *ac;
        struct ifnet *ndev;
        struct ifmedia media;

        struct sx apc_lock;

        /* DMA tag used for queue bufs of the entire port */
        bus_dma_tag_t rx_buf_tag;
        bus_dma_tag_t tx_buf_tag;

        uint8_t mac_addr[ETHER_ADDR_LEN];

        enum TRI_STATE rss_state;

        mana_handle_t default_rxobj;
        bool tx_shortform_allowed;
        uint16_t tx_vp_offset;

        struct mana_tx_qp *tx_qp;

        /* Indirection Table for RX & TX. The values are queue indexes */
        uint32_t indir_table[MANA_INDIRECT_TABLE_SIZE];

        /* Indirection table containing RxObject Handles */
        mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

        /* Hash key used by the NIC */
        uint8_t hashkey[MANA_HASH_KEY_SIZE];

        /* This points to an array of num_queues of RQ pointers. */
        struct mana_rxq **rxqs;

        /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
        unsigned int max_queues;
        unsigned int num_queues;

        mana_handle_t port_handle;

        int vport_use_count;

        uint16_t port_idx;

        uint16_t frame_size;

        bool port_is_up;
        bool port_st_save; /* Saved port state */

        bool enable_tx_altq;

        bool bind_cleanup_thread_cpu;
        int last_tx_cq_bind_cpu;
        int last_rx_cq_bind_cpu;

        struct mana_port_stats port_stats;

        struct sysctl_oid_list *port_list;
        struct sysctl_ctx_list que_sysctl_ctx;
};

#define MANA_APC_LOCK_INIT(apc) \
    sx_init(&(apc)->apc_lock, "MANA port lock")
#define MANA_APC_LOCK_DESTROY(apc) sx_destroy(&(apc)->apc_lock)
#define MANA_APC_LOCK_LOCK(apc) sx_xlock(&(apc)->apc_lock)
#define MANA_APC_LOCK_UNLOCK(apc) sx_unlock(&(apc)->apc_lock)
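
/*
 * Usage sketch (hypothetical function): the sx(9) lock taken by these
 * macros serializes port (re)configuration paths.
 */
static inline void
mana_example_reconfigure(struct mana_port_context *apc)
{
        MANA_APC_LOCK_LOCK(apc);
        /* ... modify port state while the lock is held ... */
        MANA_APC_LOCK_UNLOCK(apc);
}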

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
    bool update_hash, bool update_tab);

int mana_alloc_queues(struct ifnet *ndev);
int mana_attach(struct ifnet *ndev);
int mana_detach(struct ifnet *ndev);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

struct mana_obj_spec {
        uint32_t queue_index;
        uint64_t gdma_region;
        uint32_t queue_size;
        uint32_t attached_eq;
        uint32_t modr_ctx_id;
};

enum mana_command_code {
        MANA_QUERY_DEV_CONFIG   = 0x20001,
        MANA_QUERY_GF_STAT      = 0x20002,
        MANA_CONFIG_VPORT_TX    = 0x20003,
        MANA_CREATE_WQ_OBJ      = 0x20004,
        MANA_DESTROY_WQ_OBJ     = 0x20005,
        MANA_FENCE_RQ           = 0x20006,
        MANA_CONFIG_VPORT_RX    = 0x20007,
        MANA_QUERY_VPORT_CONFIG = 0x20008,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
        struct gdma_req_hdr hdr;

        /* Driver Capability flags */
        uint64_t drv_cap_flags1;
        uint64_t drv_cap_flags2;
        uint64_t drv_cap_flags3;
        uint64_t drv_cap_flags4;

        uint32_t proto_major_ver;
        uint32_t proto_minor_ver;
        uint32_t proto_micro_ver;

        uint32_t reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
        struct gdma_resp_hdr hdr;

        uint64_t pf_cap_flags1;
        uint64_t pf_cap_flags2;
        uint64_t pf_cap_flags3;
        uint64_t pf_cap_flags4;

        uint16_t max_num_vports;
        uint16_t reserved;
        uint32_t max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
        struct gdma_req_hdr hdr;
        uint32_t vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
        struct gdma_resp_hdr hdr;
        uint32_t max_num_sq;
        uint32_t max_num_rq;
        uint32_t num_indirection_ent;
        uint32_t reserved1;
        uint8_t mac_addr[6];
        uint8_t reserved2[2];
        mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        uint32_t pdid;
        uint32_t doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
        struct gdma_resp_hdr hdr;
        uint16_t tx_vport_offset;
        uint8_t short_form_allowed;
        uint8_t reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        uint32_t wq_type;
        uint32_t reserved;
        uint64_t wq_gdma_region;
        uint64_t cq_gdma_region;
        uint32_t wq_size;
        uint32_t cq_size;
        uint32_t cq_moderation_ctx_id;
        uint32_t cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
        struct gdma_resp_hdr hdr;
        uint32_t wq_id;
        uint32_t cq_id;
        mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
        struct gdma_req_hdr hdr;
        uint32_t wq_type;
        uint32_t reserved;
        mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
        struct gdma_req_hdr hdr;
        mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        uint16_t num_indir_entries;
        uint16_t indir_tab_offset;
        uint32_t rx_enable;
        uint32_t rss_enable;
        uint8_t update_default_rxobj;
        uint8_t update_hashkey;
        uint8_t update_indir_tab;
        uint8_t reserved;
        mana_handle_t default_rxobj;
        uint8_t hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */
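
/*
 * Layout sketch (an assumption based on the fields above, not stated in
 * the header): the indirection table entries are appended to the request
 * buffer, with indir_tab_offset giving their offset from the start of the
 * request.
 */
static inline size_t
mana_example_rx_steer_req_size(uint16_t num_indir_entries)
{
        return (sizeof(struct mana_cfg_rx_steer_req) +
            (size_t)num_indir_entries * sizeof(mana_handle_t));
}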

struct mana_cfg_rx_steer_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 16

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
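
/*
 * Selection sketch (hypothetical helper): a vport offset too large for the
 * 8-bit short_vp_offset field forces the long packet format, whose
 * long_vp_offset field is 12 bits wide.
 */
static inline enum mana_tx_pkt_format
mana_example_pick_pkt_fmt(uint16_t vp_offset)
{
        return (vp_offset > MANA_SHORT_VPORT_OFFSET_MAX ?
            MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT);
}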

struct mana_tx_package {
        struct gdma_wqe_request wqe_req;
        struct gdma_sge sgl_array[MAX_MBUF_FRAGS];

        struct mana_tx_oob tx_oob;

        struct gdma_posted_wqe_info wqe_info;
};

int mana_restart(struct mana_port_context *apc);

int mana_create_wq_obj(struct mana_port_context *apc,
    mana_handle_t vport,
    uint32_t wq_type, struct mana_obj_spec *wq_spec,
    struct mana_obj_spec *cq_spec,
    mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
    mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
    uint32_t doorbell_pg_id);

void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */