FreeBSD/Linux Kernel Cross Reference
sys/dev/mlx5/device.h
1 /*-
2 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28 #ifndef MLX5_DEVICE_H
29 #define MLX5_DEVICE_H
30
31 #include <linux/types.h>
32 #include <rdma/ib_verbs.h>
33 #include <dev/mlx5/mlx5_ifc.h>
34
35 #define FW_INIT_TIMEOUT_MILI 2000
36 #define FW_INIT_WAIT_MS 2
37 #define FW_PRE_INIT_TIMEOUT_MILI 120000
38 #define FW_INIT_WARN_MESSAGE_INTERVAL 20000
39
40 #if defined(__LITTLE_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS 0
42 #elif defined(__BIG_ENDIAN)
43 #define MLX5_SET_HOST_ENDIANNESS 0x80
44 #else
45 #error Host endianness not defined
46 #endif
47
48 /* helper macros */
49 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
50 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
51 #define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
52 #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
53 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
54 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
55 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
56 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
57 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
58 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
59 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
60 #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
61 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
62
63 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
64 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
65 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
66 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
67 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
68 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
69 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
70 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
71
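/*
 * Editor's note (illustrative, not part of the original header): for a
 * hypothetical 12-bit field that starts 0x28 bits (40 bits) into its
 * structure, the helpers above evaluate as:
 *
 *	__mlx5_dw_off     = 40 / 32                = 1     (second 32-bit dword)
 *	__mlx5_dw_bit_off = 32 - 12 - (40 & 0x1f)  = 12    (shift within that dword)
 *	__mlx5_mask       = (1ull << 12) - 1       = 0xfff
 *
 * so the MLX5_SET()/MLX5_GET() accessors below read-modify-write bits
 * [23:12] of the second big-endian dword of the structure.
 */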
72 /* insert a value to a struct */
73 #define MLX5_SET(typ, p, fld, v) do { \
74 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
75 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
76 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
77 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
78 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
79 << __mlx5_dw_bit_off(typ, fld))); \
80 } while (0)
81
82 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
83 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
84 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
85 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
86 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
87 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
88 << __mlx5_dw_bit_off(typ, fld))); \
89 } while (0)
90
91 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
92 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
93 __mlx5_mask(typ, fld))
94
95 #define MLX5_GET_PR(typ, p, fld) ({ \
96 u32 ___t = MLX5_GET(typ, p, fld); \
97 pr_debug(#fld " = 0x%x\n", ___t); \
98 ___t; \
99 })
100
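/*
 * Editor's example (illustrative, not part of the original header): the
 * accessors above operate on the big-endian command layouts declared in
 * dev/mlx5/mlx5_ifc.h, e.g. building an ENABLE_HCA command buffer:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	MLX5_SET(enable_hca_in, in, function_id, 0);
 *	...
 *	status = MLX5_GET(enable_hca_out, out, status);
 */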
101 #define __MLX5_SET64(typ, p, fld, v) do { \
102 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
103 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
104 } while (0)
105
106 #define MLX5_SET64(typ, p, fld, v) do { \
107 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
108 __MLX5_SET64(typ, p, fld, v); \
109 } while (0)
110
111 #define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
112 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
113 __MLX5_SET64(typ, p, fld[idx], v); \
114 } while (0)
115
116 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
117
118 #define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
119 __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
120 __mlx5_mask16(typ, fld))
121
122 #define MLX5_SET16(typ, p, fld, v) do { \
123 u16 _v = v; \
124 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
125 *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
126 cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
127 (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
128 << __mlx5_16_bit_off(typ, fld))); \
129 } while (0)
130
131 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
132 __mlx5_64_off(typ, fld)))
133
134 #define MLX5_GET_BE(type_t, typ, p, fld) ({ \
135 type_t tmp; \
136 switch (sizeof(tmp)) { \
137 case sizeof(u8): \
138 tmp = (__force type_t)MLX5_GET(typ, p, fld); \
139 break; \
140 case sizeof(u16): \
141 tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
142 break; \
143 case sizeof(u32): \
144 tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
145 break; \
146 case sizeof(u64): \
147 tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
148 break; \
149 } \
150 tmp; \
151 })
152
153 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
154 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
155 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
156 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
157 MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
158 MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
159
160 /* insert a value to a struct */
161 #define MLX5_VSC_SET(typ, p, fld, v) do { \
162 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
163 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
164 *((__le32 *)(p) + __mlx5_dw_off(typ, fld)) = \
165 cpu_to_le32((le32_to_cpu(*((__le32 *)(p) + __mlx5_dw_off(typ, fld))) & \
166 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
167 << __mlx5_dw_bit_off(typ, fld))); \
168 } while (0)
169
170 #define MLX5_VSC_GET(typ, p, fld) ((le32_to_cpu(*((__le32 *)(p) +\
171 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
172 __mlx5_mask(typ, fld))
173
174 #define MLX5_VSC_GET_PR(typ, p, fld) ({ \
175 u32 ___t = MLX5_VSC_GET(typ, p, fld); \
176 pr_debug(#fld " = 0x%x\n", ___t); \
177 ___t; \
178 })
179
180 enum {
181 MLX5_MAX_COMMANDS = 32,
182 MLX5_CMD_DATA_BLOCK_SIZE = 512,
183 MLX5_CMD_MBOX_SIZE = 1024,
184 MLX5_PCI_CMD_XPORT = 7,
185 MLX5_MKEY_BSF_OCTO_SIZE = 4,
186 MLX5_MAX_PSVS = 4,
187 };
188
189 enum {
190 MLX5_EXTENDED_UD_AV = 0x80000000,
191 };
192
193 enum {
194 MLX5_CQ_FLAGS_OI = 2,
195 };
196
197 enum {
198 MLX5_STAT_RATE_OFFSET = 5,
199 };
200
201 enum {
202 MLX5_INLINE_SEG = 0x80000000,
203 };
204
205 enum {
206 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
207 };
208
209 enum {
210 MLX5_MIN_PKEY_TABLE_SIZE = 128,
211 MLX5_MAX_LOG_PKEY_TABLE = 5,
212 };
213
214 enum {
215 MLX5_MKEY_INBOX_PG_ACCESS = 1U << 31
216 };
217
218 enum {
219 MLX5_PERM_LOCAL_READ = 1 << 2,
220 MLX5_PERM_LOCAL_WRITE = 1 << 3,
221 MLX5_PERM_REMOTE_READ = 1 << 4,
222 MLX5_PERM_REMOTE_WRITE = 1 << 5,
223 MLX5_PERM_ATOMIC = 1 << 6,
224 MLX5_PERM_UMR_EN = 1 << 7,
225 };
226
227 enum {
228 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
229 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
230 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
231 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
232 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
233 };
234
235 enum {
236 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
237 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
238 MLX5_MKEY_BSF_EN = 1 << 30,
239 MLX5_MKEY_LEN64 = 1U << 31,
240 };
241
242 enum {
243 MLX5_EN_RD = (u64)1,
244 MLX5_EN_WR = (u64)2
245 };
246
247 enum {
248 MLX5_ADAPTER_PAGE_SHIFT = 12,
249 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
250 };
251
252 enum {
253 MLX5_BFREGS_PER_UAR = 4,
254 MLX5_MAX_UARS = 1 << 8,
255 MLX5_NON_FP_BFREGS_PER_UAR = 2,
256 MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
257 MLX5_NON_FP_BFREGS_PER_UAR,
258 MLX5_MAX_BFREGS = MLX5_MAX_UARS *
259 MLX5_NON_FP_BFREGS_PER_UAR,
260 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
261 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
262 MLX5_MIN_DYN_BFREGS = 512,
263 MLX5_MAX_DYN_BFREGS = 1024,
264 };
265
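/*
 * Editor's note (illustrative, not part of the original header): with the
 * values above, MLX5_MAX_BFREGS = 256 UARs * 2 non-fast-path blue flame
 * registers = 512.  Assuming a 4 KiB host PAGE_SIZE, MLX5_UARS_IN_PAGE = 1
 * and MLX5_NON_FP_BFREGS_IN_PAGE = 2; larger host pages scale both linearly.
 */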
266 enum {
267 MLX5_MKEY_MASK_LEN = 1ull << 0,
268 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
269 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
270 MLX5_MKEY_MASK_PD = 1ull << 7,
271 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
272 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
273 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
274 MLX5_MKEY_MASK_KEY = 1ull << 13,
275 MLX5_MKEY_MASK_QPN = 1ull << 14,
276 MLX5_MKEY_MASK_LR = 1ull << 17,
277 MLX5_MKEY_MASK_LW = 1ull << 18,
278 MLX5_MKEY_MASK_RR = 1ull << 19,
279 MLX5_MKEY_MASK_RW = 1ull << 20,
280 MLX5_MKEY_MASK_A = 1ull << 21,
281 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
282 MLX5_MKEY_MASK_FREE = 1ull << 29,
283 };
284
285 enum {
286 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
287
288 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
289 MLX5_UMR_CHECK_FREE = (2 << 5),
290
291 MLX5_UMR_INLINE = (1 << 7),
292 };
293
294 #define MLX5_UMR_MTT_ALIGNMENT 0x40
295 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
296 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
297
298 enum {
299 MLX5_EVENT_QUEUE_TYPE_QP = 0,
300 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
301 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
302 MLX5_EVENT_QUEUE_TYPE_DCT = 6,
303 };
304
305 enum {
306 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
307 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
308 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
309 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
310 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
311 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
312 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
313 };
314
315 enum {
316 MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
317 MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
318 MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
319 MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
320 MLX5_MAX_INLINE_RECEIVE_SIZE = 64
321 };
322
323 enum {
324 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
325 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
326 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
327 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
328 MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
329 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
330 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
331 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
332 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
333 MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
334 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
335 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
336 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
337 MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
338 };
339
340 enum {
341 MLX5_ROCE_VERSION_1 = 0,
342 MLX5_ROCE_VERSION_1_5 = 1,
343 MLX5_ROCE_VERSION_2 = 2,
344 };
345
346 enum {
347 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
348 MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5,
349 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
350 };
351
352 enum {
353 MLX5_ROCE_L3_TYPE_IPV4 = 0,
354 MLX5_ROCE_L3_TYPE_IPV6 = 1,
355 };
356
357 enum {
358 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
359 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
360 };
361
362 enum {
363 MLX5_OPCODE_NOP = 0x00,
364 MLX5_OPCODE_SEND_INVAL = 0x01,
365 MLX5_OPCODE_RDMA_WRITE = 0x08,
366 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
367 MLX5_OPCODE_SEND = 0x0a,
368 MLX5_OPCODE_SEND_IMM = 0x0b,
369 MLX5_OPCODE_LSO = 0x0e,
370 MLX5_OPCODE_RDMA_READ = 0x10,
371 MLX5_OPCODE_ATOMIC_CS = 0x11,
372 MLX5_OPCODE_ATOMIC_FA = 0x12,
373 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
374 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
375 MLX5_OPCODE_BIND_MW = 0x18,
376 MLX5_OPCODE_CONFIG_CMD = 0x1f,
377 MLX5_OPCODE_DUMP = 0x23,
378
379 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
380 MLX5_RECV_OPCODE_SEND = 0x01,
381 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
382 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
383
384 MLX5_CQE_OPCODE_ERROR = 0x1e,
385 MLX5_CQE_OPCODE_RESIZE = 0x16,
386
387 MLX5_OPCODE_SET_PSV = 0x20,
388 MLX5_OPCODE_GET_PSV = 0x21,
389 MLX5_OPCODE_CHECK_PSV = 0x22,
390 MLX5_OPCODE_RGET_PSV = 0x26,
391 MLX5_OPCODE_RCHECK_PSV = 0x27,
392
393 MLX5_OPCODE_UMR = 0x25,
394 MLX5_OPCODE_QOS_REMAP = 0x2a,
395
396 MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
397 };
398
399 enum {
400 MLX5_OPCODE_MOD_UMR_UMR = 0x0,
401 MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS = 0x1,
402 MLX5_OPCODE_MOD_UMR_TLS_TIR_STATIC_PARAMS = 0x2,
403 };
404
405 enum {
406 MLX5_OPCODE_MOD_PSV_PSV = 0x0,
407 MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS = 0x1,
408 MLX5_OPCODE_MOD_PSV_TLS_TIR_PROGRESS_PARAMS = 0x2,
409 };
410
411 struct mlx5_wqe_tls_static_params_seg {
412 u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
413 };
414
415 struct mlx5_wqe_tls_progress_params_seg {
416 u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
417 } __aligned(64);
418
419 enum {
420 MLX5_SET_PORT_RESET_QKEY = 0,
421 MLX5_SET_PORT_GUID0 = 16,
422 MLX5_SET_PORT_NODE_GUID = 17,
423 MLX5_SET_PORT_SYS_GUID = 18,
424 MLX5_SET_PORT_GID_TABLE = 19,
425 MLX5_SET_PORT_PKEY_TABLE = 20,
426 };
427
428 enum {
429 MLX5_MAX_PAGE_SHIFT = 31
430 };
431
432 enum {
433 MLX5_CAP_OFF_CMDIF_CSUM = 46,
434 };
435
436 enum {
437 /*
438 * Max wqe size for rdma read is 512 bytes, so this
439 * limits our max_sge_rd as the wqe needs to fit:
440 * - ctrl segment (16 bytes)
441 * - rdma segment (16 bytes)
442 * - scatter elements (16 bytes each)
443 */
444 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
445 };
446
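/*
 * Editor's note: with the 512-byte WQE limit described above this
 * evaluates to (512 - 16 - 16) / 16 = 30 scatter elements per RDMA read WQE.
 */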
447 struct mlx5_cmd_layout {
448 u8 type;
449 u8 rsvd0[3];
450 __be32 inlen;
451 __be64 in_ptr;
452 __be32 in[4];
453 __be32 out[4];
454 __be64 out_ptr;
455 __be32 outlen;
456 u8 token;
457 u8 sig;
458 u8 rsvd1;
459 u8 status_own;
460 };
461
462 enum mlx5_fatal_assert_bit_offsets {
463 MLX5_RFR_OFFSET = 31,
464 };
465
466 struct mlx5_health_buffer {
467 __be32 assert_var[5];
468 __be32 rsvd0[3];
469 __be32 assert_exit_ptr;
470 __be32 assert_callra;
471 __be32 rsvd1[2];
472 __be32 fw_ver;
473 __be32 hw_id;
474 __be32 rfr;
475 u8 irisc_index;
476 u8 synd;
477 __be16 ext_synd;
478 };
479
480 enum mlx5_initializing_bit_offsets {
481 MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
482 };
483
484 enum mlx5_cmd_addr_l_sz_offset {
485 MLX5_NIC_IFC_OFFSET = 8,
486 };
487
488 struct mlx5_init_seg {
489 __be32 fw_rev;
490 __be32 cmdif_rev_fw_sub;
491 __be32 rsvd0[2];
492 __be32 cmdq_addr_h;
493 __be32 cmdq_addr_l_sz;
494 __be32 cmd_dbell;
495 __be32 rsvd1[120];
496 __be32 initializing;
497 struct mlx5_health_buffer health;
498 __be32 rsvd2[880];
499 __be32 internal_timer_h;
500 __be32 internal_timer_l;
501 __be32 rsvd3[2];
502 __be32 health_counter;
503 __be32 rsvd4[1019];
504 __be64 ieee1588_clk;
505 __be32 ieee1588_clk_type;
506 __be32 clr_intx;
507 };
508
509 struct mlx5_eqe_comp {
510 __be32 reserved[6];
511 __be32 cqn;
512 };
513
514 struct mlx5_eqe_qp_srq {
515 __be32 reserved1[5];
516 u8 type;
517 u8 reserved2[3];
518 __be32 qp_srq_n;
519 };
520
521 struct mlx5_eqe_cq_err {
522 __be32 cqn;
523 u8 reserved1[7];
524 u8 syndrome;
525 };
526
527 struct mlx5_eqe_xrq_err {
528 __be32 reserved1[5];
529 __be32 type_xrqn;
530 __be32 reserved2;
531 };
532
533 struct mlx5_eqe_port_state {
534 u8 reserved0[8];
535 u8 port;
536 };
537
538 struct mlx5_eqe_gpio {
539 __be32 reserved0[2];
540 __be64 gpio_event;
541 };
542
543 struct mlx5_eqe_congestion {
544 u8 type;
545 u8 rsvd0;
546 u8 congestion_level;
547 };
548
549 struct mlx5_eqe_stall_vl {
550 u8 rsvd0[3];
551 u8 port_vl;
552 };
553
554 struct mlx5_eqe_cmd {
555 __be32 vector;
556 __be32 rsvd[6];
557 };
558
559 struct mlx5_eqe_page_req {
560 u8 rsvd0[2];
561 __be16 func_id;
562 __be32 num_pages;
563 __be32 rsvd1[5];
564 };
565
566 struct mlx5_eqe_vport_change {
567 u8 rsvd0[2];
568 __be16 vport_num;
569 __be32 rsvd1[6];
570 };
571
572
573 #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
574 #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
575
576 enum {
577 MLX5_MODULE_STATUS_PLUGGED_ENABLED = 0x1,
578 MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
579 MLX5_MODULE_STATUS_ERROR = 0x3,
580 MLX5_MODULE_STATUS_NUM ,
581 };
582
583 enum {
584 MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
585 MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
586 MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
587 MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
588 MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
589 MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE = 0x5,
590 MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
591 MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
592 MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED = 0x8,
593 MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE = 0x9,
594 MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT = 0xa,
595 MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE = 0xb,
596 MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED = 0xc,
597 MLX5_MODULE_EVENT_ERROR_HIGH_POWER = 0xd,
598 MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT = 0xe,
599 MLX5_MODULE_EVENT_ERROR_NUM ,
600 };
601
602 struct mlx5_eqe_port_module_event {
603 u8 rsvd0;
604 u8 module;
605 u8 rsvd1;
606 u8 module_status;
607 u8 rsvd2[2];
608 u8 error_type;
609 };
610
611 struct mlx5_eqe_general_notification_event {
612 u32 rq_user_index_delay_drop;
613 u32 rsvd0[6];
614 };
615
616 struct mlx5_eqe_dct {
617 __be32 reserved[6];
618 __be32 dctn;
619 };
620
621 struct mlx5_eqe_temp_warning {
622 __be64 sensor_warning_msb;
623 __be64 sensor_warning_lsb;
624 } __packed;
625
626 union ev_data {
627 __be32 raw[7];
628 struct mlx5_eqe_cmd cmd;
629 struct mlx5_eqe_comp comp;
630 struct mlx5_eqe_qp_srq qp_srq;
631 struct mlx5_eqe_cq_err cq_err;
632 struct mlx5_eqe_port_state port;
633 struct mlx5_eqe_gpio gpio;
634 struct mlx5_eqe_congestion cong;
635 struct mlx5_eqe_stall_vl stall_vl;
636 struct mlx5_eqe_page_req req_pages;
637 struct mlx5_eqe_port_module_event port_module_event;
638 struct mlx5_eqe_vport_change vport_change;
639 struct mlx5_eqe_general_notification_event general_notifications;
640 struct mlx5_eqe_dct dct;
641 struct mlx5_eqe_temp_warning temp_warning;
642 struct mlx5_eqe_xrq_err xrq_err;
643 } __packed;
644
645 struct mlx5_eqe {
646 u8 rsvd0;
647 u8 type;
648 u8 rsvd1;
649 u8 sub_type;
650 __be32 rsvd2[7];
651 union ev_data data;
652 __be16 rsvd3;
653 u8 signature;
654 u8 owner;
655 } __packed;
656
657 struct mlx5_cmd_prot_block {
658 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
659 u8 rsvd0[48];
660 __be64 next;
661 __be32 block_num;
662 u8 rsvd1;
663 u8 token;
664 u8 ctrl_sig;
665 u8 sig;
666 };
667
668 #define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
669 (MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
670 CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
671 CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);
672
673 enum {
674 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
675 };
676
677 struct mlx5_err_cqe {
678 u8 rsvd0[32];
679 __be32 srqn;
680 u8 rsvd1[18];
681 u8 vendor_err_synd;
682 u8 syndrome;
683 __be32 s_wqe_opcode_qpn;
684 __be16 wqe_counter;
685 u8 signature;
686 u8 op_own;
687 };
688
689 struct mlx5_cqe64 {
690 u8 tls_outer_l3_tunneled;
691 u8 rsvd0;
692 __be16 wqe_id;
693 u8 lro_tcppsh_abort_dupack;
694 u8 lro_min_ttl;
695 __be16 lro_tcp_win;
696 __be32 lro_ack_seq_num;
697 __be32 rss_hash_result;
698 u8 rss_hash_type;
699 u8 ml_path;
700 u8 rsvd20[2];
701 __be16 check_sum;
702 __be16 slid;
703 __be32 flags_rqpn;
704 u8 hds_ip_ext;
705 u8 l4_hdr_type_etc;
706 __be16 vlan_info;
707 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
708 __be32 imm_inval_pkey;
709 u8 rsvd40[4];
710 __be32 byte_cnt;
711 __be64 timestamp;
712 __be32 sop_drop_qpn;
713 __be16 wqe_counter;
714 u8 signature;
715 u8 op_own;
716 };
717
718 #define MLX5_CQE_TSTMP_PTP (1ULL << 63)
719
720 static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
721 {
722 return (cqe->op_own >> 4);
723 }
724
725 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
726 {
727 return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
728 }
729
730 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
731 {
732 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
733 }
734
735 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
736 {
737 return (cqe->l4_hdr_type_etc >> 4) & 0x7;
738 }
739
740 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
741 {
742 return be16_to_cpu(cqe->vlan_info) & 0xfff;
743 }
744
745 static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
746 {
747 memcpy(smac, &cqe->rss_hash_type , 4);
748 memcpy(smac + 4, &cqe->slid , 2);
749 }
750
751 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
752 {
753 return cqe->l4_hdr_type_etc & 0x1;
754 }
755
756 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
757 {
758 return cqe->tls_outer_l3_tunneled & 0x1;
759 }
760
761 static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
762 {
763 return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
764 }
765
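/*
 * Editor's example (illustrative, not part of the original header): typical
 * RX completion parsing with the accessors above; "cqe" is assumed to point
 * at a software-owned struct mlx5_cqe64, and CQE_L4_HDR_TYPE_* comes from
 * the enum just below:
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_tci = get_cqe_vlan(cqe);
 *	if (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_UDP)
 *		csum = be16_to_cpu(cqe->check_sum);
 */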
766 enum {
767 CQE_L4_HDR_TYPE_NONE = 0x0,
768 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
769 CQE_L4_HDR_TYPE_UDP = 0x2,
770 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
771 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
772 };
773
774 enum {
775 /* source L3 hash types */
776 CQE_RSS_SRC_HTYPE_IP = 0x3 << 0,
777 CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0,
778 CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0,
779
780 /* destination L3 hash types */
781 CQE_RSS_DST_HTYPE_IP = 0x3 << 2,
782 CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2,
783 CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2,
784
785 /* source L4 hash types */
786 CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4,
787 CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4,
788 CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4,
789 CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,
790
791 /* destination L4 hash types */
792 CQE_RSS_DST_HTYPE_L4 = 0x3 << 6,
793 CQE_RSS_DST_HTYPE_TCP = 0x1 << 6,
794 CQE_RSS_DST_HTYPE_UDP = 0x2 << 6,
795 CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
796 };
797
798 enum {
799 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
800 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
801 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
802 };
803
804 enum {
805 CQE_L2_OK = 1 << 0,
806 CQE_L3_OK = 1 << 1,
807 CQE_L4_OK = 1 << 2,
808 };
809
810 enum {
811 CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
812 CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
813 CQE_TLS_OFFLOAD_RESYNC = 0x2,
814 CQE_TLS_OFFLOAD_ERROR = 0x3,
815 };
816
817 struct mlx5_sig_err_cqe {
818 u8 rsvd0[16];
819 __be32 expected_trans_sig;
820 __be32 actual_trans_sig;
821 __be32 expected_reftag;
822 __be32 actual_reftag;
823 __be16 syndrome;
824 u8 rsvd22[2];
825 __be32 mkey;
826 __be64 err_offset;
827 u8 rsvd30[8];
828 __be32 qpn;
829 u8 rsvd38[2];
830 u8 signature;
831 u8 op_own;
832 };
833
834 struct mlx5_wqe_srq_next_seg {
835 u8 rsvd0[2];
836 __be16 next_wqe_index;
837 u8 signature;
838 u8 rsvd1[11];
839 };
840
841 union mlx5_ext_cqe {
842 struct ib_grh grh;
843 u8 inl[64];
844 };
845
846 struct mlx5_cqe128 {
847 union mlx5_ext_cqe inl_grh;
848 struct mlx5_cqe64 cqe64;
849 };
850
851 enum {
852 MLX5_MKEY_STATUS_FREE = 1 << 6,
853 };
854
855 struct mlx5_mkey_seg {
856 /* This is a two bit field occupying bits 31-30.
857 * bit 31 is always 0,
858 	 * bit 30 is zero for regular MRs and 1 (i.e., free) for UMRs that do not have translation
859 */
860 u8 status;
861 u8 pcie_control;
862 u8 flags;
863 u8 version;
864 __be32 qpn_mkey7_0;
865 u8 rsvd1[4];
866 __be32 flags_pd;
867 __be64 start_addr;
868 __be64 len;
869 __be32 bsfs_octo_size;
870 u8 rsvd2[16];
871 __be32 xlt_oct_size;
872 u8 rsvd3[3];
873 u8 log2_page_size;
874 u8 rsvd4[4];
875 };
876
877 #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
878
879 enum {
880 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
881 };
882
883 static inline int mlx5_host_is_le(void)
884 {
885 #if defined(__LITTLE_ENDIAN)
886 return 1;
887 #elif defined(__BIG_ENDIAN)
888 return 0;
889 #else
890 #error Host endianness not defined
891 #endif
892 }
893
894 #define MLX5_CMD_OP_MAX 0x939
895
896 enum {
897 VPORT_STATE_DOWN = 0x0,
898 VPORT_STATE_UP = 0x1,
899 VPORT_STATE_FOLLOW = 0x2,
900 };
901
902 enum {
903 MLX5_L3_PROT_TYPE_IPV4 = 0,
904 MLX5_L3_PROT_TYPE_IPV6 = 1,
905 };
906
907 enum {
908 MLX5_L4_PROT_TYPE_TCP = 0,
909 MLX5_L4_PROT_TYPE_UDP = 1,
910 };
911
912 enum {
913 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
914 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
915 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
916 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
917 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
918 };
919
920 enum {
921 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
922 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
923 MLX5_MATCH_INNER_HEADERS = 1 << 2,
924
925 };
926
927 enum {
928 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
929 MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
930 MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
931 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
932 MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
933 MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
934 MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
935 };
936
937 enum {
938 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0,
939 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
940 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2
941 };
942
943 enum {
944 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0,
945 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1,
946 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
947 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
948 };
949
950 enum {
951 MLX5_UC_ADDR_CHANGE = (1 << 0),
952 MLX5_MC_ADDR_CHANGE = (1 << 1),
953 MLX5_VLAN_CHANGE = (1 << 2),
954 MLX5_PROMISC_CHANGE = (1 << 3),
955 MLX5_MTU_CHANGE = (1 << 4),
956 };
957
958 enum mlx5_list_type {
959 MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
960 MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
961 MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
962 };
963
964 enum {
965 MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
966 MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
967 MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
968 };
969
970 /* MLX5 DEV CAPs */
971
972 /* TODO: EAT.ME */
973 enum mlx5_cap_mode {
974 HCA_CAP_OPMOD_GET_MAX = 0,
975 HCA_CAP_OPMOD_GET_CUR = 1,
976 };
977
978 enum mlx5_cap_type {
979 MLX5_CAP_GENERAL = 0,
980 MLX5_CAP_ETHERNET_OFFLOADS,
981 MLX5_CAP_ODP,
982 MLX5_CAP_ATOMIC,
983 MLX5_CAP_ROCE,
984 MLX5_CAP_IPOIB_OFFLOADS,
985 MLX5_CAP_EOIB_OFFLOADS,
986 MLX5_CAP_FLOW_TABLE,
987 MLX5_CAP_ESWITCH_FLOW_TABLE,
988 MLX5_CAP_ESWITCH,
989 MLX5_CAP_SNAPSHOT,
990 MLX5_CAP_VECTOR_CALC,
991 MLX5_CAP_QOS,
992 MLX5_CAP_DEBUG,
993 MLX5_CAP_NVME,
994 MLX5_CAP_DMC,
995 MLX5_CAP_DEC,
996 MLX5_CAP_TLS,
997 MLX5_CAP_DEV_EVENT = 0x14,
998 /* NUM OF CAP Types */
999 MLX5_CAP_NUM
1000 };
1001
1002 enum mlx5_qcam_reg_groups {
1003 MLX5_QCAM_REGS_FIRST_128 = 0x0,
1004 };
1005
1006 enum mlx5_qcam_feature_groups {
1007 MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1008 };
1009
1010 enum mlx5_pcam_reg_groups {
1011 MLX5_PCAM_REGS_5000_TO_507F = 0x0,
1012 };
1013
1014 enum mlx5_pcam_feature_groups {
1015 MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1016 };
1017
1018 enum mlx5_mcam_reg_groups {
1019 MLX5_MCAM_REGS_FIRST_128 = 0x0,
1020 };
1021
1022 enum mlx5_mcam_feature_groups {
1023 MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
1024 };
1025
1026 /* GET Dev Caps macros */
1027 #define MLX5_CAP_GEN(mdev, cap) \
1028 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1029
1030 #define MLX5_CAP_GEN_64(mdev, cap) \
1031 MLX5_GET64(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1032
1033 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1034 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1035
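/*
 * Editor's example (illustrative, not part of the original header): the
 * MLX5_CAP_*() getters in this block wrap MLX5_GET() over the cached
 * capability pages; field names are taken from mlx5_ifc.h, e.g.:
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression))
 *		max_cq_sz = 1U << MLX5_CAP_GEN(mdev, log_max_cq_sz);
 */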
1036 #define MLX5_CAP_ETH(mdev, cap) \
1037 MLX5_GET(per_protocol_networking_offload_caps,\
1038 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1039
1040 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1041 MLX5_GET(per_protocol_networking_offload_caps,\
1042 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1043
1044 #define MLX5_CAP_ROCE(mdev, cap) \
1045 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1046
1047 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1048 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1049
1050 #define MLX5_CAP_ATOMIC(mdev, cap) \
1051 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1052
1053 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1054 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1055
1056 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1057 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1058
1059 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1060 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1061
1062 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1063 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1064
1065 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1066 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1067
1068 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1069 MLX5_GET(flow_table_eswitch_cap, \
1070 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1071
1072 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1073 MLX5_GET(flow_table_eswitch_cap, \
1074 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1075
1076 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1077 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1078
1079 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1080 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1081
1082 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1083 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1084
1085 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1086 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1087
1088 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1089 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1090
1091 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1092 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1093
1094 #define MLX5_CAP_ESW(mdev, cap) \
1095 MLX5_GET(e_switch_cap, \
1096 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
1097
1098 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1099 MLX5_GET(e_switch_cap, \
1100 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
1101
1102 #define MLX5_CAP_ODP(mdev, cap)\
1103 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1104
1105 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1106 MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
1107
1108 #define MLX5_CAP_SNAPSHOT(mdev, cap) \
1109 MLX5_GET(snapshot_cap, \
1110 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
1111
1112 #define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
1113 MLX5_GET(snapshot_cap, \
1114 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
1115
1116 #define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
1117 MLX5_GET(per_protocol_networking_offload_caps,\
1118 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
1119
1120 #define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
1121 MLX5_GET(per_protocol_networking_offload_caps,\
1122 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
1123
1124 #define MLX5_CAP_DEBUG(mdev, cap) \
1125 MLX5_GET(debug_cap, \
1126 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
1127
1128 #define MLX5_CAP_DEBUG_MAX(mdev, cap) \
1129 MLX5_GET(debug_cap, \
1130 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
1131
1132 #define MLX5_CAP_QOS(mdev, cap) \
1133 MLX5_GET(qos_cap,\
1134 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
1135
1136 #define MLX5_CAP_QOS_MAX(mdev, cap) \
1137 MLX5_GET(qos_cap,\
1138 mdev->hca_caps_max[MLX5_CAP_QOS], cap)
1139
1140 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
1141 MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
1142
1143 #define MLX5_CAP_PCAM_REG(mdev, reg) \
1144 MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
1145
1146 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
1147 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1148
1149 #define MLX5_CAP_MCAM_REG(mdev, reg) \
1150 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
1151
1152 #define MLX5_CAP_QCAM_REG(mdev, fld) \
1153 MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
1154
1155 #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
1156 MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
1157
1158 #define MLX5_CAP_FPGA(mdev, cap) \
1159 MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
1160
1161 #define MLX5_CAP64_FPGA(mdev, cap) \
1162 MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
1163
1164 #define MLX5_CAP_TLS(mdev, cap) \
1165 MLX5_GET(tls_capabilities, (mdev)->hca_caps_cur[MLX5_CAP_TLS], cap)
1166
1167 #define MLX5_CAP_DEV_EVENT(mdev, cap)\
1168 MLX5_ADDR_OF(device_event_cap, (mdev)->hca_caps_cur[MLX5_CAP_DEV_EVENT], cap)
1169
1170 enum {
1171 MLX5_CMD_STAT_OK = 0x0,
1172 MLX5_CMD_STAT_INT_ERR = 0x1,
1173 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1174 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1175 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1176 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1177 MLX5_CMD_STAT_RES_BUSY = 0x6,
1178 MLX5_CMD_STAT_LIM_ERR = 0x8,
1179 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1180 MLX5_CMD_STAT_IX_ERR = 0xa,
1181 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1182 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1183 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1184 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1185 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1186 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1187 };
1188
1189 enum {
1190 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1191 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1192 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1193 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1194 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1195 MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6,
1196 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1197 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1198 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
1199 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1200 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1201 };
1202
1203 enum {
1204 MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
1205 MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
1206 MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
1207 };
1208
1209 enum {
1210 MLX5_CAP_PORT_TYPE_IB = 0x0,
1211 MLX5_CAP_PORT_TYPE_ETH = 0x1,
1212 };
1213
1214 enum {
1215 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0,
1216 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
1217 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
1218 };
1219
1220 enum mlx5_inline_modes {
1221 MLX5_INLINE_MODE_NONE,
1222 MLX5_INLINE_MODE_L2,
1223 MLX5_INLINE_MODE_IP,
1224 MLX5_INLINE_MODE_TCP_UDP,
1225 };
1226
1227 enum {
1228 MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
1229 };
1230
1231 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1232 {
1233 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1234 return 0;
1235 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1236 }
1237
1238 struct mlx5_ifc_mcia_reg_bits {
1239 u8 l[0x1];
1240 u8 reserved_0[0x7];
1241 u8 module[0x8];
1242 u8 reserved_1[0x8];
1243 u8 status[0x8];
1244
1245 u8 i2c_device_address[0x8];
1246 u8 page_number[0x8];
1247 u8 device_address[0x10];
1248
1249 u8 reserved_2[0x10];
1250 u8 size[0x10];
1251
1252 u8 reserved_3[0x20];
1253
1254 u8 dword_0[0x20];
1255 u8 dword_1[0x20];
1256 u8 dword_2[0x20];
1257 u8 dword_3[0x20];
1258 u8 dword_4[0x20];
1259 u8 dword_5[0x20];
1260 u8 dword_6[0x20];
1261 u8 dword_7[0x20];
1262 u8 dword_8[0x20];
1263 u8 dword_9[0x20];
1264 u8 dword_10[0x20];
1265 u8 dword_11[0x20];
1266 };
1267
1268 #define MLX5_CMD_OP_QUERY_EEPROM 0x93c
1269
1270 struct mlx5_mini_cqe8 {
1271 union {
1272 __be32 rx_hash_result;
1273 __be16 checksum;
1274 __be16 rsvd;
1275 struct {
1276 __be16 wqe_counter;
1277 u8 s_wqe_opcode;
1278 u8 reserved;
1279 } s_wqe_info;
1280 };
1281 __be32 byte_cnt;
1282 };
1283
1284 enum {
1285 MLX5_NO_INLINE_DATA,
1286 MLX5_INLINE_DATA32_SEG,
1287 MLX5_INLINE_DATA64_SEG,
1288 MLX5_COMPRESSED,
1289 };
1290
1291 enum mlx5_exp_cqe_zip_recv_type {
1292 MLX5_CQE_FORMAT_HASH,
1293 MLX5_CQE_FORMAT_CSUM,
1294 };
1295
1296 #define MLX5E_CQE_FORMAT_MASK 0xc
1297 static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
1298 {
1299 return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
1300 }
1301
1302 enum {
1303 MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
1304 MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
1305 };
1306
1307 enum {
1308 MLX5_FRL_LEVEL3 = 0x8,
1309 MLX5_FRL_LEVEL6 = 0x40,
1310 };
1311
1312 /* 8 regular priorities + 1 for multicast */
1313 #define MLX5_NUM_BYPASS_FTS 9
1314
1315 #endif /* MLX5_DEVICE_H */