/*-
 * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
        MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_NUM_SPARE_EQE = 0x80,
        MLX5_NUM_ASYNC_EQE = 0x100,
        MLX5_NUM_CMD_EQE = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET = 0x40,
};

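/*
 * Default set of asynchronous events subscribed on the async EQ.
 * Events gated on device capabilities (port module, DCBX, FPGA,
 * temperature warning, general notification) are ORed in by
 * mlx5_start_eqs() before the EQ is created.
 */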
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT) | \
                               (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

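/*
 * Return the next EQE owned by software, or NULL when the queue is
 * empty.  The sense of the ownership bit alternates on every pass
 * through the queue: an entry is software owned when its ownership
 * bit matches the parity of the current pass.  Since eq->nent is a
 * power of two, "eq->cons_index & eq->nent" extracts exactly that
 * parity bit.
 */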
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
        case MLX5_EVENT_TYPE_FPGA_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_ERROR";
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
        case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
        case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
        switch (subtype) {
        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
        }
        return -1;
}

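/*
 * Ring the EQ consumer index doorbell.  The doorbell word carries the
 * EQ number in the top byte and the low 24 bits of the consumer
 * index; a write to the first dword of the doorbell also re-arms the
 * EQ, while a write to the dword at byte offset 8 only publishes the
 * consumer index.
 */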
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static void
mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{

        mlx5_core_warn(dev,
            "High temperature on sensors with bit set %#jx %#jx\n",
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb),
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb));
}

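/*
 * Process all software-owned EQEs on one EQ and dispatch them to the
 * matching handlers.  Returns non-zero if at least one EQE was
 * consumed.  Runs from the MSI-X handler and from the
 * mlx5_poll_interrupts() fallback below.
 */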
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                atomic_thread_fence_acq();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                    eq->eqn, eqe_type_str(eqe->type));

                if (dev->priv.eq_table.cb != NULL &&
                    dev->priv.eq_table.cb(dev, eqe->type, &eqe->data)) {
                        /* FALLTHROUGH */
                } else switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        mlx5_cq_completion(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                                mlx5_cmd_comp_handler(dev,
                                    be32_to_cpu(eqe->data.cmd.vector),
                                    MLX5_CMD_MODE_EVENTS);
                        }
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev,
                                            port_subtype_event(eqe->sub_type),
                                            (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev,
                                    "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                                if (dev->event)
                                        dev->event(dev,
                                            dcbx_subevent(eqe->sub_type), 0);
                                break;
                        default:
                                mlx5_core_warn(dev,
                                    "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                        mlx5_port_general_notification_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                            cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                    func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                        mlx5_port_module_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        {
                                struct mlx5_eqe_vport_change *vc_eqe =
                                    &eqe->data.vport_change;
                                u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

                                if (dev->event)
                                        dev->event(dev,
                                            MLX5_DEV_EVENT_VPORT_CHANGE,
                                            (unsigned long)vport_num);
                        }
                        if (dev->priv.eswitch != NULL)
                                mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
                        break;

                case MLX5_EVENT_TYPE_FPGA_ERROR:
                case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                        mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
                        break;

                case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                        mlx5_temp_warning_event(dev, eqe);
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                            eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        /* check if IRQs are not disabled */
        if (likely(dev->priv.disable_irqs == 0))
                mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

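/*
 * Pre-set the ownership bit of every EQE so that the whole queue
 * starts out hardware owned; hardware hands an entry back to software
 * by clearing the bit when it writes the entry on the first pass.
 */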
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

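/*
 * Create an EQ and attach it to MSI-X vector 'vecidx': allocate the
 * EQE buffer (nent plus MLX5_NUM_SPARE_EQE entries, rounded up to a
 * power of two), initialize the ownership bits, issue the CREATE_EQ
 * command with the page list and EQ context, request the interrupt
 * and leave the EQ armed.
 */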
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
    int nent, u64 mask)
{
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
            &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
            MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
            eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
            "mlx5_core", eq);
        if (err)
                goto err_eq;
#ifdef RSS
        if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
                u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;

                err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
                    rss_getcpu(bucket % rss_getnumbuckets()));
                if (err)
                        goto err_irq;
        }
#else
        if (0)
                goto err_irq;
#endif

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                    eq->eqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        spin_lock_init(&dev->priv.eq_table.lock);

        return 0;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

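/*
 * Bring up the three core EQs: the command EQ (after which command
 * completions switch from polling to event mode), the async EQ
 * carrying the event mask assembled below, and the pages EQ serving
 * firmware page requests.
 */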
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

        if (MLX5_CAP_GEN(dev, nic_vport_change_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, dcbx))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
            MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
            MLX5_NUM_ASYNC_EQE, async_event_mask);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES,
            /* TODO: sriov max_vf + */ 1,
            1 << MLX5_EVENT_TYPE_PAGE_REQUEST);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

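/*
 * Tear the core EQs down in reverse order of creation, switching
 * command completions back to polling mode before the command EQ
 * goes away.
 */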
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

        memset(out, 0, outlen);
        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
        switch (error_type) {
        case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
                return "Power budget exceeded";
        case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
                return "Long range for non-MLNX cable/module";
        case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
                return "Bus stuck (I2C or data shorted)";
        case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
                return "No EEPROM/retry timeout";
        case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
                return "Enforce part number list";
        case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
                return "Unknown identifier";
        case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
                return "High temperature";
        case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
                return "Bad or shorted cable/module";
        case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED:
                return "PMD type is not enabled";
        case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE:
                return "Laser TEC failure";
        case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT:
                return "High current";
        case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE:
                return "High voltage";
        case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED:
                return "PCIe system power slot exceeded";
        case MLX5_MODULE_EVENT_ERROR_HIGH_POWER:
                return "High power";
        case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT:
                return "Module state machine fault";
        default:
                return "Unknown error type";
        }
}

unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
        if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
                return 0;       /* undefined */
        return dev->module_status[module_num];
}

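/*
 * Decode a port module (transceiver) event: log the new module
 * status, update the port module event statistics and cache the
 * status for later retrieval via mlx5_query_module_status().
 */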
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        unsigned int module_num;
        unsigned int module_status;
        unsigned int error_type;
        struct mlx5_eqe_port_module_event *module_event_eqe;

        module_event_eqe = &eqe->data.port_module_event;

        module_num = (unsigned int)module_event_eqe->module;
        module_status = (unsigned int)module_event_eqe->module_status &
            PORT_MODULE_EVENT_MODULE_STATUS_MASK;
        error_type = (unsigned int)module_event_eqe->error_type &
            PORT_MODULE_EVENT_ERROR_TYPE_MASK;

        if (module_status < MLX5_MODULE_STATUS_NUM)
                dev->priv.pme_stats.status_counters[module_status]++;

        switch (module_status) {
        case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
                mlx5_core_info(dev,
                    "Module %u, status: plugged and enabled\n",
                    module_num);
                break;

        case MLX5_MODULE_STATUS_UNPLUGGED:
                mlx5_core_info(dev,
                    "Module %u, status: unplugged\n", module_num);
                break;

        case MLX5_MODULE_STATUS_ERROR:
                mlx5_core_err(dev,
                    "Module %u, status: error, %s (%d)\n",
                    module_num,
                    mlx5_port_module_event_error_type_to_string(error_type),
                    error_type);
                if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
                        dev->priv.pme_stats.error_counters[error_type]++;
                break;

        default:
                mlx5_core_info(dev,
                    "Module %u, unknown status %d\n", module_num, module_status);
        }

        /* store module status */
        if (module_num < MLX5_MAX_PORTS)
                dev->module_status[module_num] = module_status;
}

static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        u8 port = (eqe->data.port.port >> 4) & 0xf;

        switch (eqe->sub_type) {
        case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
                break;
        case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT:
                mlx5_trigger_health_watchdog(dev);
                break;
        default:
                mlx5_core_warn(dev,
                    "general event with unrecognized subtype: port %d, sub_type %d\n",
                    port, eqe->sub_type);
                break;
        }
}

void
mlx5_disable_interrupts(struct mlx5_core_dev *dev)
{
        int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
        int x;

        for (x = 0; x != nvec; x++)
                disable_irq(dev->priv.msix_arr[x].vector);
}

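/*
 * Sweep the command, async, pages and all completion EQs by hand.
 * Like the MSI-X handler, this is a no-op while interrupts are
 * soft-disabled via dev->priv.disable_irqs.
 */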
void
mlx5_poll_interrupts(struct mlx5_core_dev *dev)
{
        struct mlx5_eq *eq;

        if (unlikely(dev->priv.disable_irqs != 0))
                return;

        mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.async_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq);

        list_for_each_entry(eq, &dev->priv.eq_table.comp_eqs_list, list)
                mlx5_eq_int(dev, eq);
}