1 /*-
2 * Copyright (c) 2013-2020, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28 #include "opt_rss.h"
29 #include "opt_ratelimit.h"
30
31 #include <dev/mlx5/driver.h>
32 #include <linux/module.h>
33 #include <dev/mlx5/mlx5_core/mlx5_core.h>
34
35 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
36 int outlen)
37 {
38 u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
39 int err;
40
41 memset(in, 0, sizeof(in));
42
43 MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
44
45 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
46 return err;
47 }
48
49 int mlx5_query_board_id(struct mlx5_core_dev *dev)
50 {
51 u32 *out;
52 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
53 int err;
54
55 out = kzalloc(outlen, GFP_KERNEL);
56
57 err = mlx5_cmd_query_adapter(dev, out, outlen);
58 if (err)
59 goto out_out;
60
61 memcpy(dev->board_id,
62 MLX5_ADDR_OF(query_adapter_out, out,
63 query_adapter_struct.vsd_contd_psid),
64 MLX5_FLD_SZ_BYTES(query_adapter_out,
65 query_adapter_struct.vsd_contd_psid));
66
67 out_out:
68 kfree(out);
69
70 return err;
71 }
72
73 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
74 {
75 u32 *out;
76 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
77 int err;
78
79 out = kzalloc(outlen, GFP_KERNEL);
80
81 err = mlx5_cmd_query_adapter(mdev, out, outlen);
82 if (err)
83 goto out_out;
84
85 *vendor_id = MLX5_GET(query_adapter_out, out,
86 query_adapter_struct.ieee_vendor_id);
87
88 out_out:
89 kfree(out);
90
91 return err;
92 }
93 EXPORT_SYMBOL(mlx5_core_query_vendor_id);
94
95 static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
96 {
97 u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
98 u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
99 int err;
100
101 memset(in, 0, sizeof(in));
102 memset(out, 0, sizeof(out));
103
104 MLX5_SET(query_special_contexts_in, in, opcode,
105 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
106 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
107 if (err)
108 return err;
109
110 dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
111 out, resd_lkey);
112
113 return err;
114 }
115
/* Cache the QCAM (QoS capability mask) register contents in dev->caps.qcam. */
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_qcam_reg(dev, dev->caps.qcam,
				   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_QCAM_REGS_FIRST_128);
}
122
/* Cache the PCAM (ports capability mask) register contents in dev->caps.pcam. */
static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}
129
/* Cache the MCAM (management capability mask) register contents in dev->caps.mcam. */
static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_mcam_reg(dev, dev->caps.mcam,
				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_MCAM_REGS_FIRST_128);
}
136
/*
 * Query and cache all HCA capability groups the device advertises.
 *
 * The general capabilities are fetched first; every subsequent group is
 * gated on the corresponding MLX5_CAP_GEN() bit from that first query,
 * so the ordering here matters.  The function stops and returns the
 * first error encountered; on success all supported capability groups
 * (and the QCAM/MCAM/PCAM register caches plus the special contexts)
 * are populated in *dev.
 */
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
	int err;

	/* Must come first: all of the gating bits below live here. */
	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
		if (err)
			return err;
	}

	/* "pg" = paging support, which implies on-demand paging (ODP) caps. */
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, roce)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
		if (err)
			return err;
	}

	/* Flow table caps apply to Ethernet ports with a NIC flow table,
	 * or to IB ports with enhanced IPoIB offloads. */
	if ((MLX5_CAP_GEN(dev, port_type) ==
	    MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET &&
	    MLX5_CAP_GEN(dev, nic_flow_table)) ||
	    (MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
	    MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, snapshot)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, debug)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qos)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
		if (err)
			return err;
	}

	/* Capability-mask registers are read through access_reg rather
	 * than QUERY_HCA_CAP, hence the dedicated helpers. */
	if (MLX5_CAP_GEN(dev, qcam_reg)) {
		err = mlx5_get_qcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, mcam_reg)) {
		err = mlx5_get_mcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pcam_reg)) {
		err = mlx5_get_pcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, tls_tx)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, event_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
		if (err)
			return err;
	}

	/* Unconditional: caches the reserved lkey for later use. */
	err = mlx5_core_query_special_contexts(dev);
	if (err)
		return err;

	return 0;
}
251
252 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
253 {
254 u32 in[MLX5_ST_SZ_DW(init_hca_in)];
255 u32 out[MLX5_ST_SZ_DW(init_hca_out)];
256
257 memset(in, 0, sizeof(in));
258
259 MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
260
261 memset(out, 0, sizeof(out));
262 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
263 }
264
265 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
266 {
267 u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
268 u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
269
270 MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
271 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
272 }
273
/*
 * Tear down the HCA using the firmware's force-close profile.
 *
 * Requires the force_teardown capability; the command is executed in
 * polling mode (the regular command interface may already be unusable
 * at this point).  Returns 0 on success, -EOPNOTSUPP when unsupported,
 * -EIO when firmware reports the forced teardown failed, or the
 * command execution error.
 */
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int force_state;
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	/* Polling exec: do not rely on EQ interrupts during teardown. */
	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	/* Firmware reports the outcome of the forced close in "state". */
	force_state = MLX5_GET(teardown_hca_out, out, state);
	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_err(dev, "teardown with force mode failed\n");
		return -EIO;
	}

	return 0;
}
301
/* Upper bound on how long we poll for the NIC interface to disable. */
#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
/*
 * Tear down the HCA using the fast-teardown profile, then wait for the
 * device's NIC interface state to report "disabled".
 *
 * Requires the fast_teardown capability.  Returns 0 on success,
 * -EOPNOTSUPP when unsupported, -EIO when firmware reports failure or
 * the device does not reach the disabled state within the timeout, or
 * the command execution error.
 */
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	int end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n");
		return -EIO;
	}

	/* Request the disabled NIC interface state, then poll for it. */
	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

	/* Loop until device state turns to disable */
	/* NOTE(review): "end" is int while jiffies arithmetic is usually
	 * unsigned long; presumably fine on this FreeBSD linuxkpi (ticks
	 * is int and time_after() handles wrap) -- confirm. */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		/* Sleep one tick between polls rather than busy-waiting. */
		pause("W", 1);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
			      mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}
	return 0;
}
348
349 int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
350 u64 addr)
351 {
352 u32 in[MLX5_ST_SZ_DW(set_dc_cnak_trace_in)] = {0};
353 u32 out[MLX5_ST_SZ_DW(set_dc_cnak_trace_out)] = {0};
354 __be64 be_addr;
355 void *pas;
356
357 MLX5_SET(set_dc_cnak_trace_in, in, opcode, MLX5_CMD_OP_SET_DC_CNAK_TRACE);
358 MLX5_SET(set_dc_cnak_trace_in, in, enable, enable);
359 pas = MLX5_ADDR_OF(set_dc_cnak_trace_in, in, pas);
360 be_addr = cpu_to_be64(addr);
361 memcpy(MLX5_ADDR_OF(cmd_pas, pas, pa_h), &be_addr, sizeof(be_addr));
362
363 return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
364 }
365
/*
 * Instruction codes for the MCC (Management Component Control)
 * register, driving the firmware-update state machine.
 * NOTE(review): the "mlxsw" prefix in the enum tag looks inherited from
 * the mlxsw driver this code was adapted from; the constants themselves
 * use the MLX5 prefix.
 */
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};
374
375 static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
376 enum mlxsw_reg_mcc_instruction instr,
377 u16 component_index, u32 update_handle,
378 u32 component_size)
379 {
380 u32 out[MLX5_ST_SZ_DW(mcc_reg)];
381 u32 in[MLX5_ST_SZ_DW(mcc_reg)];
382
383 memset(in, 0, sizeof(in));
384
385 MLX5_SET(mcc_reg, in, instruction, instr);
386 MLX5_SET(mcc_reg, in, component_index, component_index);
387 MLX5_SET(mcc_reg, in, update_handle, update_handle);
388 MLX5_SET(mcc_reg, in, component_size, component_size);
389
390 return mlx5_core_access_reg(dev, in, sizeof(in), out,
391 sizeof(out), MLX5_REG_MCC, 0, 1);
392 }
393
394 static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
395 u32 *update_handle, u8 *error_code,
396 u8 *control_state)
397 {
398 u32 out[MLX5_ST_SZ_DW(mcc_reg)];
399 u32 in[MLX5_ST_SZ_DW(mcc_reg)];
400 int err;
401
402 memset(in, 0, sizeof(in));
403 memset(out, 0, sizeof(out));
404 MLX5_SET(mcc_reg, in, update_handle, *update_handle);
405
406 err = mlx5_core_access_reg(dev, in, sizeof(in), out,
407 sizeof(out), MLX5_REG_MCC, 0, 0);
408 if (err)
409 goto out;
410
411 *update_handle = MLX5_GET(mcc_reg, out, update_handle);
412 *error_code = MLX5_GET(mcc_reg, out, error_code);
413 *control_state = MLX5_GET(mcc_reg, out, control_state);
414
415 out:
416 return err;
417 }
418
419 static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
420 u32 update_handle,
421 u32 offset, u16 size,
422 u8 *data)
423 {
424 int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
425 u32 out[MLX5_ST_SZ_DW(mcda_reg)];
426 int i, j, dw_size = size >> 2;
427 __be32 data_element;
428 u32 *in;
429
430 in = kzalloc(in_size, GFP_KERNEL);
431 if (!in)
432 return -ENOMEM;
433
434 MLX5_SET(mcda_reg, in, update_handle, update_handle);
435 MLX5_SET(mcda_reg, in, offset, offset);
436 MLX5_SET(mcda_reg, in, size, size);
437
438 for (i = 0; i < dw_size; i++) {
439 j = i * 4;
440 data_element = htonl(*(u32 *)&data[j]);
441 memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
442 }
443
444 err = mlx5_core_access_reg(dev, in, in_size, out,
445 sizeof(out), MLX5_REG_MCDA, 0, 1);
446 kfree(in);
447 return err;
448 }
449
450 static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
451 u16 component_index,
452 u32 *max_component_size,
453 u8 *log_mcda_word_size,
454 u16 *mcda_max_write_size)
455 {
456 u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
457 int offset = MLX5_ST_SZ_DW(mcqi_reg);
458 u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
459 int err;
460
461 memset(in, 0, sizeof(in));
462 memset(out, 0, sizeof(out));
463
464 MLX5_SET(mcqi_reg, in, component_index, component_index);
465 MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
466
467 err = mlx5_core_access_reg(dev, in, sizeof(in), out,
468 sizeof(out), MLX5_REG_MCQI, 0, 0);
469 if (err)
470 goto out;
471
472 *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
473 *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
474 *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
475
476 out:
477 return err;
478 }
479
/*
 * Glue between the generic mlxfw firmware-flash framework and this
 * driver: mlxfw callbacks receive &mlxfw_dev and recover the enclosing
 * structure (and thus the mlx5 device) via container_of().
 */
struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};
484
485 static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
486 u16 component_index, u32 *p_max_size,
487 u8 *p_align_bits, u16 *p_max_write_size)
488 {
489 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
490 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
491 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
492
493 return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
494 p_align_bits, p_max_write_size);
495 }
496
497 static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
498 {
499 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
500 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
501 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
502 u8 control_state, error_code;
503 int err;
504
505 *fwhandle = 0;
506 err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
507 if (err)
508 return err;
509
510 if (control_state != MLXFW_FSM_STATE_IDLE)
511 return -EBUSY;
512
513 return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
514 0, *fwhandle, 0);
515 }
516
517 static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
518 u16 component_index, u32 component_size)
519 {
520 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
521 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
522 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
523
524 return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
525 component_index, fwhandle, component_size);
526 }
527
528 static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
529 u8 *data, u16 size, u32 offset)
530 {
531 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
532 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
533 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
534
535 return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
536 }
537
538 static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
539 u16 component_index)
540 {
541 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
542 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
543 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
544
545 return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
546 component_index, fwhandle, 0);
547 }
548
549 static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
550 {
551 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
552 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
553 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
554
555 return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
556 fwhandle, 0);
557 }
558
559 static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
560 enum mlxfw_fsm_state *fsm_state,
561 enum mlxfw_fsm_state_err *fsm_state_err)
562 {
563 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
564 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
565 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
566 u8 control_state, error_code;
567 int err;
568
569 err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
570 if (err)
571 return err;
572
573 *fsm_state = control_state;
574 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
575 MLXFW_FSM_STATE_ERR_MAX);
576 return 0;
577 }
578
579 static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
580 {
581 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
582 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
583 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
584
585 mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
586 }
587
588 static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
589 {
590 struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
591 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
592 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
593
594 mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
595 fwhandle, 0);
596 }
597
/* Callback table handed to the generic mlxfw flashing framework. */
static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query = mlx5_component_query,
	.fsm_lock = mlx5_fsm_lock,
	.fsm_component_update = mlx5_fsm_component_update,
	.fsm_block_download = mlx5_fsm_block_download,
	.fsm_component_verify = mlx5_fsm_component_verify,
	.fsm_activate = mlx5_fsm_activate,
	.fsm_query_state = mlx5_fsm_query_state,
	.fsm_cancel = mlx5_fsm_cancel,
	.fsm_release = mlx5_fsm_release
};
609
/*
 * Flash a firmware image onto the device via the generic mlxfw layer.
 *
 * Requires the MCQI/MCC/MCDA management registers (advertised through
 * the MCAM); returns -EOPNOTSUPP when the running firmware lacks them.
 * The device's board id (PSID) is passed to mlxfw so it can match the
 * image against the board.
 */
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
			const struct firmware *firmware)
{
	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlx5_mlxfw_dev_ops,
			.psid = dev->board_id,
			.psid_size = strlen(dev->board_id),
		},
		.mlx5_core_dev = dev
	};

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcc) ||
	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
		pr_info("%s flashing isn't supported by the running FW\n", __func__);
		return -EOPNOTSUPP;
	}

	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
}
Cache object: c3df8d7bd30d01b1151347f7a606e44a
|