1 /*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 /*
28 * File : ecore_mcp.c
29 */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include "bcm_osal.h"
34 #include "ecore.h"
35 #include "ecore_status.h"
36 #include "nvm_map.h"
37 #include "nvm_cfg.h"
38 #include "ecore_mcp.h"
39 #include "mcp_public.h"
40 #include "reg_addr.h"
41 #include "ecore_hw.h"
42 #include "ecore_init_fw_funcs.h"
43 #include "ecore_sriov.h"
44 #include "ecore_vf.h"
45 #include "ecore_iov_api.h"
46 #include "ecore_gtt_reg_addr.h"
47 #include "ecore_iro.h"
48 #include "ecore_dcbx.h"
49 #include "ecore_sp_commands.h"
50 #include "ecore_cxt.h"
51
52 #define CHIP_MCP_RESP_ITER_US 10
53 #define EMUL_MCP_RESP_ITER_US 1000 * 1000
54
55 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
56 #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
57
58 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
59 ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
60 _val)
61
62 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
63 ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
64
65 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
66 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
67 OFFSETOF(struct public_drv_mb, _field), _val)
68
69 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
70 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
71 OFFSETOF(struct public_drv_mb, _field))
72
73 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
74 DRV_ID_PDA_COMP_VER_OFFSET)
75
76 #define MCP_BYTES_PER_MBIT_OFFSET 17
77
78 #ifdef _NTDDK_
79 #pragma warning(push)
80 #pragma warning(disable : 28167)
81 #pragma warning(disable : 28123)
82 #endif
83
84 #ifndef ASIC_ONLY
85 static int loaded;
86 static int loaded_port[MAX_NUM_PORTS] = { 0 };
87 #endif
88
89 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
90 {
91 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
92 return false;
93 return true;
94 }
95
96 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
97 struct ecore_ptt *p_ptt)
98 {
99 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
100 PUBLIC_PORT);
101 u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
102
103 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
104 MFW_PORT(p_hwfn));
105 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
106 "port_addr = 0x%x, port_id 0x%02x\n",
107 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
108 }
109
110 void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
111 struct ecore_ptt *p_ptt)
112 {
113 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
114 OSAL_BE32 tmp;
115 u32 i;
116
117 #ifndef ASIC_ONLY
118 if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
119 return;
120 #endif
121
122 if (!p_hwfn->mcp_info->public_base)
123 return;
124
125 for (i = 0; i < length; i++) {
126 tmp = ecore_rd(p_hwfn, p_ptt,
127 p_hwfn->mcp_info->mfw_mb_addr +
128 (i << 2) + sizeof(u32));
129
130 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
131 OSAL_BE32_TO_CPU(tmp);
132 }
133 }
134
135 struct ecore_mcp_cmd_elem {
136 osal_list_entry_t list;
137 struct ecore_mcp_mb_params *p_mb_params;
138 u16 expected_seq_num;
139 bool b_is_completed;
140 };
141
142 /* Must be called while cmd_lock is acquired */
143 static struct ecore_mcp_cmd_elem *
144 ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
145 struct ecore_mcp_mb_params *p_mb_params,
146 u16 expected_seq_num)
147 {
148 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
149
150 p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
151 sizeof(*p_cmd_elem));
152 if (!p_cmd_elem) {
153 DP_NOTICE(p_hwfn, false,
154 "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
155 goto out;
156 }
157
158 p_cmd_elem->p_mb_params = p_mb_params;
159 p_cmd_elem->expected_seq_num = expected_seq_num;
160 OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
161 out:
162 return p_cmd_elem;
163 }
164
165 /* Must be called while cmd_lock is acquired */
166 static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
167 struct ecore_mcp_cmd_elem *p_cmd_elem)
168 {
169 OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
170 OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
171 }
172
173 /* Must be called while cmd_lock is acquired */
174 static struct ecore_mcp_cmd_elem *
175 ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
176 {
177 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
178
179 OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
180 struct ecore_mcp_cmd_elem) {
181 if (p_cmd_elem->expected_seq_num == seq_num)
182 return p_cmd_elem;
183 }
184
185 return OSAL_NULL;
186 }
187
188 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
189 {
190 if (p_hwfn->mcp_info) {
191 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
192
193 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
194 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
195
196 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
197 OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
198 &p_hwfn->mcp_info->cmd_list, list,
199 struct ecore_mcp_cmd_elem) {
200 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
201 }
202 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
203
204 #ifdef CONFIG_ECORE_LOCK_ALLOC
205 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
206 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
207 #endif
208 }
209
210 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
211 p_hwfn->mcp_info = OSAL_NULL;
212
213 return ECORE_SUCCESS;
214 }
215
216 /* Maximum of 1 sec to wait for the SHMEM ready indication */
217 #define ECPRE_MCP_SHMEM_RDY_MAX_RETRIES 20
218 #define ECORE_MCP_SHMEM_RDY_ITER_MS 50
219
220 enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
221 struct ecore_ptt *p_ptt)
222 {
223 struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
224 u8 cnt = ECPRE_MCP_SHMEM_RDY_MAX_RETRIES;
225 u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
226 u32 drv_mb_offsize, mfw_mb_offsize;
227 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
228
229 #ifndef ASIC_ONLY
230 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
231 DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
232 p_info->public_base = 0;
233 return ECORE_INVAL;
234 }
235 #endif
236
237 p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
238 if (!p_info->public_base)
239 return ECORE_INVAL;
240
241 p_info->public_base |= GRCBASE_MCP;
242
243 /* Get the MFW MB address and number of supported messages */
244 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
245 SECTION_OFFSIZE_ADDR(p_info->public_base,
246 PUBLIC_MFW_MB));
247 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
248 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
249 p_info->mfw_mb_addr);
250
251 /* @@@TBD:
252 * The driver can notify that there was an MCP reset, and read the SHMEM
253 * values before the MFW has completed initializing them.
254 * As a temporary solution, the "sup_msgs" field is used as a data ready
255 * indication.
256 * This should be replaced with an actual indication when it is provided
257 * by the MFW.
258 */
259 while (!p_info->mfw_mb_length && cnt--) {
260 OSAL_MSLEEP(msec);
261 p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
262 p_info->mfw_mb_addr);
263 }
264
265 if (!cnt) {
266 DP_NOTICE(p_hwfn, false,
267 "Failed to get the SHMEM ready notification after %d msec\n",
268 ECPRE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
269 return ECORE_TIMEOUT;
270 }
271
272 /* Calculate the driver and MFW mailbox address */
273 drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
274 SECTION_OFFSIZE_ADDR(p_info->public_base,
275 PUBLIC_DRV_MB));
276 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
277 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
278 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
279 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
280
281 /* Get the current driver mailbox sequence before sending
282 * the first command
283 */
284 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
285 DRV_MSG_SEQ_NUMBER_MASK;
286
287 /* Get current FW pulse sequence */
288 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
289 DRV_PULSE_SEQ_MASK;
290
291 p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
292
293 return ECORE_SUCCESS;
294 }
295
296 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
297 struct ecore_ptt *p_ptt)
298 {
299 struct ecore_mcp_info *p_info;
300 u32 size;
301
302 /* Allocate mcp_info structure */
303 p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
304 sizeof(*p_hwfn->mcp_info));
305 if (!p_hwfn->mcp_info) {
306 DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
307 return ECORE_NOMEM;
308 }
309 p_info = p_hwfn->mcp_info;
310
311 /* Initialize the MFW spinlocks */
312 #ifdef CONFIG_ECORE_LOCK_ALLOC
313 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
314 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
315 return ECORE_NOMEM;
316 }
317 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
318 OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
319 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
320 return ECORE_NOMEM;
321 }
322 #endif
323 OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
324 OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
325
326 OSAL_LIST_INIT(&p_info->cmd_list);
327
328 if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
329 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
330 /* Do not free mcp_info here, since public_base indicate that
331 * the MCP is not initialized
332 */
333 return ECORE_SUCCESS;
334 }
335
336 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
337 p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
338 p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
339 if (p_info->mfw_mb_cur == OSAL_NULL || p_info->mfw_mb_shadow == OSAL_NULL)
340 goto err;
341
342 return ECORE_SUCCESS;
343
344 err:
345 DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
346 ecore_mcp_free(p_hwfn);
347 return ECORE_NOMEM;
348 }
349
350 static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
351 struct ecore_ptt *p_ptt)
352 {
353 u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
354
355 /* Use MCP history register to check if MCP reset occurred between init
356 * time and now.
357 */
358 if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
359 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
360 "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
361 p_hwfn->mcp_info->mcp_hist, generic_por_0);
362
363 ecore_load_mcp_offsets(p_hwfn, p_ptt);
364 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
365 }
366 }
367
368 enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
369 struct ecore_ptt *p_ptt)
370 {
371 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
372 enum _ecore_status_t rc = ECORE_SUCCESS;
373
374 #ifndef ASIC_ONLY
375 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
376 delay = EMUL_MCP_RESP_ITER_US;
377 #endif
378
379 if (p_hwfn->mcp_info->b_block_cmd) {
380 DP_NOTICE(p_hwfn, false,
381 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
382 return ECORE_ABORTED;
383 }
384
385 /* Ensure that only a single thread is accessing the mailbox */
386 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
387
388 org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
389
390 /* Set drv command along with the updated sequence */
391 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
392 seq = ++p_hwfn->mcp_info->drv_mb_seq;
393 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
394
395 do {
396 /* Wait for MFW response */
397 OSAL_UDELAY(delay);
398 /* Give the FW up to 500 second (50*1000*10usec) */
399 } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
400 MISCS_REG_GENERIC_POR_0)) &&
401 (cnt++ < ECORE_MCP_RESET_RETRIES));
402
403 if (org_mcp_reset_seq !=
404 ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
405 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
406 "MCP was reset after %d usec\n", cnt * delay);
407 } else {
408 DP_ERR(p_hwfn, "Failed to reset MCP\n");
409 rc = ECORE_AGAIN;
410 }
411
412 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
413
414 return rc;
415 }
416
417 /* Must be called while cmd_lock is acquired */
418 static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
419 {
420 struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
421
422 /* There is at most one pending command at a certain time, and if it
423 * exists - it is placed at the HEAD of the list.
424 */
425 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
426 p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
427 struct ecore_mcp_cmd_elem,
428 list);
429 return !p_cmd_elem->b_is_completed;
430 }
431
432 return false;
433 }
434
435 /* Must be called while cmd_lock is acquired */
436 static enum _ecore_status_t
437 ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
438 {
439 struct ecore_mcp_mb_params *p_mb_params;
440 struct ecore_mcp_cmd_elem *p_cmd_elem;
441 u32 mcp_resp;
442 u16 seq_num;
443
444 mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
445 seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
446
447 /* Return if no new non-handled response has been received */
448 if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
449 return ECORE_AGAIN;
450
451 p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
452 if (!p_cmd_elem) {
453 DP_ERR(p_hwfn,
454 "Failed to find a pending mailbox cmd that expects sequence number %d\n",
455 seq_num);
456 return ECORE_UNKNOWN_ERROR;
457 }
458
459 p_mb_params = p_cmd_elem->p_mb_params;
460
461 /* Get the MFW response along with the sequence number */
462 p_mb_params->mcp_resp = mcp_resp;
463
464 /* Get the MFW param */
465 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
466
467 /* Get the union data */
468 if (p_mb_params->p_data_dst != OSAL_NULL &&
469 p_mb_params->data_dst_size) {
470 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
471 OFFSETOF(struct public_drv_mb,
472 union_data);
473 ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
474 union_data_addr, p_mb_params->data_dst_size);
475 }
476
477 p_cmd_elem->b_is_completed = true;
478
479 return ECORE_SUCCESS;
480 }
481
482 /* Must be called while cmd_lock is acquired */
483 static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
484 struct ecore_ptt *p_ptt,
485 struct ecore_mcp_mb_params *p_mb_params,
486 u16 seq_num)
487 {
488 union drv_union_data union_data;
489 u32 union_data_addr;
490
491 /* Set the union data */
492 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
493 OFFSETOF(struct public_drv_mb, union_data);
494 OSAL_MEM_ZERO(&union_data, sizeof(union_data));
495 if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
496 OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
497 p_mb_params->data_src_size);
498 ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
499 sizeof(union_data));
500
501 /* Set the drv param */
502 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
503
504 /* Set the drv command along with the sequence number */
505 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
506
507 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
508 "MFW mailbox: command 0x%08x param 0x%08x\n",
509 (p_mb_params->cmd | seq_num), p_mb_params->param);
510 }
511
512 static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
513 bool block_cmd)
514 {
515 p_hwfn->mcp_info->b_block_cmd = block_cmd;
516
517 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
518 block_cmd ? "Block" : "Unblock");
519 }
520
521 static void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
522 struct ecore_ptt *p_ptt)
523 {
524 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
525
526 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
527 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
528 cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
529 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
530 cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
531 OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
532 cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
533
534 DP_NOTICE(p_hwfn, false,
535 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
536 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
537 }
538
539 static enum _ecore_status_t
540 _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
541 struct ecore_mcp_mb_params *p_mb_params,
542 u32 max_retries, u32 usecs)
543 {
544 u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
545 struct ecore_mcp_cmd_elem *p_cmd_elem;
546 u16 seq_num;
547 enum _ecore_status_t rc = ECORE_SUCCESS;
548
549 /* Wait until the mailbox is non-occupied */
550 do {
551 /* Exit the loop if there is no pending command, or if the
552 * pending command is completed during this iteration.
553 * The spinlock stays locked until the command is sent.
554 */
555
556 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
557
558 if (!ecore_mcp_has_pending_cmd(p_hwfn))
559 break;
560
561 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
562 if (rc == ECORE_SUCCESS)
563 break;
564 else if (rc != ECORE_AGAIN)
565 goto err;
566
567 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
568 if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
569 OSAL_MSLEEP(msecs);
570 } else {
571 OSAL_UDELAY(usecs);
572 }
573 OSAL_MFW_CMD_PREEMPT(p_hwfn);
574 } while (++cnt < max_retries);
575
576 if (cnt >= max_retries) {
577 DP_NOTICE(p_hwfn, false,
578 "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
579 p_mb_params->cmd, p_mb_params->param);
580 return ECORE_AGAIN;
581 }
582
583 /* Send the mailbox command */
584 ecore_mcp_reread_offsets(p_hwfn, p_ptt);
585 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
586 p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
587 if (!p_cmd_elem) {
588 rc = ECORE_NOMEM;
589 goto err;
590 }
591
592 __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
593 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
594
595 /* Wait for the MFW response */
596 do {
597 /* Exit the loop if the command is already completed, or if the
598 * command is completed during this iteration.
599 * The spinlock stays locked until the list element is removed.
600 */
601
602 if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
603 OSAL_MSLEEP(msecs);
604 } else {
605 OSAL_UDELAY(usecs);
606 }
607 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
608
609 if (p_cmd_elem->b_is_completed)
610 break;
611
612 rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
613 if (rc == ECORE_SUCCESS)
614 break;
615 else if (rc != ECORE_AGAIN)
616 goto err;
617
618 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
619 OSAL_MFW_CMD_PREEMPT(p_hwfn);
620 } while (++cnt < max_retries);
621
622 if (cnt >= max_retries) {
623 DP_NOTICE(p_hwfn, false,
624 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
625 p_mb_params->cmd, p_mb_params->param);
626 ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
627
628 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
629 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
630 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
631
632 if (!ECORE_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
633 ecore_mcp_cmd_set_blocking(p_hwfn, true);
634 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
635 return ECORE_AGAIN;
636 }
637
638 ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
639 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
640
641 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
642 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
643 p_mb_params->mcp_resp, p_mb_params->mcp_param,
644 (cnt * usecs) / 1000, (cnt * usecs) % 1000);
645
646 /* Clear the sequence number from the MFW response */
647 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
648
649 return ECORE_SUCCESS;
650
651 err:
652 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
653 return rc;
654 }
655
656 static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
657 struct ecore_ptt *p_ptt,
658 struct ecore_mcp_mb_params *p_mb_params)
659 {
660 osal_size_t union_data_size = sizeof(union drv_union_data);
661 u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
662 u32 usecs = CHIP_MCP_RESP_ITER_US;
663
664 #ifndef ASIC_ONLY
665 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
666 usecs = EMUL_MCP_RESP_ITER_US;
667 /* There is a built-in delay of 100usec in each MFW response read */
668 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
669 max_retries /= 10;
670 #endif
671 if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
672 max_retries = DIV_ROUND_UP(max_retries, 1000);
673 usecs *= 1000;
674 }
675
676 /* MCP not initialized */
677 if (!ecore_mcp_is_init(p_hwfn)) {
678 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
679 return ECORE_BUSY;
680 }
681
682 if (p_mb_params->data_src_size > union_data_size ||
683 p_mb_params->data_dst_size > union_data_size) {
684 DP_ERR(p_hwfn,
685 "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
686 p_mb_params->data_src_size, p_mb_params->data_dst_size,
687 union_data_size);
688 return ECORE_INVAL;
689 }
690
691 if (p_hwfn->mcp_info->b_block_cmd) {
692 DP_NOTICE(p_hwfn, false,
693 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
694 p_mb_params->cmd, p_mb_params->param);
695 return ECORE_ABORTED;
696 }
697
698 return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
699 usecs);
700 }
701
702 enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
703 struct ecore_ptt *p_ptt, u32 cmd, u32 param,
704 u32 *o_mcp_resp, u32 *o_mcp_param)
705 {
706 struct ecore_mcp_mb_params mb_params;
707 enum _ecore_status_t rc;
708
709 #ifndef ASIC_ONLY
710 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
711 if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
712 loaded--;
713 loaded_port[p_hwfn->port_id]--;
714 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
715 loaded);
716 }
717 return ECORE_SUCCESS;
718 }
719 #endif
720
721 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
722 mb_params.cmd = cmd;
723 mb_params.param = param;
724 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
725 if (rc != ECORE_SUCCESS)
726 return rc;
727
728 *o_mcp_resp = mb_params.mcp_resp;
729 *o_mcp_param = mb_params.mcp_param;
730
731 return ECORE_SUCCESS;
732 }
733
734 enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
735 struct ecore_ptt *p_ptt,
736 u32 cmd,
737 u32 param,
738 u32 *o_mcp_resp,
739 u32 *o_mcp_param,
740 u32 i_txn_size,
741 u32 *i_buf)
742 {
743 struct ecore_mcp_mb_params mb_params;
744 enum _ecore_status_t rc;
745
746 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
747 mb_params.cmd = cmd;
748 mb_params.param = param;
749 mb_params.p_data_src = i_buf;
750 mb_params.data_src_size = (u8) i_txn_size;
751 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
752 if (rc != ECORE_SUCCESS)
753 return rc;
754
755 *o_mcp_resp = mb_params.mcp_resp;
756 *o_mcp_param = mb_params.mcp_param;
757
758 return ECORE_SUCCESS;
759 }
760
761 enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
762 struct ecore_ptt *p_ptt,
763 u32 cmd,
764 u32 param,
765 u32 *o_mcp_resp,
766 u32 *o_mcp_param,
767 u32 *o_txn_size,
768 u32 *o_buf)
769 {
770 struct ecore_mcp_mb_params mb_params;
771 u8 raw_data[MCP_DRV_NVM_BUF_LEN];
772 enum _ecore_status_t rc;
773
774 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
775 mb_params.cmd = cmd;
776 mb_params.param = param;
777 mb_params.p_data_dst = raw_data;
778
779 /* Use the maximal value since the actual one is part of the response */
780 mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
781
782 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
783 if (rc != ECORE_SUCCESS)
784 return rc;
785
786 *o_mcp_resp = mb_params.mcp_resp;
787 *o_mcp_param = mb_params.mcp_param;
788
789 *o_txn_size = *o_mcp_param;
790 OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);
791
792 return ECORE_SUCCESS;
793 }
794
795 #ifndef ASIC_ONLY
796 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
797 u32 *p_load_code)
798 {
799 static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
800
801 if (!loaded) {
802 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
803 } else if (!loaded_port[p_hwfn->port_id]) {
804 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
805 } else {
806 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
807 }
808
809 /* On CMT, always tell that it's engine */
810 if (ECORE_IS_CMT(p_hwfn->p_dev))
811 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
812
813 *p_load_code = load_phase;
814 loaded++;
815 loaded_port[p_hwfn->port_id]++;
816
817 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
818 "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
819 *p_load_code, loaded, p_hwfn->port_id,
820 loaded_port[p_hwfn->port_id]);
821 }
822 #endif
823
824 static bool
825 ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
826 enum ecore_override_force_load override_force_load)
827 {
828 bool can_force_load = false;
829
830 switch (override_force_load) {
831 case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
832 can_force_load = true;
833 break;
834 case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
835 can_force_load = false;
836 break;
837 default:
838 can_force_load = (drv_role == DRV_ROLE_OS &&
839 exist_drv_role == DRV_ROLE_PREBOOT) ||
840 (drv_role == DRV_ROLE_KDUMP &&
841 exist_drv_role == DRV_ROLE_OS);
842 break;
843 }
844
845 return can_force_load;
846 }
847
848 enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
849 struct ecore_ptt *p_ptt)
850 {
851 u32 resp = 0, param = 0;
852 enum _ecore_status_t rc;
853
854 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
855 &resp, ¶m);
856 if (rc != ECORE_SUCCESS) {
857 DP_NOTICE(p_hwfn, false,
858 "Failed to send cancel load request, rc = %d\n", rc);
859 return rc;
860 }
861
862 if (resp == FW_MSG_CODE_UNSUPPORTED) {
863 DP_INFO(p_hwfn,
864 "The cancel load command is unsupported by the MFW\n");
865 return ECORE_NOTIMPL;
866 }
867
868 return ECORE_SUCCESS;
869 }
870
871 #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
872 #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
873 #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
874 #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
875 #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
876 #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
877 #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
878
879 static u32 ecore_get_config_bitmap(void)
880 {
881 u32 config_bitmap = 0x0;
882
883 #ifdef CONFIG_ECORE_L2
884 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
885 #endif
886 #ifdef CONFIG_ECORE_SRIOV
887 config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
888 #endif
889 #ifdef CONFIG_ECORE_ROCE
890 config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
891 #endif
892 #ifdef CONFIG_ECORE_IWARP
893 config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
894 #endif
895 #ifdef CONFIG_ECORE_FCOE
896 config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
897 #endif
898 #ifdef CONFIG_ECORE_ISCSI
899 config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
900 #endif
901 #ifdef CONFIG_ECORE_LL2
902 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
903 #endif
904
905 return config_bitmap;
906 }
907
908 struct ecore_load_req_in_params {
909 u8 hsi_ver;
910 #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
911 #define ECORE_LOAD_REQ_HSI_VER_1 1
912 u32 drv_ver_0;
913 u32 drv_ver_1;
914 u32 fw_ver;
915 u8 drv_role;
916 u8 timeout_val;
917 u8 force_cmd;
918 bool avoid_eng_reset;
919 };
920
921 struct ecore_load_req_out_params {
922 u32 load_code;
923 u32 exist_drv_ver_0;
924 u32 exist_drv_ver_1;
925 u32 exist_fw_ver;
926 u8 exist_drv_role;
927 u8 mfw_hsi_ver;
928 bool drv_exists;
929 };
930
931 static enum _ecore_status_t
932 __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
933 struct ecore_load_req_in_params *p_in_params,
934 struct ecore_load_req_out_params *p_out_params)
935 {
936 struct ecore_mcp_mb_params mb_params;
937 struct load_req_stc load_req;
938 struct load_rsp_stc load_rsp;
939 u32 hsi_ver;
940 enum _ecore_status_t rc;
941
942 OSAL_MEM_ZERO(&load_req, sizeof(load_req));
943 load_req.drv_ver_0 = p_in_params->drv_ver_0;
944 load_req.drv_ver_1 = p_in_params->drv_ver_1;
945 load_req.fw_ver = p_in_params->fw_ver;
946 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
947 SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
948 p_in_params->timeout_val);
949 SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE, p_in_params->force_cmd);
950 SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0,
951 p_in_params->avoid_eng_reset);
952
953 hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
954 DRV_ID_MCP_HSI_VER_CURRENT :
955 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
956
957 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
958 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
959 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
960 mb_params.p_data_src = &load_req;
961 mb_params.data_src_size = sizeof(load_req);
962 mb_params.p_data_dst = &load_rsp;
963 mb_params.data_dst_size = sizeof(load_rsp);
964 mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK;
965
966 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
967 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
968 mb_params.param,
969 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
970 GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
971 GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
972 GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
973
974 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
975 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
976 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
977 load_req.drv_ver_0, load_req.drv_ver_1,
978 load_req.fw_ver, load_req.misc0,
979 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
980 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
981 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
982 GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
983
984 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
985 if (rc != ECORE_SUCCESS) {
986 DP_NOTICE(p_hwfn, false,
987 "Failed to send load request, rc = %d\n", rc);
988 return rc;
989 }
990
991 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
992 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
993 p_out_params->load_code = mb_params.mcp_resp;
994
995 if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
996 p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
997 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
998 "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
999 load_rsp.drv_ver_0, load_rsp.drv_ver_1,
1000 load_rsp.fw_ver, load_rsp.misc0,
1001 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
1002 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
1003 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
1004
1005 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
1006 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
1007 p_out_params->exist_fw_ver = load_rsp.fw_ver;
1008 p_out_params->exist_drv_role =
1009 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
1010 p_out_params->mfw_hsi_ver =
1011 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
1012 p_out_params->drv_exists =
1013 GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
1014 LOAD_RSP_FLAGS0_DRV_EXISTS;
1015 }
1016
1017 return ECORE_SUCCESS;
1018 }
1019
1020 static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
1021 u8 *p_mfw_drv_role)
1022 {
1023 switch (drv_role) {
1024 case ECORE_DRV_ROLE_OS:
1025 *p_mfw_drv_role = DRV_ROLE_OS;
1026 break;
1027 case ECORE_DRV_ROLE_KDUMP:
1028 *p_mfw_drv_role = DRV_ROLE_KDUMP;
1029 break;
1030 }
1031 }
1032
1033 enum ecore_load_req_force {
1034 ECORE_LOAD_REQ_FORCE_NONE,
1035 ECORE_LOAD_REQ_FORCE_PF,
1036 ECORE_LOAD_REQ_FORCE_ALL,
1037 };
1038
1039 static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
1040 u8 *p_mfw_force_cmd)
1041 {
1042 switch (force_cmd) {
1043 case ECORE_LOAD_REQ_FORCE_NONE:
1044 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
1045 break;
1046 case ECORE_LOAD_REQ_FORCE_PF:
1047 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
1048 break;
1049 case ECORE_LOAD_REQ_FORCE_ALL:
1050 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
1051 break;
1052 }
1053 }
1054
1055 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
1056 struct ecore_ptt *p_ptt,
1057 struct ecore_load_req_params *p_params)
1058 {
1059 struct ecore_load_req_out_params out_params;
1060 struct ecore_load_req_in_params in_params;
1061 u8 mfw_drv_role = 0, mfw_force_cmd;
1062 enum _ecore_status_t rc;
1063
1064 #ifndef ASIC_ONLY
1065 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1066 ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
1067 return ECORE_SUCCESS;
1068 }
1069 #endif
1070
1071 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
1072 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
1073 in_params.drv_ver_0 = ECORE_VERSION;
1074 in_params.drv_ver_1 = ecore_get_config_bitmap();
1075 in_params.fw_ver = STORM_FW_VERSION;
1076 ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
1077 in_params.drv_role = mfw_drv_role;
1078 in_params.timeout_val = p_params->timeout_val;
1079 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
1080 in_params.force_cmd = mfw_force_cmd;
1081 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
1082
1083 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1084 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1085 if (rc != ECORE_SUCCESS)
1086 return rc;
1087
1088 /* First handle cases where another load request should/might be sent:
1089 * - MFW expects the old interface [HSI version = 1]
1090 * - MFW responds that a force load request is required
1091 */
1092 if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
1093 DP_INFO(p_hwfn,
1094 "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
1095
1096 in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
1097 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1098 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1099 &out_params);
1100 if (rc != ECORE_SUCCESS)
1101 return rc;
1102 } else if (out_params.load_code ==
1103 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1104 if (ecore_mcp_can_force_load(in_params.drv_role,
1105 out_params.exist_drv_role,
1106 p_params->override_force_load)) {
1107 DP_INFO(p_hwfn,
1108 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
1109 in_params.drv_role, in_params.fw_ver,
1110 in_params.drv_ver_0, in_params.drv_ver_1,
1111 out_params.exist_drv_role,
1112 out_params.exist_fw_ver,
1113 out_params.exist_drv_ver_0,
1114 out_params.exist_drv_ver_1);
1115
1116 ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
1117 &mfw_force_cmd);
1118
1119 in_params.force_cmd = mfw_force_cmd;
1120 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
1121 rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
1122 &out_params);
1123 if (rc != ECORE_SUCCESS)
1124 return rc;
1125 } else {
1126 DP_NOTICE(p_hwfn, false,
1127 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1128 in_params.drv_role, in_params.fw_ver,
1129 in_params.drv_ver_0, in_params.drv_ver_1,
1130 out_params.exist_drv_role,
1131 out_params.exist_fw_ver,
1132 out_params.exist_drv_ver_0,
1133 out_params.exist_drv_ver_1);
1134
1135 ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
1136 return ECORE_BUSY;
1137 }
1138 }
1139
1140 /* Now handle the other types of responses.
1141 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1142 * expected here after the additional revised load requests were sent.
1143 */
1144 switch (out_params.load_code) {
1145 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1146 case FW_MSG_CODE_DRV_LOAD_PORT:
1147 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1148 if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
1149 out_params.drv_exists) {
1150 /* The role and fw/driver version match, but the PF is
1151 * already loaded and has not been unloaded gracefully.
1152 * This is unexpected since a quasi-FLR request was
1153 * previously sent as part of ecore_hw_prepare().
1154 */
1155 DP_NOTICE(p_hwfn, false,
1156 "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
1157 return ECORE_INVAL;
1158 }
1159 break;
1160 default:
1161 DP_NOTICE(p_hwfn, false,
1162 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1163 out_params.load_code);
1164 return ECORE_BUSY;
1165 }
1166
1167 p_params->load_code = out_params.load_code;
1168
1169 return ECORE_SUCCESS;
1170 }
1171
1172 enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
1173 struct ecore_ptt *p_ptt)
1174 {
1175 u32 resp = 0, param = 0;
1176 enum _ecore_status_t rc;
1177
1178 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1179 ¶m);
1180 if (rc != ECORE_SUCCESS) {
1181 DP_NOTICE(p_hwfn, false,
1182 "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1183 return rc;
1184 }
1185
1186 if (resp == FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT) {
1187 DP_NOTICE(p_hwfn, false,
1188 "Received a LOAD_REFUSED_REJECT response from the mfw\n");
1189 return ECORE_ABORTED;
1190 }
1191
1192 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1193 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1194 DP_NOTICE(p_hwfn, false,
1195 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1196
1197 return ECORE_SUCCESS;
1198 }
1199
1200 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
1201 struct ecore_ptt *p_ptt)
1202 {
1203 struct ecore_mcp_mb_params mb_params;
1204 u32 wol_param;
1205
1206 switch (p_hwfn->p_dev->wol_config) {
1207 case ECORE_OV_WOL_DISABLED:
1208 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1209 break;
1210 case ECORE_OV_WOL_ENABLED:
1211 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1212 break;
1213 default:
1214 DP_NOTICE(p_hwfn, true,
1215 "Unknown WoL configuration %02x\n",
1216 p_hwfn->p_dev->wol_config);
1217 /* Fallthrough */
1218 case ECORE_OV_WOL_DEFAULT:
1219 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1220 }
1221
1222 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1223 mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1224 mb_params.param = wol_param;
1225 mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK;
1226
1227 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1228 }
1229
1230 enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
1231 struct ecore_ptt *p_ptt)
1232 {
1233 struct ecore_mcp_mb_params mb_params;
1234 struct mcp_mac wol_mac;
1235
1236 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1237 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1238
1239 /* Set the primary MAC if WoL is enabled */
1240 if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
1241 u8 *p_mac = p_hwfn->p_dev->wol_mac;
1242
1243 OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
1244 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1245 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1246 p_mac[4] << 8 | p_mac[5];
1247
1248 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
1249 "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
1250 p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
1251 p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);
1252
1253 mb_params.p_data_src = &wol_mac;
1254 mb_params.data_src_size = sizeof(wol_mac);
1255 }
1256
1257 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1258 }
1259
1260 static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
1261 struct ecore_ptt *p_ptt)
1262 {
1263 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1264 PUBLIC_PATH);
1265 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1266 u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1267 ECORE_PATH_ID(p_hwfn));
1268 u32 disabled_vfs[VF_MAX_STATIC / 32];
1269 int i;
1270
1271 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1272 "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1273 mfw_path_offsize, path_addr);
1274
1275 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1276 disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
1277 path_addr +
1278 OFFSETOF(struct public_path,
1279 mcp_vf_disabled) +
1280 sizeof(u32) * i);
1281 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1282 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1283 i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1284 }
1285
1286 if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1287 OSAL_VF_FLR_UPDATE(p_hwfn);
1288 }
1289
1290 enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
1291 struct ecore_ptt *p_ptt,
1292 u32 *vfs_to_ack)
1293 {
1294 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1295 PUBLIC_FUNC);
1296 u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1297 u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1298 MCP_PF_ID(p_hwfn));
1299 struct ecore_mcp_mb_params mb_params;
1300 enum _ecore_status_t rc;
1301 int i;
1302
1303 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1304 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
1305 "Acking VFs [%08x,...,%08x] - %08x\n",
1306 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1307
1308 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1309 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1310 mb_params.p_data_src = vfs_to_ack;
1311 mb_params.data_src_size = VF_MAX_STATIC / 8;
1312 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1313 if (rc != ECORE_SUCCESS) {
1314 DP_NOTICE(p_hwfn, false,
1315 "Failed to pass ACK for VF flr to MFW\n");
1316 return ECORE_TIMEOUT;
1317 }
1318
1319 /* TMP - clear the ACK bits; should be done by MFW */
1320 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1321 ecore_wr(p_hwfn, p_ptt,
1322 func_addr +
1323 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
1324 i * sizeof(u32), 0);
1325
1326 return rc;
1327 }
1328
1329 static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
1330 struct ecore_ptt *p_ptt)
1331 {
1332 u32 transceiver_state;
1333
1334 transceiver_state = ecore_rd(p_hwfn, p_ptt,
1335 p_hwfn->mcp_info->port_addr +
1336 OFFSETOF(struct public_port,
1337 transceiver_data));
1338
1339 DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
1340 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1341 transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
1342 OFFSETOF(struct public_port,
1343 transceiver_data)));
1344
1345 transceiver_state = GET_MFW_FIELD(transceiver_state,
1346 ETH_TRANSCEIVER_STATE);
1347
1348 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1349 DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
1350 else
1351 DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
1352
1353 OSAL_TRANSCEIVER_UPDATE(p_hwfn);
1354 }
1355
1356 static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
1357 struct ecore_ptt *p_ptt,
1358 struct ecore_mcp_link_state *p_link)
1359 {
1360 u32 eee_status, val;
1361
1362 p_link->eee_adv_caps = 0;
1363 p_link->eee_lp_adv_caps = 0;
1364 eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1365 OFFSETOF(struct public_port, eee_status));
1366 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1367 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1368 if (val & EEE_1G_ADV)
1369 p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
1370 if (val & EEE_10G_ADV)
1371 p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
1372 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1373 if (val & EEE_1G_ADV)
1374 p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
1375 if (val & EEE_10G_ADV)
1376 p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
1377 }
1378
1379 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1380 struct ecore_ptt *p_ptt,
1381 struct public_func *p_data,
1382 int pfid)
1383 {
1384 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1385 PUBLIC_FUNC);
1386 u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1387 u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1388 u32 i, size;
1389
1390 OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1391
1392 size = OSAL_MIN_T(u32, sizeof(*p_data),
1393 SECTION_SIZE(mfw_path_offsize));
1394 for (i = 0; i < size / sizeof(u32); i++)
1395 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1396 func_addr + (i << 2));
1397
1398 return size;
1399 }
1400
1401 static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
1402 struct public_func *p_shmem_info)
1403 {
1404 struct ecore_mcp_function_info *p_info;
1405
1406 p_info = &p_hwfn->mcp_info->func_info;
1407
1408 /* TODO - bandwidth min/max should have valid values of 1-100,
1409 * as well as some indication that the feature is disabled.
1410 * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
1411 * limit and correct value to min `1' and max `100' if limit isn't in
1412 * range.
1413 */
1414 p_info->bandwidth_min = (p_shmem_info->config &
1415 FUNC_MF_CFG_MIN_BW_MASK) >>
1416 FUNC_MF_CFG_MIN_BW_OFFSET;
1417 if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1418 DP_INFO(p_hwfn,
1419 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1420 p_info->bandwidth_min);
1421 p_info->bandwidth_min = 1;
1422 }
1423
1424 p_info->bandwidth_max = (p_shmem_info->config &
1425 FUNC_MF_CFG_MAX_BW_MASK) >>
1426 FUNC_MF_CFG_MAX_BW_OFFSET;
1427 if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1428 DP_INFO(p_hwfn,
1429 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1430 p_info->bandwidth_max);
1431 p_info->bandwidth_max = 100;
1432 }
1433 }
1434
1435 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
1436 struct ecore_ptt *p_ptt,
1437 bool b_reset)
1438 {
1439 struct ecore_mcp_link_state *p_link;
1440 u8 max_bw, min_bw;
1441 u32 status = 0;
1442
1443 /* Prevent SW/attentions from doing this at the same time */
1444 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1445
1446 p_link = &p_hwfn->mcp_info->link_output;
1447 OSAL_MEMSET(p_link, 0, sizeof(*p_link));
1448 if (!b_reset) {
1449 status = ecore_rd(p_hwfn, p_ptt,
1450 p_hwfn->mcp_info->port_addr +
1451 OFFSETOF(struct public_port, link_status));
1452 DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
1453 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1454 status, (u32)(p_hwfn->mcp_info->port_addr +
1455 OFFSETOF(struct public_port, link_status)));
1456 } else {
1457 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1458 "Resetting link indications\n");
1459 goto out;
1460 }
1461
1462 if (p_hwfn->b_drv_link_init) {
1463 /* Link indication with modern MFW arrives as per-PF
1464 * indication.
1465 */
1466 if (p_hwfn->mcp_info->capabilities &
1467 FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1468 struct public_func shmem_info;
1469
1470 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1471 MCP_PF_ID(p_hwfn));
1472 p_link->link_up = !!(shmem_info.status &
1473 FUNC_STATUS_VIRTUAL_LINK_UP);
1474 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1475 } else {
1476 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1477 }
1478 } else {
1479 p_link->link_up = false;
1480 }
1481
1482 p_link->full_duplex = true;
1483 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1484 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1485 p_link->speed = 100000;
1486 break;
1487 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1488 p_link->speed = 50000;
1489 break;
1490 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1491 p_link->speed = 40000;
1492 break;
1493 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1494 p_link->speed = 25000;
1495 break;
1496 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1497 p_link->speed = 20000;
1498 break;
1499 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1500 p_link->speed = 10000;
1501 break;
1502 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1503 p_link->full_duplex = false;
1504 /* Fall-through */
1505 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1506 p_link->speed = 1000;
1507 break;
1508 default:
1509 p_link->speed = 0;
1510 p_link->link_up = 0;
1511 }
1512
1513 /* We never store total line speed as p_link->speed is
1514 * again changes according to bandwidth allocation.
1515 */
1516 if (p_link->link_up && p_link->speed)
1517 p_link->line_speed = p_link->speed;
1518 else
1519 p_link->line_speed = 0;
1520
1521 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1522 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1523
1524 /* Max bandwidth configuration */
1525 __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1526
1527 /* Min bandwidth configuration */
1528 __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1529 ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
1530 p_link->min_pf_rate);
1531
1532 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1533 p_link->an_complete = !!(status &
1534 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1535 p_link->parallel_detection = !!(status &
1536 LINK_STATUS_PARALLEL_DETECTION_USED);
1537 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1538
1539 p_link->partner_adv_speed |=
1540 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1541 ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
1542 p_link->partner_adv_speed |=
1543 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1544 ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
1545 p_link->partner_adv_speed |=
1546 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1547 ECORE_LINK_PARTNER_SPEED_10G : 0;
1548 p_link->partner_adv_speed |=
1549 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1550 ECORE_LINK_PARTNER_SPEED_20G : 0;
1551 p_link->partner_adv_speed |=
1552 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1553 ECORE_LINK_PARTNER_SPEED_25G : 0;
1554 p_link->partner_adv_speed |=
1555 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1556 ECORE_LINK_PARTNER_SPEED_40G : 0;
1557 p_link->partner_adv_speed |=
1558 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1559 ECORE_LINK_PARTNER_SPEED_50G : 0;
1560 p_link->partner_adv_speed |=
1561 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1562 ECORE_LINK_PARTNER_SPEED_100G : 0;
1563
1564 p_link->partner_tx_flow_ctrl_en =
1565 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1566 p_link->partner_rx_flow_ctrl_en =
1567 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1568
1569 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1570 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1571 p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
1572 break;
1573 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1574 p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
1575 break;
1576 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1577 p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
1578 break;
1579 default:
1580 p_link->partner_adv_pause = 0;
1581 }
1582
1583 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1584
1585 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1586 ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1587
1588 OSAL_LINK_UPDATE(p_hwfn, p_ptt);
1589 out:
1590 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1591 }
1592
1593 enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
1594 struct ecore_ptt *p_ptt,
1595 bool b_up)
1596 {
1597 struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1598 struct ecore_mcp_mb_params mb_params;
1599 struct eth_phy_cfg phy_cfg;
1600 enum _ecore_status_t rc = ECORE_SUCCESS;
1601 u32 cmd;
1602
1603 #ifndef ASIC_ONLY
1604 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1605 return ECORE_SUCCESS;
1606 #endif
1607
1608 /* Set the shmem configuration according to params */
1609 OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
1610 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1611 if (!params->speed.autoneg)
1612 phy_cfg.speed = params->speed.forced_speed;
1613 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1614 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1615 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1616 phy_cfg.adv_speed = params->speed.advertised_speeds;
1617 phy_cfg.loopback_mode = params->loopback_mode;
1618
1619 /* There are MFWs that share this capability regardless of whether
1620 * this is feasible or not. And given that at the very least adv_caps
1621 * would be set internally by ecore, we want to make sure LFA would
1622 * still work.
1623 */
1624 if ((p_hwfn->mcp_info->capabilities &
1625 FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
1626 params->eee.enable) {
1627 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1628 if (params->eee.tx_lpi_enable)
1629 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1630 if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
1631 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1632 if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
1633 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1634 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1635 EEE_TX_TIMER_USEC_OFFSET) &
1636 EEE_TX_TIMER_USEC_MASK;
1637 }
1638
1639 p_hwfn->b_drv_link_init = b_up;
1640
1641 if (b_up)
1642 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1643 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
1644 phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1645 phy_cfg.loopback_mode);
1646 else
1647 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
1648
1649 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1650 mb_params.cmd = cmd;
1651 mb_params.p_data_src = &phy_cfg;
1652 mb_params.data_src_size = sizeof(phy_cfg);
1653 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1654
1655 /* if mcp fails to respond we must abort */
1656 if (rc != ECORE_SUCCESS) {
1657 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1658 return rc;
1659 }
1660
1661 /* Mimic link-change attention, done for several reasons:
1662 * - On reset, there's no guarantee MFW would trigger
1663 * an attention.
1664 * - On initialization, older MFWs might not indicate link change
1665 * during LFA, so we'll never get an UP indication.
1666 */
1667 ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1668
1669 return ECORE_SUCCESS;
1670 }
1671
1672 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
1673 struct ecore_ptt *p_ptt)
1674 {
1675 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1676
1677 /* TODO - Add support for VFs */
1678 if (IS_VF(p_hwfn->p_dev))
1679 return ECORE_INVAL;
1680
1681 path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1682 PUBLIC_PATH);
1683 path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
1684 path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
1685
1686 proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
1687 path_addr +
1688 OFFSETOF(struct public_path, process_kill)) &
1689 PROCESS_KILL_COUNTER_MASK;
1690
1691 return proc_kill_cnt;
1692 }
1693
1694 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
1695 struct ecore_ptt *p_ptt)
1696 {
1697 struct ecore_dev *p_dev = p_hwfn->p_dev;
1698 u32 proc_kill_cnt;
1699
1700 /* Prevent possible attentions/interrupts during the recovery handling
1701 * and till its load phase, during which they will be re-enabled.
1702 */
1703 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1704
1705 DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
1706
1707 /* The following operations should be done once, and thus in CMT mode
1708 * are carried out by only the first HW function.
1709 */
1710 if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
1711 return;
1712
1713 if (p_dev->recov_in_prog) {
1714 DP_NOTICE(p_hwfn, false,
1715 "Ignoring the indication since a recovery process is already in progress\n");
1716 return;
1717 }
1718
1719 p_dev->recov_in_prog = true;
1720
1721 proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
1722 DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
1723
1724 OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
1725 }
1726
1727 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
1728 struct ecore_ptt *p_ptt,
1729 enum MFW_DRV_MSG_TYPE type)
1730 {
1731 enum ecore_mcp_protocol_type stats_type;
1732 union ecore_mcp_protocol_stats stats;
1733 struct ecore_mcp_mb_params mb_params;
1734 u32 hsi_param;
1735 enum _ecore_status_t rc;
1736
1737 switch (type) {
1738 case MFW_DRV_MSG_GET_LAN_STATS:
1739 stats_type = ECORE_MCP_LAN_STATS;
1740 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1741 break;
1742 case MFW_DRV_MSG_GET_FCOE_STATS:
1743 stats_type = ECORE_MCP_FCOE_STATS;
1744 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1745 break;
1746 case MFW_DRV_MSG_GET_ISCSI_STATS:
1747 stats_type = ECORE_MCP_ISCSI_STATS;
1748 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1749 break;
1750 case MFW_DRV_MSG_GET_RDMA_STATS:
1751 stats_type = ECORE_MCP_RDMA_STATS;
1752 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1753 break;
1754 default:
1755 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1756 "Invalid protocol type %d\n", type);
1757 return;
1758 }
1759
1760 OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
1761
1762 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1763 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1764 mb_params.param = hsi_param;
1765 mb_params.p_data_src = &stats;
1766 mb_params.data_src_size = sizeof(stats);
1767 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1768 if (rc != ECORE_SUCCESS)
1769 DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
1770 }
1771
1772 static void
1773 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1774 {
1775 struct ecore_mcp_function_info *p_info;
1776 struct public_func shmem_info;
1777 u32 resp = 0, param = 0;
1778
1779 OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
1780
1781 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1782 MCP_PF_ID(p_hwfn));
1783
1784 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1785
1786 p_info = &p_hwfn->mcp_info->func_info;
1787
1788 ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1789
1790 ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1791
1792 OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
1793
1794 /* Acknowledge the MFW */
1795 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1796 ¶m);
1797 }
1798
1799 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1800 struct ecore_ptt *p_ptt)
1801 {
1802 struct public_func shmem_info;
1803 u32 resp = 0, param = 0;
1804
1805 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1806 MCP_PF_ID(p_hwfn));
1807
1808 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1809 FUNC_MF_CFG_OV_STAG_MASK;
1810 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1811 if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1812 (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1813 ecore_wr(p_hwfn, p_ptt,
1814 NIG_REG_LLH_FUNC_TAG_VALUE,
1815 p_hwfn->hw_info.ovlan);
1816 ecore_sp_pf_update_stag(p_hwfn);
1817 /* Configure doorbell to add external vlan to EDPM packets */
1818 ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1819 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1820 p_hwfn->hw_info.ovlan);
1821 }
1822
1823 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1824 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1825 OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1826
1827 /* Acknowledge the MFW */
1828 ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1829 &resp, ¶m);
1830 }
1831
1832 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1833 {
1834 /* A single notification should be sent to upper driver in CMT mode */
1835 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1836 return;
1837
1838 DP_NOTICE(p_hwfn, false,
1839 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1840
1841 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1842 }
1843
1844 struct ecore_mdump_cmd_params {
1845 u32 cmd;
1846 void *p_data_src;
1847 u8 data_src_size;
1848 void *p_data_dst;
1849 u8 data_dst_size;
1850 u32 mcp_resp;
1851 };
1852
1853 static enum _ecore_status_t
1854 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1855 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1856 {
1857 struct ecore_mcp_mb_params mb_params;
1858 enum _ecore_status_t rc;
1859
1860 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1861 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1862 mb_params.param = p_mdump_cmd_params->cmd;
1863 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1864 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1865 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1866 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1867 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1868 if (rc != ECORE_SUCCESS)
1869 return rc;
1870
1871 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1872
1873 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1874 DP_INFO(p_hwfn,
1875 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1876 p_mdump_cmd_params->cmd);
1877 rc = ECORE_NOTIMPL;
1878 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1879 DP_INFO(p_hwfn,
1880 "The mdump command is not supported by the MFW\n");
1881 rc = ECORE_NOTIMPL;
1882 }
1883
1884 return rc;
1885 }
1886
1887 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1888 struct ecore_ptt *p_ptt)
1889 {
1890 struct ecore_mdump_cmd_params mdump_cmd_params;
1891
1892 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1893 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1894
1895 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1896 }
1897
1898 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1899 struct ecore_ptt *p_ptt,
1900 u32 epoch)
1901 {
1902 struct ecore_mdump_cmd_params mdump_cmd_params;
1903
1904 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1905 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1906 mdump_cmd_params.p_data_src = &epoch;
1907 mdump_cmd_params.data_src_size = sizeof(epoch);
1908
1909 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1910 }
1911
1912 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1913 struct ecore_ptt *p_ptt)
1914 {
1915 struct ecore_mdump_cmd_params mdump_cmd_params;
1916
1917 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1918 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1919
1920 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1921 }
1922
1923 static enum _ecore_status_t
1924 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1925 struct mdump_config_stc *p_mdump_config)
1926 {
1927 struct ecore_mdump_cmd_params mdump_cmd_params;
1928 enum _ecore_status_t rc;
1929
1930 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1931 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1932 mdump_cmd_params.p_data_dst = p_mdump_config;
1933 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1934
1935 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1936 if (rc != ECORE_SUCCESS)
1937 return rc;
1938
1939 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1940 DP_INFO(p_hwfn,
1941 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1942 mdump_cmd_params.mcp_resp);
1943 rc = ECORE_UNKNOWN_ERROR;
1944 }
1945
1946 return rc;
1947 }
1948
1949 enum _ecore_status_t
1950 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1951 struct ecore_mdump_info *p_mdump_info)
1952 {
1953 u32 addr, global_offsize, global_addr;
1954 struct mdump_config_stc mdump_config;
1955 enum _ecore_status_t rc;
1956
1957 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1958
1959 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1960 PUBLIC_GLOBAL);
1961 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1962 global_addr = SECTION_ADDR(global_offsize, 0);
1963 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1964 global_addr +
1965 OFFSETOF(struct public_global,
1966 mdump_reason));
1967
1968 if (p_mdump_info->reason) {
1969 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1970 if (rc != ECORE_SUCCESS)
1971 return rc;
1972
1973 p_mdump_info->version = mdump_config.version;
1974 p_mdump_info->config = mdump_config.config;
1975 p_mdump_info->epoch = mdump_config.epoc;
1976 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1977 p_mdump_info->valid_logs = mdump_config.valid_logs;
1978
1979 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1980 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1981 p_mdump_info->reason, p_mdump_info->version,
1982 p_mdump_info->config, p_mdump_info->epoch,
1983 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1984 } else {
1985 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1986 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1987 }
1988
1989 return ECORE_SUCCESS;
1990 }
1991
1992 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1993 struct ecore_ptt *p_ptt)
1994 {
1995 struct ecore_mdump_cmd_params mdump_cmd_params;
1996
1997 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1998 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1999
2000 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
2001 }
2002
2003 enum _ecore_status_t
2004 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2005 struct ecore_mdump_retain_data *p_mdump_retain)
2006 {
2007 struct ecore_mdump_cmd_params mdump_cmd_params;
2008 struct mdump_retain_data_stc mfw_mdump_retain;
2009 enum _ecore_status_t rc;
2010
2011 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
2012 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
2013 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
2014 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
2015
2016 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
2017 if (rc != ECORE_SUCCESS)
2018 return rc;
2019
2020 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
2021 DP_INFO(p_hwfn,
2022 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
2023 mdump_cmd_params.mcp_resp);
2024 return ECORE_UNKNOWN_ERROR;
2025 }
2026
2027 p_mdump_retain->valid = mfw_mdump_retain.valid;
2028 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
2029 p_mdump_retain->pf = mfw_mdump_retain.pf;
2030 p_mdump_retain->status = mfw_mdump_retain.status;
2031
2032 return ECORE_SUCCESS;
2033 }
2034
2035 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
2036 struct ecore_ptt *p_ptt)
2037 {
2038 struct ecore_mdump_cmd_params mdump_cmd_params;
2039
2040 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
2041 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
2042
2043 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
2044 }
2045
2046 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
2047 struct ecore_ptt *p_ptt)
2048 {
2049 struct ecore_mdump_retain_data mdump_retain;
2050 enum _ecore_status_t rc;
2051
2052 /* In CMT mode - no need for more than a single acknowledgement to the
2053 * MFW, and no more than a single notification to the upper driver.
2054 */
2055 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
2056 return;
2057
2058 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
2059 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
2060 DP_NOTICE(p_hwfn, false,
2061 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
2062 mdump_retain.epoch, mdump_retain.pf,
2063 mdump_retain.status);
2064 } else {
2065 DP_NOTICE(p_hwfn, false,
2066 "The MFW notified that a critical error occurred in the device\n");
2067 }
2068
2069 if (p_hwfn->p_dev->allow_mdump) {
2070 DP_NOTICE(p_hwfn, false,
2071 "Not acknowledging the notification to allow the MFW crash dump\n");
2072 return;
2073 }
2074
2075 DP_NOTICE(p_hwfn, false,
2076 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
2077 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
2078 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
2079 }
2080
2081 void
2082 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2083 {
2084 struct public_func shmem_info;
2085 u32 port_cfg, val;
2086
2087 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
2088 return;
2089
2090 OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
2091 port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2092 OFFSETOF(struct public_port, oem_cfg_port));
2093 val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
2094 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
2095 DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
2096 val);
2097
2098 val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
2099 if (val == OEM_CFG_SCHED_TYPE_ETS)
2100 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
2101 else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
2102 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
2103 else {
2104 p_hwfn->ufp_info.mode = ECORE_UFP_MODE_UNKNOWN;
2105 DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
2106 val);
2107 }
2108
2109 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2110 MCP_PF_ID(p_hwfn));
2111 val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
2112 p_hwfn->ufp_info.tc = (u8)val;
2113 val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
2114 OEM_CFG_FUNC_HOST_PRI_CTRL);
2115 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
2116 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
2117 else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
2118 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
2119 else {
2120 p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_UNKNOWN;
2121 DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
2122 val);
2123 }
2124
2125 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
2126 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
2127 p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
2128 p_hwfn->ufp_info.pri_type);
2129 }
2130
2131 static enum _ecore_status_t
2132 ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2133 {
2134 ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
2135
2136 if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
2137 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
2138 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
2139
2140 ecore_qm_reconf(p_hwfn, p_ptt);
2141 } else if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_ETS) {
2142 /* Merge UFP TC with the dcbx TC data */
2143 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2144 ECORE_DCBX_OPERATIONAL_MIB);
2145 } else {
2146 DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
2147 return ECORE_INVAL;
2148 }
2149
2150 /* update storm FW with negotiation results */
2151 ecore_sp_pf_update_ufp(p_hwfn);
2152
2153 return ECORE_SUCCESS;
2154 }
2155
2156 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
2157 struct ecore_ptt *p_ptt)
2158 {
2159 struct ecore_mcp_info *info = p_hwfn->mcp_info;
2160 enum _ecore_status_t rc = ECORE_SUCCESS;
2161 bool found = false;
2162 u16 i;
2163
2164 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
2165
2166 /* Read Messages from MFW */
2167 ecore_mcp_read_mb(p_hwfn, p_ptt);
2168
2169 /* Compare current messages to old ones */
2170 for (i = 0; i < info->mfw_mb_length; i++) {
2171 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2172 continue;
2173
2174 found = true;
2175
2176 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2177 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2178 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2179
2180 switch (i) {
2181 case MFW_DRV_MSG_LINK_CHANGE:
2182 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2183 break;
2184 case MFW_DRV_MSG_VF_DISABLED:
2185 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2186 break;
2187 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2188 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2189 ECORE_DCBX_REMOTE_LLDP_MIB);
2190 break;
2191 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2192 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2193 ECORE_DCBX_REMOTE_MIB);
2194 break;
2195 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2196 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2197 ECORE_DCBX_OPERATIONAL_MIB);
2198 /* clear the user-config cache */
2199 OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
2200 sizeof(struct ecore_dcbx_set));
2201 break;
2202 case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
2203 ecore_lldp_mib_update_event(p_hwfn, p_ptt);
2204 break;
2205 case MFW_DRV_MSG_OEM_CFG_UPDATE:
2206 ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
2207 break;
2208 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2209 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2210 break;
2211 case MFW_DRV_MSG_ERROR_RECOVERY:
2212 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2213 break;
2214 case MFW_DRV_MSG_GET_LAN_STATS:
2215 case MFW_DRV_MSG_GET_FCOE_STATS:
2216 case MFW_DRV_MSG_GET_ISCSI_STATS:
2217 case MFW_DRV_MSG_GET_RDMA_STATS:
2218 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2219 break;
2220 case MFW_DRV_MSG_BW_UPDATE:
2221 ecore_mcp_update_bw(p_hwfn, p_ptt);
2222 break;
2223 case MFW_DRV_MSG_S_TAG_UPDATE:
2224 ecore_mcp_update_stag(p_hwfn, p_ptt);
2225 break;
2226 case MFW_DRV_MSG_FAILURE_DETECTED:
2227 ecore_mcp_handle_fan_failure(p_hwfn);
2228 break;
2229 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2230 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2231 break;
2232 case MFW_DRV_MSG_GET_TLV_REQ:
2233 OSAL_MFW_TLV_REQ(p_hwfn);
2234 break;
2235 default:
2236 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2237 rc = ECORE_INVAL;
2238 }
2239 }
2240
2241 /* ACK everything */
2242 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2243 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2244
2245 /* MFW expect answer in BE, so we force write in that format */
2246 ecore_wr(p_hwfn, p_ptt,
2247 info->mfw_mb_addr + sizeof(u32) +
2248 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2249 sizeof(u32) + i * sizeof(u32), val);
2250 }
2251
2252 if (!found) {
2253 DP_INFO(p_hwfn,
2254 "Received an MFW message indication but no new message!\n");
2255 rc = ECORE_INVAL;
2256 }
2257
2258 /* Copy the new mfw messages into the shadow */
2259 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2260
2261 return rc;
2262 }
2263
2264 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2265 struct ecore_ptt *p_ptt,
2266 u32 *p_mfw_ver,
2267 u32 *p_running_bundle_id)
2268 {
2269 u32 global_offsize;
2270
2271 #ifndef ASIC_ONLY
2272 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2273 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2274 return ECORE_SUCCESS;
2275 }
2276 #endif
2277
2278 if (IS_VF(p_hwfn->p_dev)) {
2279 if (p_hwfn->vf_iov_info) {
2280 struct pfvf_acquire_resp_tlv *p_resp;
2281
2282 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2283 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2284 return ECORE_SUCCESS;
2285 } else {
2286 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2287 "VF requested MFW version prior to ACQUIRE\n");
2288 return ECORE_INVAL;
2289 }
2290 }
2291
2292 global_offsize = ecore_rd(p_hwfn, p_ptt,
2293 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
2294 PUBLIC_GLOBAL));
2295 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2296 SECTION_ADDR(global_offsize, 0) +
2297 OFFSETOF(struct public_global, mfw_ver));
2298
2299 if (p_running_bundle_id != OSAL_NULL) {
2300 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2301 SECTION_ADDR(global_offsize, 0) +
2302 OFFSETOF(struct public_global,
2303 running_bundle_id));
2304 }
2305
2306 return ECORE_SUCCESS;
2307 }
2308
2309 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2310 struct ecore_ptt *p_ptt,
2311 u32 *p_mbi_ver)
2312 {
2313 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2314
2315 #ifndef ASIC_ONLY
2316 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2317 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2318 return ECORE_SUCCESS;
2319 }
2320 #endif
2321
2322 if (IS_VF(p_hwfn->p_dev))
2323 return ECORE_INVAL;
2324
2325 /* Read the address of the nvm_cfg */
2326 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2327 if (!nvm_cfg_addr) {
2328 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2329 return ECORE_INVAL;
2330 }
2331
2332 /* Read the offset of nvm_cfg1 */
2333 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2334
2335 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2336 OFFSETOF(struct nvm_cfg1, glob) +
2337 OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2338 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2339 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2340 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2341 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2342
2343 return ECORE_SUCCESS;
2344 }
2345
2346 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
2347 struct ecore_ptt *p_ptt,
2348 u32 *p_media_type)
2349 {
2350
2351 /* TODO - Add support for VFs */
2352 if (IS_VF(p_hwfn->p_dev))
2353 return ECORE_INVAL;
2354
2355 if (!ecore_mcp_is_init(p_hwfn)) {
2356 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2357 return ECORE_BUSY;
2358 }
2359 if (!p_ptt) {
2360 *p_media_type = MEDIA_UNSPECIFIED;
2361 return ECORE_INVAL;
2362 } else {
2363 *p_media_type = ecore_rd(p_hwfn, p_ptt,
2364 p_hwfn->mcp_info->port_addr +
2365 OFFSETOF(struct public_port,
2366 media_type));
2367 }
2368
2369 return ECORE_SUCCESS;
2370 }
2371
2372 enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
2373 struct ecore_ptt *p_ptt,
2374 u32 *p_tranceiver_type)
2375 {
2376 /* TODO - Add support for VFs */
2377 if (IS_VF(p_hwfn->p_dev))
2378 return ECORE_INVAL;
2379
2380 if (!ecore_mcp_is_init(p_hwfn)) {
2381 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2382 return ECORE_BUSY;
2383 }
2384 if (!p_ptt) {
2385 *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2386 return ECORE_INVAL;
2387 } else {
2388 *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
2389 p_hwfn->mcp_info->port_addr +
2390 offsetof(struct public_port,
2391 transceiver_data));
2392 }
2393
2394 return 0;
2395 }
2396
2397 static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
2398 {
2399
2400 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2401 ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2402 (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) {
2403 return 1;
2404 }
2405
2406 return 0;
2407 }
2408
2409 enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
2410 struct ecore_ptt *p_ptt,
2411 u32 *p_speed_mask)
2412 {
2413 u32 transceiver_data, transceiver_type, transceiver_state;
2414
2415 ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
2416
2417 transceiver_state = GET_MFW_FIELD(transceiver_data,
2418 ETH_TRANSCEIVER_STATE);
2419
2420 transceiver_type = GET_MFW_FIELD(transceiver_data,
2421 ETH_TRANSCEIVER_TYPE);
2422
2423 if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) {
2424 return ECORE_INVAL;
2425 }
2426
2427 switch (transceiver_type) {
2428 case ETH_TRANSCEIVER_TYPE_1G_LX:
2429 case ETH_TRANSCEIVER_TYPE_1G_SX:
2430 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2431 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2432 case ETH_TRANSCEIVER_TYPE_1000BASET:
2433 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2434 break;
2435
2436 case ETH_TRANSCEIVER_TYPE_10G_SR:
2437 case ETH_TRANSCEIVER_TYPE_10G_LR:
2438 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2439 case ETH_TRANSCEIVER_TYPE_10G_ER:
2440 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2441 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2442 case ETH_TRANSCEIVER_TYPE_4x10G:
2443 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2444 break;
2445
2446 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2447 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2448 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2449 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2450 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2451 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2452 break;
2453
2454 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2455 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2456 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2457 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2458 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2459 *p_speed_mask =
2460 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2461 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2462 break;
2463
2464 case ETH_TRANSCEIVER_TYPE_25G_SR:
2465 case ETH_TRANSCEIVER_TYPE_25G_LR:
2466 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2467 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2468 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2469 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2470 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2471 break;
2472
2473 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2474 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2475 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2476 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2477 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2478 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2479 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2480 break;
2481
2482 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2483 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2484 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2485 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2486 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2487 break;
2488
2489 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2490 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2491 *p_speed_mask =
2492 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2493 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2494 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2495 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2496 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2497 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2498 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2499 break;
2500
2501 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2502 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2503 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2504 *p_speed_mask =
2505 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2506 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2507 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2508 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2509 break;
2510
2511 case ETH_TRANSCEIVER_TYPE_XLPPI:
2512 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2513 break;
2514
2515 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2516 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2517 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2518 break;
2519
2520 default:
2521 DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n",
2522 transceiver_type);
2523 *p_speed_mask = 0xff;
2524 break;
2525 }
2526
2527 return ECORE_SUCCESS;
2528 }
2529
2530 enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
2531 struct ecore_ptt *p_ptt,
2532 u32 *p_board_config)
2533 {
2534 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2535
2536 /* TODO - Add support for VFs */
2537 if (IS_VF(p_hwfn->p_dev))
2538 return ECORE_INVAL;
2539
2540 if (!ecore_mcp_is_init(p_hwfn)) {
2541 DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
2542 return ECORE_BUSY;
2543 }
2544 if (!p_ptt) {
2545 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2546 return ECORE_INVAL;
2547 } else {
2548 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
2549 MISC_REG_GEN_PURP_CR0);
2550 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
2551 nvm_cfg_addr + 4);
2552 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2553 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2554 *p_board_config = ecore_rd(p_hwfn, p_ptt,
2555 port_cfg_addr +
2556 offsetof(struct nvm_cfg1_port,
2557 board_cfg));
2558 }
2559
2560 return ECORE_SUCCESS;
2561 }
2562
2563 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2564 static void
2565 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2566 enum ecore_pci_personality *p_proto)
2567 {
2568 /* There wasn't ever a legacy MFW that published iwarp.
2569 * So at this point, this is either plain l2 or RoCE.
2570 */
2571 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2572 &p_hwfn->hw_info.device_capabilities))
2573 *p_proto = ECORE_PCI_ETH_ROCE;
2574 else
2575 *p_proto = ECORE_PCI_ETH;
2576
2577 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2578 "According to Legacy capabilities, L2 personality is %08x\n",
2579 (u32) *p_proto);
2580 }
2581
2582 static enum _ecore_status_t
2583 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2584 struct ecore_ptt *p_ptt,
2585 enum ecore_pci_personality *p_proto)
2586 {
2587 u32 resp = 0, param = 0;
2588 enum _ecore_status_t rc;
2589
2590 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2591 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, ¶m);
2592 if (rc != ECORE_SUCCESS)
2593 return rc;
2594 if (resp != FW_MSG_CODE_OK) {
2595 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2596 "MFW lacks support for command; Returns %08x\n",
2597 resp);
2598 return ECORE_INVAL;
2599 }
2600
2601 switch (param) {
2602 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2603 *p_proto = ECORE_PCI_ETH;
2604 break;
2605 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2606 *p_proto = ECORE_PCI_ETH_ROCE;
2607 break;
2608 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2609 *p_proto = ECORE_PCI_ETH_IWARP;
2610 break;
2611 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2612 *p_proto = ECORE_PCI_ETH_RDMA;
2613 break;
2614 default:
2615 DP_NOTICE(p_hwfn, true,
2616 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2617 param);
2618 return ECORE_INVAL;
2619 }
2620
2621 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2622 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2623 (u32) *p_proto, resp, param);
2624 return ECORE_SUCCESS;
2625 }
2626
2627 static enum _ecore_status_t
2628 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2629 struct public_func *p_info,
2630 struct ecore_ptt *p_ptt,
2631 enum ecore_pci_personality *p_proto)
2632 {
2633 enum _ecore_status_t rc = ECORE_SUCCESS;
2634
2635 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2636 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2637 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2638 ECORE_SUCCESS)
2639 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2640 break;
2641 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2642 *p_proto = ECORE_PCI_ISCSI;
2643 break;
2644 case FUNC_MF_CFG_PROTOCOL_FCOE:
2645 *p_proto = ECORE_PCI_FCOE;
2646 break;
2647 case FUNC_MF_CFG_PROTOCOL_ROCE:
2648 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2649 /* Fallthrough */
2650 default:
2651 rc = ECORE_INVAL;
2652 }
2653
2654 return rc;
2655 }
2656
2657 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2658 struct ecore_ptt *p_ptt)
2659 {
2660 struct ecore_mcp_function_info *info;
2661 struct public_func shmem_info;
2662
2663 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2664 MCP_PF_ID(p_hwfn));
2665 info = &p_hwfn->mcp_info->func_info;
2666
2667 info->pause_on_host = (shmem_info.config &
2668 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2669
2670 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2671 &info->protocol)) {
2672 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2673 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2674 return ECORE_INVAL;
2675 }
2676
2677 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2678
2679 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2680 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2681 info->mac[1] = (u8)(shmem_info.mac_upper);
2682 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2683 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2684 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2685 info->mac[5] = (u8)(shmem_info.mac_lower);
2686
2687 /* Store primary MAC for later possible WoL */
2688 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2689
2690 } else {
2691 /* TODO - are there protocols for which there's no MAC? */
2692 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2693 }
2694
2695 /* TODO - are these calculations true for BE machine? */
2696 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2697 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2698 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2699 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2700
2701 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2702
2703 info->mtu = (u16)shmem_info.mtu_size;
2704
2705 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2706 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2707 if (ecore_mcp_is_init(p_hwfn)) {
2708 u32 resp = 0, param = 0;
2709 enum _ecore_status_t rc;
2710
2711 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2712 DRV_MSG_CODE_OS_WOL, 0, &resp, ¶m);
2713 if (rc != ECORE_SUCCESS)
2714 return rc;
2715 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2716 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2717 }
2718
2719 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2720 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2721 info->pause_on_host, info->protocol,
2722 info->bandwidth_min, info->bandwidth_max,
2723 info->mac[0], info->mac[1], info->mac[2],
2724 info->mac[3], info->mac[4], info->mac[5],
2725 (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan,
2726 (u8)p_hwfn->hw_info.b_wol_support);
2727
2728 return ECORE_SUCCESS;
2729 }
2730
2731 struct ecore_mcp_link_params
2732 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2733 {
2734 if (!p_hwfn || !p_hwfn->mcp_info)
2735 return OSAL_NULL;
2736 return &p_hwfn->mcp_info->link_input;
2737 }
2738
2739 struct ecore_mcp_link_state
2740 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2741 {
2742 if (!p_hwfn || !p_hwfn->mcp_info)
2743 return OSAL_NULL;
2744
2745 #ifndef ASIC_ONLY
2746 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2747 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2748 p_hwfn->mcp_info->link_output.link_up = true;
2749 }
2750 #endif
2751
2752 return &p_hwfn->mcp_info->link_output;
2753 }
2754
2755 struct ecore_mcp_link_capabilities
2756 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2757 {
2758 if (!p_hwfn || !p_hwfn->mcp_info)
2759 return OSAL_NULL;
2760 return &p_hwfn->mcp_info->link_capabilities;
2761 }
2762
2763 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2764 struct ecore_ptt *p_ptt)
2765 {
2766 u32 resp = 0, param = 0;
2767 enum _ecore_status_t rc;
2768
2769 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2770 DRV_MSG_CODE_NIG_DRAIN, 1000,
2771 &resp, ¶m);
2772
2773 /* Wait for the drain to complete before returning */
2774 OSAL_MSLEEP(1020);
2775
2776 return rc;
2777 }
2778
2779 #ifndef LINUX_REMOVE
2780 const struct ecore_mcp_function_info
2781 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2782 {
2783 if (!p_hwfn || !p_hwfn->mcp_info)
2784 return OSAL_NULL;
2785 return &p_hwfn->mcp_info->func_info;
2786 }
2787
2788 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2789 struct ecore_ptt *p_ptt,
2790 u32 personalities)
2791 {
2792 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2793 struct public_func shmem_info;
2794 int i, count = 0, num_pfs;
2795
2796 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2797
2798 for (i = 0; i < num_pfs; i++) {
2799 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2800 MCP_PF_ID_BY_REL(p_hwfn, i));
2801 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2802 continue;
2803
2804 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2805 &protocol) !=
2806 ECORE_SUCCESS)
2807 continue;
2808
2809 if ((1 << ((u32)protocol)) & personalities)
2810 count++;
2811 }
2812
2813 return count;
2814 }
2815 #endif
2816
2817 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2818 struct ecore_ptt *p_ptt,
2819 u32 *p_flash_size)
2820 {
2821 u32 flash_size;
2822
2823 #ifndef ASIC_ONLY
2824 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2825 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2826 return ECORE_INVAL;
2827 }
2828 #endif
2829
2830 if (IS_VF(p_hwfn->p_dev))
2831 return ECORE_INVAL;
2832
2833 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2834 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2835 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2836 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2837
2838 *p_flash_size = flash_size;
2839
2840 return ECORE_SUCCESS;
2841 }
2842
2843 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2844 struct ecore_ptt *p_ptt)
2845 {
2846 struct ecore_dev *p_dev = p_hwfn->p_dev;
2847
2848 if (p_dev->recov_in_prog) {
2849 DP_NOTICE(p_hwfn, false,
2850 "Avoid triggering a recovery since such a process is already in progress\n");
2851 return ECORE_AGAIN;
2852 }
2853
2854 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2855 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2856
2857 return ECORE_SUCCESS;
2858 }
2859
2860 #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
2861
2862 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
2863 {
2864 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2865 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2866 enum _ecore_status_t rc;
2867
2868 /* Allow ongoing PCIe transactions to complete */
2869 OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
2870
2871 /* Clear the PF's internal FID_enable in the PXP */
2872 rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2873 if (rc != ECORE_SUCCESS)
2874 DP_NOTICE(p_hwfn, false,
2875 "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
2876 rc);
2877
2878 return rc;
2879 }
2880
2881 static enum _ecore_status_t
2882 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2883 struct ecore_ptt *p_ptt,
2884 u8 vf_id, u8 num)
2885 {
2886 u32 resp = 0, param = 0, rc_param = 0;
2887 enum _ecore_status_t rc;
2888
2889 /* Only Leader can configure MSIX, and need to take CMT into account */
2890 if (!IS_LEAD_HWFN(p_hwfn))
2891 return ECORE_SUCCESS;
2892 num *= p_hwfn->p_dev->num_hwfns;
2893
2894 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2895 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2896 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2897 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2898
2899 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2900 &resp, &rc_param);
2901
2902 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2903 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2904 vf_id);
2905 rc = ECORE_INVAL;
2906 } else {
2907 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2908 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2909 num, vf_id);
2910 }
2911
2912 return rc;
2913 }
2914
2915 static enum _ecore_status_t
2916 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2917 struct ecore_ptt *p_ptt,
2918 u8 num)
2919 {
2920 u32 resp = 0, param = num, rc_param = 0;
2921 enum _ecore_status_t rc;
2922
2923 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2924 param, &resp, &rc_param);
2925
2926 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2927 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2928 rc = ECORE_INVAL;
2929 } else {
2930 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2931 "Requested 0x%02x MSI-x interrupts for VFs\n",
2932 num);
2933 }
2934
2935 return rc;
2936 }
2937
2938 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2939 struct ecore_ptt *p_ptt,
2940 u8 vf_id, u8 num)
2941 {
2942 if (ECORE_IS_BB(p_hwfn->p_dev))
2943 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2944 else
2945 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2946 }
2947
2948 enum _ecore_status_t
2949 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2950 struct ecore_mcp_drv_version *p_ver)
2951 {
2952 struct ecore_mcp_mb_params mb_params;
2953 struct drv_version_stc drv_version;
2954 u32 num_words, i;
2955 void *p_name;
2956 OSAL_BE32 val;
2957 enum _ecore_status_t rc;
2958
2959 #ifndef ASIC_ONLY
2960 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2961 return ECORE_SUCCESS;
2962 #endif
2963
2964 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2965 drv_version.version = p_ver->version;
2966 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2967 for (i = 0; i < num_words; i++) {
2968 /* The driver name is expected to be in a big-endian format */
2969 p_name = &p_ver->name[i * sizeof(u32)];
2970 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2971 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2972 }
2973
2974 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2975 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2976 mb_params.p_data_src = &drv_version;
2977 mb_params.data_src_size = sizeof(drv_version);
2978 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2979 if (rc != ECORE_SUCCESS)
2980 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2981
2982 return rc;
2983 }
2984
2985 /* A maximal 100 msec waiting time for the MCP to halt */
2986 #define ECORE_MCP_HALT_SLEEP_MS 10
2987 #define ECORE_MCP_HALT_MAX_RETRIES 10
2988
2989 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2990 struct ecore_ptt *p_ptt)
2991 {
2992 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2993 enum _ecore_status_t rc;
2994
2995 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2996 ¶m);
2997 if (rc != ECORE_SUCCESS) {
2998 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2999 return rc;
3000 }
3001
3002 do {
3003 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
3004 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
3005 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
3006 break;
3007 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
3008
3009 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
3010 DP_NOTICE(p_hwfn, false,
3011 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
3012 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
3013 return ECORE_BUSY;
3014 }
3015
3016 ecore_mcp_cmd_set_blocking(p_hwfn, true);
3017
3018 return ECORE_SUCCESS;
3019 }
3020
3021 #define ECORE_MCP_RESUME_SLEEP_MS 10
3022
3023 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
3024 struct ecore_ptt *p_ptt)
3025 {
3026 u32 cpu_mode, cpu_state;
3027
3028 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
3029
3030 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
3031 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
3032 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
3033
3034 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
3035 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
3036
3037 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
3038 DP_NOTICE(p_hwfn, false,
3039 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
3040 cpu_mode, cpu_state);
3041 return ECORE_BUSY;
3042 }
3043
3044 ecore_mcp_cmd_set_blocking(p_hwfn, false);
3045
3046 return ECORE_SUCCESS;
3047 }
3048
3049 enum _ecore_status_t
3050 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
3051 struct ecore_ptt *p_ptt,
3052 enum ecore_ov_client client)
3053 {
3054 u32 resp = 0, param = 0;
3055 u32 drv_mb_param;
3056 enum _ecore_status_t rc;
3057
3058 switch (client) {
3059 case ECORE_OV_CLIENT_DRV:
3060 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
3061 break;
3062 case ECORE_OV_CLIENT_USER:
3063 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
3064 break;
3065 case ECORE_OV_CLIENT_VENDOR_SPEC:
3066 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
3067 break;
3068 default:
3069 DP_NOTICE(p_hwfn, true,
3070 "Invalid client type %d\n", client);
3071 return ECORE_INVAL;
3072 }
3073
3074 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
3075 drv_mb_param, &resp, ¶m);
3076 if (rc != ECORE_SUCCESS)
3077 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3078
3079 return rc;
3080 }
3081
3082 enum _ecore_status_t
3083 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
3084 struct ecore_ptt *p_ptt,
3085 enum ecore_ov_driver_state drv_state)
3086 {
3087 u32 resp = 0, param = 0;
3088 u32 drv_mb_param;
3089 enum _ecore_status_t rc;
3090
3091 switch (drv_state) {
3092 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
3093 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
3094 break;
3095 case ECORE_OV_DRIVER_STATE_DISABLED:
3096 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
3097 break;
3098 case ECORE_OV_DRIVER_STATE_ACTIVE:
3099 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
3100 break;
3101 default:
3102 DP_NOTICE(p_hwfn, true,
3103 "Invalid driver state %d\n", drv_state);
3104 return ECORE_INVAL;
3105 }
3106
3107 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
3108 drv_mb_param, &resp, ¶m);
3109 if (rc != ECORE_SUCCESS)
3110 DP_ERR(p_hwfn, "Failed to send driver state\n");
3111
3112 return rc;
3113 }
3114
3115 enum _ecore_status_t
3116 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3117 struct ecore_fc_npiv_tbl *p_table)
3118 {
3119 struct dci_fc_npiv_tbl *p_npiv_table;
3120 u8 *p_buf = OSAL_NULL;
3121 u32 addr, size, i;
3122 enum _ecore_status_t rc = ECORE_SUCCESS;
3123
3124 p_table->num_wwpn = 0;
3125 p_table->num_wwnn = 0;
3126 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
3127 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
3128 if (addr == NPIV_TBL_INVALID_ADDR) {
3129 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
3130 return rc;
3131 }
3132
3133 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
3134 OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
3135 if (!size) {
3136 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
3137 return rc;
3138 }
3139
3140 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
3141 if (!p_buf) {
3142 DP_ERR(p_hwfn, "Buffer allocation failed\n");
3143 return ECORE_NOMEM;
3144 }
3145
3146 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
3147 if (rc != ECORE_SUCCESS) {
3148 OSAL_VFREE(p_hwfn->p_dev, p_buf);
3149 return rc;
3150 }
3151
3152 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
3153 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
3154 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
3155 for (i = 0; i < p_table->num_wwpn; i++) {
3156 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
3157 ECORE_WWN_SIZE);
3158 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
3159 ECORE_WWN_SIZE);
3160 }
3161
3162 OSAL_VFREE(p_hwfn->p_dev, p_buf);
3163
3164 return ECORE_SUCCESS;
3165 }
3166
3167 enum _ecore_status_t
3168 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3169 u16 mtu)
3170 {
3171 u32 resp = 0, param = 0;
3172 u32 drv_mb_param;
3173 enum _ecore_status_t rc;
3174
3175 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_OFFSET;
3176 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
3177 drv_mb_param, &resp, ¶m);
3178 if (rc != ECORE_SUCCESS)
3179 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
3180
3181 return rc;
3182 }
3183
3184 enum _ecore_status_t
3185 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3186 u8 *mac)
3187 {
3188 struct ecore_mcp_mb_params mb_params;
3189 u32 mfw_mac[2];
3190 enum _ecore_status_t rc;
3191
3192 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3193 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
3194 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
3195 DRV_MSG_CODE_VMAC_TYPE_OFFSET;
3196 mb_params.param |= MCP_PF_ID(p_hwfn);
3197
3198 /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
3199 * in 32-bit granularity.
3200 * So the MAC has to be set in native order [and not byte order],
3201 * otherwise it would be read incorrectly by MFW after swap.
3202 */
3203 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
3204 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
3205
3206 mb_params.p_data_src = (u8 *)mfw_mac;
3207 mb_params.data_src_size = 8;
3208 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3209 if (rc != ECORE_SUCCESS)
3210 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
3211
3212 /* Store primary MAC for later possible WoL */
3213 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
3214
3215 return rc;
3216 }
3217
3218 enum _ecore_status_t
3219 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3220 enum ecore_ov_wol wol)
3221 {
3222 u32 resp = 0, param = 0;
3223 u32 drv_mb_param;
3224 enum _ecore_status_t rc;
3225
3226 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
3227 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3228 "Can't change WoL configuration when WoL isn't supported\n");
3229 return ECORE_INVAL;
3230 }
3231
3232 switch (wol) {
3233 case ECORE_OV_WOL_DEFAULT:
3234 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
3235 break;
3236 case ECORE_OV_WOL_DISABLED:
3237 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
3238 break;
3239 case ECORE_OV_WOL_ENABLED:
3240 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
3241 break;
3242 default:
3243 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
3244 return ECORE_INVAL;
3245 }
3246
3247 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
3248 drv_mb_param, &resp, ¶m);
3249 if (rc != ECORE_SUCCESS)
3250 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
3251
3252 /* Store the WoL update for a future unload */
3253 p_hwfn->p_dev->wol_config = (u8)wol;
3254
3255 return rc;
3256 }
3257
3258 enum _ecore_status_t
3259 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3260 enum ecore_ov_eswitch eswitch)
3261 {
3262 u32 resp = 0, param = 0;
3263 u32 drv_mb_param;
3264 enum _ecore_status_t rc;
3265
3266 switch (eswitch) {
3267 case ECORE_OV_ESWITCH_NONE:
3268 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
3269 break;
3270 case ECORE_OV_ESWITCH_VEB:
3271 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
3272 break;
3273 case ECORE_OV_ESWITCH_VEPA:
3274 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
3275 break;
3276 default:
3277 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
3278 return ECORE_INVAL;
3279 }
3280
3281 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
3282 drv_mb_param, &resp, ¶m);
3283 if (rc != ECORE_SUCCESS)
3284 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
3285
3286 return rc;
3287 }
3288
3289 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
3290 struct ecore_ptt *p_ptt,
3291 enum ecore_led_mode mode)
3292 {
3293 u32 resp = 0, param = 0, drv_mb_param;
3294 enum _ecore_status_t rc;
3295
3296 switch (mode) {
3297 case ECORE_LED_MODE_ON:
3298 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
3299 break;
3300 case ECORE_LED_MODE_OFF:
3301 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
3302 break;
3303 case ECORE_LED_MODE_RESTORE:
3304 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
3305 break;
3306 default:
3307 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
3308 return ECORE_INVAL;
3309 }
3310
3311 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
3312 drv_mb_param, &resp, ¶m);
3313 if (rc != ECORE_SUCCESS)
3314 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
3315
3316 return rc;
3317 }
3318
3319 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
3320 struct ecore_ptt *p_ptt,
3321 u32 mask_parities)
3322 {
3323 u32 resp = 0, param = 0;
3324 enum _ecore_status_t rc;
3325
3326 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3327 mask_parities, &resp, ¶m);
3328
3329 if (rc != ECORE_SUCCESS) {
3330 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
3331 } else if (resp != FW_MSG_CODE_OK) {
3332 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
3333 rc = ECORE_INVAL;
3334 }
3335
3336 return rc;
3337 }
3338
3339 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
3340 u8 *p_buf, u32 len)
3341 {
3342 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3343 u32 bytes_left, offset, bytes_to_copy, buf_size;
3344 u32 nvm_offset, resp = 0, param;
3345 struct ecore_ptt *p_ptt;
3346 enum _ecore_status_t rc = ECORE_SUCCESS;
3347
3348 p_ptt = ecore_ptt_acquire(p_hwfn);
3349 if (!p_ptt)
3350 return ECORE_BUSY;
3351
3352 bytes_left = len;
3353 offset = 0;
3354 while (bytes_left > 0) {
3355 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3356 MCP_DRV_NVM_BUF_LEN);
3357 nvm_offset = (addr + offset) | (bytes_to_copy <<
3358 DRV_MB_PARAM_NVM_LEN_OFFSET);
3359 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3360 DRV_MSG_CODE_NVM_READ_NVRAM,
3361 nvm_offset, &resp, ¶m, &buf_size,
3362 (u32 *)(p_buf + offset));
3363 if (rc != ECORE_SUCCESS) {
3364 DP_NOTICE(p_dev, false,
3365 "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
3366 rc);
3367 resp = FW_MSG_CODE_ERROR;
3368 break;
3369 }
3370
3371 if (resp != FW_MSG_CODE_NVM_OK) {
3372 DP_NOTICE(p_dev, false,
3373 "nvm read failed, resp = 0x%08x\n", resp);
3374 rc = ECORE_UNKNOWN_ERROR;
3375 break;
3376 }
3377
3378 /* This can be a lengthy process, and it's possible scheduler
3379 * isn't preemptable. Sleep a bit to prevent CPU hogging.
3380 */
3381 if (bytes_left % 0x1000 <
3382 (bytes_left - buf_size) % 0x1000)
3383 OSAL_MSLEEP(1);
3384
3385 offset += buf_size;
3386 bytes_left -= buf_size;
3387 }
3388
3389 p_dev->mcp_nvm_resp = resp;
3390 ecore_ptt_release(p_hwfn, p_ptt);
3391
3392 return rc;
3393 }
3394
3395 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
3396 u32 addr, u8 *p_buf, u32 len)
3397 {
3398 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3399 struct ecore_ptt *p_ptt;
3400 u32 resp, param;
3401 enum _ecore_status_t rc;
3402
3403 p_ptt = ecore_ptt_acquire(p_hwfn);
3404 if (!p_ptt)
3405 return ECORE_BUSY;
3406
3407 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3408 (cmd == ECORE_PHY_CORE_READ) ?
3409 DRV_MSG_CODE_PHY_CORE_READ :
3410 DRV_MSG_CODE_PHY_RAW_READ,
3411 addr, &resp, ¶m, &len, (u32 *)p_buf);
3412 if (rc != ECORE_SUCCESS)
3413 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3414
3415 p_dev->mcp_nvm_resp = resp;
3416 ecore_ptt_release(p_hwfn, p_ptt);
3417
3418 return rc;
3419 }
3420
3421 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3422 {
3423 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3424 struct ecore_ptt *p_ptt;
3425
3426 p_ptt = ecore_ptt_acquire(p_hwfn);
3427 if (!p_ptt)
3428 return ECORE_BUSY;
3429
3430 OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3431 ecore_ptt_release(p_hwfn, p_ptt);
3432
3433 return ECORE_SUCCESS;
3434 }
3435
3436 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
3437 u32 addr)
3438 {
3439 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3440 struct ecore_ptt *p_ptt;
3441 u32 resp, param;
3442 enum _ecore_status_t rc;
3443
3444 p_ptt = ecore_ptt_acquire(p_hwfn);
3445 if (!p_ptt)
3446 return ECORE_BUSY;
3447 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3448 &resp, ¶m);
3449 p_dev->mcp_nvm_resp = resp;
3450 ecore_ptt_release(p_hwfn, p_ptt);
3451
3452 return rc;
3453 }
3454
3455 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3456 u32 addr)
3457 {
3458 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3459 struct ecore_ptt *p_ptt;
3460 u32 resp, param;
3461 enum _ecore_status_t rc;
3462
3463 p_ptt = ecore_ptt_acquire(p_hwfn);
3464 if (!p_ptt)
3465 return ECORE_BUSY;
3466 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3467 &resp, ¶m);
3468 p_dev->mcp_nvm_resp = resp;
3469 ecore_ptt_release(p_hwfn, p_ptt);
3470
3471 return rc;
3472 }
3473
3474 /* rc recieves ECORE_INVAL as default parameter because
3475 * it might not enter the while loop if the len is 0
3476 */
3477 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3478 u32 addr, u8 *p_buf, u32 len)
3479 {
3480 u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
3481 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3482 enum _ecore_status_t rc = ECORE_INVAL;
3483 struct ecore_ptt *p_ptt;
3484
3485 p_ptt = ecore_ptt_acquire(p_hwfn);
3486 if (!p_ptt)
3487 return ECORE_BUSY;
3488
3489 switch (cmd) {
3490 case ECORE_PUT_FILE_DATA:
3491 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3492 break;
3493 case ECORE_NVM_WRITE_NVRAM:
3494 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3495 break;
3496 case ECORE_EXT_PHY_FW_UPGRADE:
3497 nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3498 break;
3499 case ECORE_ENCRYPT_PASSWORD:
3500 nvm_cmd = DRV_MSG_CODE_ENCRYPT_PASSWORD;
3501 break;
3502 default:
3503 DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3504 cmd);
3505 rc = ECORE_INVAL;
3506 goto out;
3507 }
3508
3509 buf_idx = 0;
3510 while (buf_idx < len) {
3511 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3512 MCP_DRV_NVM_BUF_LEN);
3513 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3514 addr) +
3515 buf_idx;
3516 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3517 &resp, ¶m, buf_size,
3518 (u32 *)&p_buf[buf_idx]);
3519 if (rc != ECORE_SUCCESS) {
3520 DP_NOTICE(p_dev, false,
3521 "ecore_mcp_nvm_write() failed, rc = %d\n",
3522 rc);
3523 resp = FW_MSG_CODE_ERROR;
3524 break;
3525 }
3526
3527 if (resp != FW_MSG_CODE_OK &&
3528 resp != FW_MSG_CODE_NVM_OK &&
3529 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3530 DP_NOTICE(p_dev, false,
3531 "nvm write failed, resp = 0x%08x\n", resp);
3532 rc = ECORE_UNKNOWN_ERROR;
3533 break;
3534 }
3535
3536 /* This can be a lengthy process, and it's possible scheduler
3537 * isn't preemptable. Sleep a bit to prevent CPU hogging.
3538 */
3539 if (buf_idx % 0x1000 >
3540 (buf_idx + buf_size) % 0x1000)
3541 OSAL_MSLEEP(1);
3542
3543 buf_idx += buf_size;
3544 }
3545
3546 p_dev->mcp_nvm_resp = resp;
3547 out:
3548 ecore_ptt_release(p_hwfn, p_ptt);
3549
3550 return rc;
3551 }
3552
3553 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3554 u32 addr, u8 *p_buf, u32 len)
3555 {
3556 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3557 struct ecore_ptt *p_ptt;
3558 u32 resp, param, nvm_cmd;
3559 enum _ecore_status_t rc;
3560
3561 p_ptt = ecore_ptt_acquire(p_hwfn);
3562 if (!p_ptt)
3563 return ECORE_BUSY;
3564
3565 nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3566 DRV_MSG_CODE_PHY_RAW_WRITE;
3567 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3568 &resp, ¶m, len, (u32 *)p_buf);
3569 if (rc != ECORE_SUCCESS)
3570 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3571 p_dev->mcp_nvm_resp = resp;
3572 ecore_ptt_release(p_hwfn, p_ptt);
3573
3574 return rc;
3575 }
3576
3577 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3578 u32 addr)
3579 {
3580 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3581 struct ecore_ptt *p_ptt;
3582 u32 resp, param;
3583 enum _ecore_status_t rc;
3584
3585 p_ptt = ecore_ptt_acquire(p_hwfn);
3586 if (!p_ptt)
3587 return ECORE_BUSY;
3588
3589 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3590 &resp, ¶m);
3591 p_dev->mcp_nvm_resp = resp;
3592 ecore_ptt_release(p_hwfn, p_ptt);
3593
3594 return rc;
3595 }
3596
3597 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3598 struct ecore_ptt *p_ptt,
3599 u32 port, u32 addr, u32 offset,
3600 u32 len, u8 *p_buf)
3601 {
3602 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3603 u32 resp, param;
3604 enum _ecore_status_t rc;
3605
3606 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3607 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3608 addr = offset;
3609 offset = 0;
3610 bytes_left = len;
3611 while (bytes_left > 0) {
3612 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3613 MAX_I2C_TRANSACTION_SIZE);
3614 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3615 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3616 nvm_offset |= ((addr + offset) <<
3617 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3618 nvm_offset |= (bytes_to_copy <<
3619 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3620 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3621 DRV_MSG_CODE_TRANSCEIVER_READ,
3622 nvm_offset, &resp, ¶m, &buf_size,
3623 (u32 *)(p_buf + offset));
3624 if (rc != ECORE_SUCCESS) {
3625 DP_NOTICE(p_hwfn, false,
3626 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3627 rc);
3628 return rc;
3629 }
3630
3631 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3632 return ECORE_NODEV;
3633 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3634 return ECORE_UNKNOWN_ERROR;
3635
3636 offset += buf_size;
3637 bytes_left -= buf_size;
3638 }
3639
3640 return ECORE_SUCCESS;
3641 }
3642
3643 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3644 struct ecore_ptt *p_ptt,
3645 u32 port, u32 addr, u32 offset,
3646 u32 len, u8 *p_buf)
3647 {
3648 u32 buf_idx, buf_size, nvm_offset, resp, param;
3649 enum _ecore_status_t rc;
3650
3651 nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3652 (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3653 buf_idx = 0;
3654 while (buf_idx < len) {
3655 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3656 MAX_I2C_TRANSACTION_SIZE);
3657 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3658 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3659 nvm_offset |= ((offset + buf_idx) <<
3660 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3661 nvm_offset |= (buf_size <<
3662 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3663 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3664 DRV_MSG_CODE_TRANSCEIVER_WRITE,
3665 nvm_offset, &resp, ¶m, buf_size,
3666 (u32 *)&p_buf[buf_idx]);
3667 if (rc != ECORE_SUCCESS) {
3668 DP_NOTICE(p_hwfn, false,
3669 "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3670 rc);
3671 return rc;
3672 }
3673
3674 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3675 return ECORE_NODEV;
3676 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3677 return ECORE_UNKNOWN_ERROR;
3678
3679 buf_idx += buf_size;
3680 }
3681
3682 return ECORE_SUCCESS;
3683 }
3684
3685 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3686 struct ecore_ptt *p_ptt,
3687 u16 gpio, u32 *gpio_val)
3688 {
3689 enum _ecore_status_t rc = ECORE_SUCCESS;
3690 u32 drv_mb_param = 0, rsp;
3691
3692 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3693
3694 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3695 drv_mb_param, &rsp, gpio_val);
3696
3697 if (rc != ECORE_SUCCESS)
3698 return rc;
3699
3700 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3701 return ECORE_UNKNOWN_ERROR;
3702
3703 return ECORE_SUCCESS;
3704 }
3705
3706 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3707 struct ecore_ptt *p_ptt,
3708 u16 gpio, u16 gpio_val)
3709 {
3710 enum _ecore_status_t rc = ECORE_SUCCESS;
3711 u32 drv_mb_param = 0, param, rsp;
3712
3713 drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3714 (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3715
3716 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3717 drv_mb_param, &rsp, ¶m);
3718
3719 if (rc != ECORE_SUCCESS)
3720 return rc;
3721
3722 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3723 return ECORE_UNKNOWN_ERROR;
3724
3725 return ECORE_SUCCESS;
3726 }
3727
3728 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3729 struct ecore_ptt *p_ptt,
3730 u16 gpio, u32 *gpio_direction,
3731 u32 *gpio_ctrl)
3732 {
3733 u32 drv_mb_param = 0, rsp, val = 0;
3734 enum _ecore_status_t rc = ECORE_SUCCESS;
3735
3736 drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3737
3738 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3739 drv_mb_param, &rsp, &val);
3740 if (rc != ECORE_SUCCESS)
3741 return rc;
3742
3743 *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3744 DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3745 *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3746 DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3747
3748 if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3749 return ECORE_UNKNOWN_ERROR;
3750
3751 return ECORE_SUCCESS;
3752 }
3753
3754 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3755 struct ecore_ptt *p_ptt)
3756 {
3757 u32 drv_mb_param = 0, rsp, param;
3758 enum _ecore_status_t rc = ECORE_SUCCESS;
3759
3760 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3761 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3762
3763 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3764 drv_mb_param, &rsp, ¶m);
3765
3766 if (rc != ECORE_SUCCESS)
3767 return rc;
3768
3769 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3770 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3771 rc = ECORE_UNKNOWN_ERROR;
3772
3773 return rc;
3774 }
3775
3776 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3777 struct ecore_ptt *p_ptt)
3778 {
3779 u32 drv_mb_param, rsp, param;
3780 enum _ecore_status_t rc = ECORE_SUCCESS;
3781
3782 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3783 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3784
3785 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3786 drv_mb_param, &rsp, ¶m);
3787
3788 if (rc != ECORE_SUCCESS)
3789 return rc;
3790
3791 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3792 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3793 rc = ECORE_UNKNOWN_ERROR;
3794
3795 return rc;
3796 }
3797
3798 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3799 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3800 {
3801 u32 drv_mb_param = 0, rsp;
3802 enum _ecore_status_t rc = ECORE_SUCCESS;
3803
3804 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3805 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3806
3807 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3808 drv_mb_param, &rsp, num_images);
3809
3810 if (rc != ECORE_SUCCESS)
3811 return rc;
3812
3813 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3814 rc = ECORE_UNKNOWN_ERROR;
3815
3816 return rc;
3817 }
3818
3819 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3820 struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3821 struct bist_nvm_image_att *p_image_att, u32 image_index)
3822 {
3823 u32 buf_size, nvm_offset, resp, param;
3824 enum _ecore_status_t rc;
3825
3826 nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3827 DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3828 nvm_offset |= (image_index <<
3829 DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3830 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3831 nvm_offset, &resp, ¶m, &buf_size,
3832 (u32 *)p_image_att);
3833 if (rc != ECORE_SUCCESS)
3834 return rc;
3835
3836 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3837 (p_image_att->return_code != 1))
3838 rc = ECORE_UNKNOWN_ERROR;
3839
3840 return rc;
3841 }
3842
3843 enum _ecore_status_t
3844 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3845 enum ecore_nvm_images image_id,
3846 struct ecore_nvm_image_att *p_image_att)
3847 {
3848 struct bist_nvm_image_att mfw_image_att;
3849 enum nvm_image_type type;
3850 u32 num_images, i;
3851 enum _ecore_status_t rc;
3852
3853 /* Translate image_id into MFW definitions */
3854 switch (image_id) {
3855 case ECORE_NVM_IMAGE_ISCSI_CFG:
3856 type = NVM_TYPE_ISCSI_CFG;
3857 break;
3858 case ECORE_NVM_IMAGE_FCOE_CFG:
3859 type = NVM_TYPE_FCOE_CFG;
3860 break;
3861 case ECORE_NVM_IMAGE_MDUMP:
3862 type = NVM_TYPE_MDUMP;
3863 break;
3864 default:
3865 DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3866 image_id);
3867 return ECORE_INVAL;
3868 }
3869
3870 /* Learn number of images, then traverse and see if one fits */
3871 rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3872 if (rc != ECORE_SUCCESS || !num_images)
3873 return ECORE_INVAL;
3874
3875 for (i = 0; i < num_images; i++) {
3876 rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3877 &mfw_image_att, i);
3878 if (rc != ECORE_SUCCESS)
3879 return rc;
3880
3881 if (type == mfw_image_att.image_type)
3882 break;
3883 }
3884 if (i == num_images) {
3885 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3886 "Failed to find nvram image of type %08x\n",
3887 image_id);
3888 return ECORE_INVAL;
3889 }
3890
3891 p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3892 p_image_att->length = mfw_image_att.len;
3893
3894 return ECORE_SUCCESS;
3895 }
3896
3897 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3898 struct ecore_ptt *p_ptt,
3899 enum ecore_nvm_images image_id,
3900 u8 *p_buffer, u32 buffer_len)
3901 {
3902 struct ecore_nvm_image_att image_att;
3903 enum _ecore_status_t rc;
3904
3905 OSAL_MEM_ZERO(p_buffer, buffer_len);
3906
3907 rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3908 if (rc != ECORE_SUCCESS)
3909 return rc;
3910
3911 /* Validate sizes - both the image's and the supplied buffer's */
3912 if (image_att.length <= 4) {
3913 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3914 "Image [%d] is too small - only %d bytes\n",
3915 image_id, image_att.length);
3916 return ECORE_INVAL;
3917 }
3918
3919 /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
3920 image_att.length -= 4;
3921
3922 if (image_att.length > buffer_len) {
3923 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3924 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3925 image_id, image_att.length, buffer_len);
3926 return ECORE_NOMEM;
3927 }
3928
3929 return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3930 p_buffer, image_att.length);
3931 }
3932
3933 enum _ecore_status_t
3934 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3935 struct ecore_ptt *p_ptt,
3936 struct ecore_temperature_info *p_temp_info)
3937 {
3938 struct ecore_temperature_sensor *p_temp_sensor;
3939 struct temperature_status_stc mfw_temp_info;
3940 struct ecore_mcp_mb_params mb_params;
3941 u32 val;
3942 enum _ecore_status_t rc;
3943 u8 i;
3944
3945 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3946 mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3947 mb_params.p_data_dst = &mfw_temp_info;
3948 mb_params.data_dst_size = sizeof(mfw_temp_info);
3949 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3950 if (rc != ECORE_SUCCESS)
3951 return rc;
3952
3953 OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3954 p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3955 ECORE_MAX_NUM_OF_SENSORS);
3956 for (i = 0; i < p_temp_info->num_sensors; i++) {
3957 val = mfw_temp_info.sensor[i];
3958 p_temp_sensor = &p_temp_info->sensors[i];
3959 p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3960 SENSOR_LOCATION_OFFSET;
3961 p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3962 THRESHOLD_HIGH_OFFSET;
3963 p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3964 CRITICAL_TEMPERATURE_OFFSET;
3965 p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3966 CURRENT_TEMP_OFFSET;
3967 }
3968
3969 return ECORE_SUCCESS;
3970 }
3971
3972 enum _ecore_status_t ecore_mcp_get_mba_versions(
3973 struct ecore_hwfn *p_hwfn,
3974 struct ecore_ptt *p_ptt,
3975 struct ecore_mba_vers *p_mba_vers)
3976 {
3977 u32 buf_size, resp, param;
3978 enum _ecore_status_t rc;
3979
3980 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3981 0, &resp, ¶m, &buf_size,
3982 &(p_mba_vers->mba_vers[0]));
3983
3984 if (rc != ECORE_SUCCESS)
3985 return rc;
3986
3987 if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3988 rc = ECORE_UNKNOWN_ERROR;
3989
3990 if (buf_size != MCP_DRV_NVM_BUF_LEN)
3991 rc = ECORE_UNKNOWN_ERROR;
3992
3993 return rc;
3994 }
3995
3996 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3997 struct ecore_ptt *p_ptt,
3998 u64 *num_events)
3999 {
4000 struct ecore_mcp_mb_params mb_params;
4001
4002 OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
4003 mb_params.cmd = DRV_MSG_CODE_MEM_ECC_EVENTS;
4004 mb_params.p_data_dst = (union drv_union_data *)num_events;
4005
4006 return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4007 }
4008
4009 static enum resource_id_enum
4010 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
4011 {
4012 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
4013
4014 switch (res_id) {
4015 case ECORE_SB:
4016 mfw_res_id = RESOURCE_NUM_SB_E;
4017 break;
4018 case ECORE_L2_QUEUE:
4019 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
4020 break;
4021 case ECORE_VPORT:
4022 mfw_res_id = RESOURCE_NUM_VPORT_E;
4023 break;
4024 case ECORE_RSS_ENG:
4025 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
4026 break;
4027 case ECORE_PQ:
4028 mfw_res_id = RESOURCE_NUM_PQ_E;
4029 break;
4030 case ECORE_RL:
4031 mfw_res_id = RESOURCE_NUM_RL_E;
4032 break;
4033 case ECORE_MAC:
4034 case ECORE_VLAN:
4035 /* Each VFC resource can accommodate both a MAC and a VLAN */
4036 mfw_res_id = RESOURCE_VFC_FILTER_E;
4037 break;
4038 case ECORE_ILT:
4039 mfw_res_id = RESOURCE_ILT_E;
4040 break;
4041 case ECORE_LL2_QUEUE:
4042 mfw_res_id = RESOURCE_LL2_QUEUE_E;
4043 break;
4044 case ECORE_RDMA_CNQ_RAM:
4045 case ECORE_CMDQS_CQS:
4046 /* CNQ/CMDQS are the same resource */
4047 mfw_res_id = RESOURCE_CQS_E;
4048 break;
4049 case ECORE_RDMA_STATS_QUEUE:
4050 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
4051 break;
4052 case ECORE_BDQ:
4053 mfw_res_id = RESOURCE_BDQ_E;
4054 break;
4055 default:
4056 break;
4057 }
4058
4059 return mfw_res_id;
4060 }
4061
4062 #define ECORE_RESC_ALLOC_VERSION_MAJOR 2
4063 #define ECORE_RESC_ALLOC_VERSION_MINOR 0
4064 #define ECORE_RESC_ALLOC_VERSION \
4065 ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
4066 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
4067 (ECORE_RESC_ALLOC_VERSION_MINOR << \
4068 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
4069
4070 struct ecore_resc_alloc_in_params {
4071 u32 cmd;
4072 enum ecore_resources res_id;
4073 u32 resc_max_val;
4074 };
4075
4076 struct ecore_resc_alloc_out_params {
4077 u32 mcp_resp;
4078 u32 mcp_param;
4079 u32 resc_num;
4080 u32 resc_start;
4081 u32 vf_resc_num;
4082 u32 vf_resc_start;
4083 u32 flags;
4084 };
4085
4086 static enum _ecore_status_t
4087 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
4088 struct ecore_ptt *p_ptt,
4089 struct ecore_resc_alloc_in_params *p_in_params,
4090 struct ecore_resc_alloc_out_params *p_out_params)
4091 {
4092 struct ecore_mcp_mb_params mb_params;
4093 struct resource_info mfw_resc_info;
4094 enum _ecore_status_t rc;
4095
4096 OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
4097
4098 mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
4099 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
4100 DP_ERR(p_hwfn,
4101 "Failed to match resource %d [%s] with the MFW resources\n",
4102 p_in_params->res_id,
4103 ecore_hw_get_resc_name(p_in_params->res_id));
4104 return ECORE_INVAL;
4105 }
4106
4107 switch (p_in_params->cmd) {
4108 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
4109 mfw_resc_info.size = p_in_params->resc_max_val;
4110 /* Fallthrough */
4111 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
4112 break;
4113 default:
4114 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
4115 p_in_params->cmd);
4116 return ECORE_INVAL;
4117 }
4118
4119 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4120 mb_params.cmd = p_in_params->cmd;
4121 mb_params.param = ECORE_RESC_ALLOC_VERSION;
4122 mb_params.p_data_src = &mfw_resc_info;
4123 mb_params.data_src_size = sizeof(mfw_resc_info);
4124 mb_params.p_data_dst = mb_params.p_data_src;
4125 mb_params.data_dst_size = mb_params.data_src_size;
4126
4127 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4128 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
4129 p_in_params->cmd, p_in_params->res_id,
4130 ecore_hw_get_resc_name(p_in_params->res_id),
4131 GET_MFW_FIELD(mb_params.param,
4132 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
4133 GET_MFW_FIELD(mb_params.param,
4134 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
4135 p_in_params->resc_max_val);
4136
4137 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4138 if (rc != ECORE_SUCCESS)
4139 return rc;
4140
4141 p_out_params->mcp_resp = mb_params.mcp_resp;
4142 p_out_params->mcp_param = mb_params.mcp_param;
4143 p_out_params->resc_num = mfw_resc_info.size;
4144 p_out_params->resc_start = mfw_resc_info.offset;
4145 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
4146 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
4147 p_out_params->flags = mfw_resc_info.flags;
4148
4149 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4150 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
4151 GET_MFW_FIELD(p_out_params->mcp_param,
4152 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
4153 GET_MFW_FIELD(p_out_params->mcp_param,
4154 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
4155 p_out_params->resc_num, p_out_params->resc_start,
4156 p_out_params->vf_resc_num, p_out_params->vf_resc_start,
4157 p_out_params->flags);
4158
4159 return ECORE_SUCCESS;
4160 }
4161
4162 enum _ecore_status_t
4163 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4164 enum ecore_resources res_id, u32 resc_max_val,
4165 u32 *p_mcp_resp)
4166 {
4167 struct ecore_resc_alloc_out_params out_params;
4168 struct ecore_resc_alloc_in_params in_params;
4169 enum _ecore_status_t rc;
4170
4171 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
4172 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
4173 in_params.res_id = res_id;
4174 in_params.resc_max_val = resc_max_val;
4175 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
4176 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
4177 &out_params);
4178 if (rc != ECORE_SUCCESS)
4179 return rc;
4180
4181 *p_mcp_resp = out_params.mcp_resp;
4182
4183 return ECORE_SUCCESS;
4184 }
4185
4186 enum _ecore_status_t
4187 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4188 enum ecore_resources res_id, u32 *p_mcp_resp,
4189 u32 *p_resc_num, u32 *p_resc_start)
4190 {
4191 struct ecore_resc_alloc_out_params out_params;
4192 struct ecore_resc_alloc_in_params in_params;
4193 enum _ecore_status_t rc;
4194
4195 OSAL_MEM_ZERO(&in_params, sizeof(in_params));
4196 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
4197 in_params.res_id = res_id;
4198 OSAL_MEM_ZERO(&out_params, sizeof(out_params));
4199 rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
4200 &out_params);
4201 if (rc != ECORE_SUCCESS)
4202 return rc;
4203
4204 *p_mcp_resp = out_params.mcp_resp;
4205
4206 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
4207 *p_resc_num = out_params.resc_num;
4208 *p_resc_start = out_params.resc_start;
4209 }
4210
4211 return ECORE_SUCCESS;
4212 }
4213
4214 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
4215 struct ecore_ptt *p_ptt)
4216 {
4217 u32 mcp_resp, mcp_param;
4218
4219 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
4220 &mcp_resp, &mcp_param);
4221 }
4222
4223 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
4224 struct ecore_ptt *p_ptt,
4225 u8 lldp_mac_addr[ETH_ALEN])
4226 {
4227 struct ecore_mcp_mb_params mb_params;
4228 struct mcp_mac lldp_mac;
4229 enum _ecore_status_t rc;
4230
4231 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4232 mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
4233 mb_params.p_data_dst = &lldp_mac;
4234 mb_params.data_dst_size = sizeof(lldp_mac);
4235 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4236 if (rc != ECORE_SUCCESS)
4237 return rc;
4238
4239 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4240 DP_NOTICE(p_hwfn, false,
4241 "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
4242 mb_params.mcp_resp);
4243 return ECORE_INVAL;
4244 }
4245
4246 *(u16 *)lldp_mac_addr = OSAL_BE16_TO_CPU(*(u16 *)&lldp_mac.mac_upper);
4247 *(u32 *)(lldp_mac_addr + 2) = OSAL_BE32_TO_CPU(lldp_mac.mac_lower);
4248
4249 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4250 "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
4251 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
4252 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
4253
4254 return ECORE_SUCCESS;
4255 }
4256
4257 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
4258 struct ecore_ptt *p_ptt,
4259 u8 lldp_mac_addr[ETH_ALEN])
4260 {
4261 struct ecore_mcp_mb_params mb_params;
4262 struct mcp_mac lldp_mac;
4263 enum _ecore_status_t rc;
4264
4265 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4266 "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
4267 lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
4268 lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
4269
4270 OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
4271 lldp_mac.mac_upper = OSAL_CPU_TO_BE16(*(u16 *)lldp_mac_addr);
4272 lldp_mac.mac_lower = OSAL_CPU_TO_BE32(*(u32 *)(lldp_mac_addr + 2));
4273
4274 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4275 mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
4276 mb_params.p_data_src = &lldp_mac;
4277 mb_params.data_src_size = sizeof(lldp_mac);
4278 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4279 if (rc != ECORE_SUCCESS)
4280 return rc;
4281
4282 if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4283 DP_NOTICE(p_hwfn, false,
4284 "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
4285 mb_params.mcp_resp);
4286 return ECORE_INVAL;
4287 }
4288
4289 return ECORE_SUCCESS;
4290 }
4291
4292 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
4293 struct ecore_ptt *p_ptt,
4294 u32 param, u32 *p_mcp_resp,
4295 u32 *p_mcp_param)
4296 {
4297 enum _ecore_status_t rc;
4298
4299 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
4300 p_mcp_resp, p_mcp_param);
4301 if (rc != ECORE_SUCCESS)
4302 return rc;
4303
4304 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4305 DP_INFO(p_hwfn,
4306 "The resource command is unsupported by the MFW\n");
4307 return ECORE_NOTIMPL;
4308 }
4309
4310 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
4311 u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
4312
4313 DP_NOTICE(p_hwfn, false,
4314 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
4315 param, opcode);
4316 return ECORE_INVAL;
4317 }
4318
4319 return rc;
4320 }
4321
4322 static enum _ecore_status_t
4323 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4324 struct ecore_resc_lock_params *p_params)
4325 {
4326 u32 param = 0, mcp_resp, mcp_param;
4327 u8 opcode, timeout;
4328 enum _ecore_status_t rc;
4329
4330 switch (p_params->timeout) {
4331 case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
4332 opcode = RESOURCE_OPCODE_REQ;
4333 timeout = 0;
4334 break;
4335 case ECORE_MCP_RESC_LOCK_TO_NONE:
4336 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
4337 timeout = 0;
4338 break;
4339 default:
4340 opcode = RESOURCE_OPCODE_REQ_W_AGING;
4341 timeout = p_params->timeout;
4342 break;
4343 }
4344
4345 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
4346 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
4347 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout);
4348
4349 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4350 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
4351 param, timeout, opcode, p_params->resource);
4352
4353 /* Attempt to acquire the resource */
4354 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4355 &mcp_param);
4356 if (rc != ECORE_SUCCESS)
4357 return rc;
4358
4359 /* Analyze the response */
4360 p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
4361 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4362
4363 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4364 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
4365 mcp_param, opcode, p_params->owner);
4366
4367 switch (opcode) {
4368 case RESOURCE_OPCODE_GNT:
4369 p_params->b_granted = true;
4370 break;
4371 case RESOURCE_OPCODE_BUSY:
4372 p_params->b_granted = false;
4373 break;
4374 default:
4375 DP_NOTICE(p_hwfn, false,
4376 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
4377 mcp_param, opcode);
4378 return ECORE_INVAL;
4379 }
4380
4381 return ECORE_SUCCESS;
4382 }
4383
4384 enum _ecore_status_t
4385 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4386 struct ecore_resc_lock_params *p_params)
4387 {
4388 u32 retry_cnt = 0;
4389 enum _ecore_status_t rc;
4390
4391 do {
4392 /* No need for an interval before the first iteration */
4393 if (retry_cnt) {
4394 if (p_params->sleep_b4_retry) {
4395 u32 retry_interval_in_ms =
4396 DIV_ROUND_UP(p_params->retry_interval,
4397 1000);
4398
4399 OSAL_MSLEEP(retry_interval_in_ms);
4400 } else {
4401 OSAL_UDELAY(p_params->retry_interval);
4402 }
4403 }
4404
4405 rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
4406 if (rc != ECORE_SUCCESS)
4407 return rc;
4408
4409 if (p_params->b_granted)
4410 break;
4411 } while (retry_cnt++ < p_params->retry_num);
4412
4413 return ECORE_SUCCESS;
4414 }
4415
4416 enum _ecore_status_t
4417 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4418 struct ecore_resc_unlock_params *p_params)
4419 {
4420 u32 param = 0, mcp_resp, mcp_param;
4421 u8 opcode;
4422 enum _ecore_status_t rc;
4423
4424 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
4425 : RESOURCE_OPCODE_RELEASE;
4426 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
4427 SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
4428
4429 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4430 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
4431 param, opcode, p_params->resource);
4432
4433 /* Attempt to release the resource */
4434 rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4435 &mcp_param);
4436 if (rc != ECORE_SUCCESS)
4437 return rc;
4438
4439 /* Analyze the response */
4440 opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4441
4442 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4443 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4444 mcp_param, opcode);
4445
4446 switch (opcode) {
4447 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4448 DP_INFO(p_hwfn,
4449 "Resource unlock request for an already released resource [%d]\n",
4450 p_params->resource);
4451 /* Fallthrough */
4452 case RESOURCE_OPCODE_RELEASED:
4453 p_params->b_released = true;
4454 break;
4455 case RESOURCE_OPCODE_WRONG_OWNER:
4456 p_params->b_released = false;
4457 break;
4458 default:
4459 DP_NOTICE(p_hwfn, false,
4460 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4461 mcp_param, opcode);
4462 return ECORE_INVAL;
4463 }
4464
4465 return ECORE_SUCCESS;
4466 }
4467
4468 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
4469 struct ecore_resc_unlock_params *p_unlock,
4470 enum ecore_resc_lock resource,
4471 bool b_is_permanent)
4472 {
4473 if (p_lock != OSAL_NULL) {
4474 OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
4475
4476 /* Permanent resources don't require aging, and there's no
4477 * point in trying to acquire them more than once since it's
4478 * unexpected another entity would release them.
4479 */
4480 if (b_is_permanent) {
4481 p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
4482 } else {
4483 p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
4484 p_lock->retry_interval =
4485 ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
4486 p_lock->sleep_b4_retry = true;
4487 }
4488
4489 p_lock->resource = resource;
4490 }
4491
4492 if (p_unlock != OSAL_NULL) {
4493 OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
4494 p_unlock->resource = resource;
4495 }
4496 }
4497
4498 enum _ecore_status_t
4499 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4500 u16 vlan)
4501 {
4502 u32 resp = 0, param = 0;
4503 enum _ecore_status_t rc;
4504
4505 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4506 (u32)vlan << DRV_MB_PARAM_FCOE_CVID_OFFSET,
4507 &resp, ¶m);
4508 if (rc != ECORE_SUCCESS)
4509 DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);
4510
4511 return rc;
4512 }
4513
4514 enum _ecore_status_t
4515 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4516 struct ecore_ptt *p_ptt, u8 *wwn)
4517 {
4518 struct ecore_mcp_mb_params mb_params;
4519 struct mcp_wwn fabric_name;
4520 enum _ecore_status_t rc;
4521
4522 OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4523 fabric_name.wwn_upper = *(u32 *)wwn;
4524 fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4525
4526 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4527 mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4528 mb_params.p_data_src = &fabric_name;
4529 mb_params.data_src_size = sizeof(fabric_name);
4530 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4531 if (rc != ECORE_SUCCESS)
4532 DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);
4533
4534 return rc;
4535 }
4536
4537 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4538 u32 offset, u32 val)
4539 {
4540 struct ecore_mcp_mb_params mb_params = {0};
4541 enum _ecore_status_t rc = ECORE_SUCCESS;
4542 u32 dword = val;
4543
4544 mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4545 mb_params.param = offset;
4546 mb_params.p_data_src = &dword;
4547 mb_params.data_src_size = sizeof(dword);
4548
4549 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4550 if (rc != ECORE_SUCCESS) {
4551 DP_NOTICE(p_hwfn, false,
4552 "Failed to wol write request, rc = %d\n", rc);
4553 }
4554
4555 if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
4556 DP_NOTICE(p_hwfn, false,
4557 "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4558 val, offset, mb_params.mcp_resp);
4559 rc = ECORE_UNKNOWN_ERROR;
4560 }
4561 }
4562
4563 bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
4564 {
4565 return !!(p_hwfn->mcp_info->capabilities &
4566 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
4567 }
4568
4569 bool ecore_mcp_rlx_odr_supported(struct ecore_hwfn *p_hwfn)
4570 {
4571 return !!(p_hwfn->mcp_info->capabilities &
4572 FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD);
4573 }
4574
4575 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4576 struct ecore_ptt *p_ptt)
4577 {
4578 u32 mcp_resp;
4579 enum _ecore_status_t rc;
4580
4581 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4582 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4583 if (rc == ECORE_SUCCESS)
4584 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4585 "MFW supported features: %08x\n",
4586 p_hwfn->mcp_info->capabilities);
4587
4588 return rc;
4589 }
4590
4591 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4592 struct ecore_ptt *p_ptt)
4593 {
4594 u32 mcp_resp, mcp_param, features;
4595
4596 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4597 DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
4598 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
4599
4600 return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4601 features, &mcp_resp, &mcp_param);
4602 }
4603
4604 enum _ecore_status_t
4605 ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4606 struct ecore_mcp_drv_attr *p_drv_attr)
4607 {
4608 struct attribute_cmd_write_stc attr_cmd_write;
4609 enum _attribute_commands_e mfw_attr_cmd;
4610 struct ecore_mcp_mb_params mb_params;
4611 enum _ecore_status_t rc;
4612
4613 switch (p_drv_attr->attr_cmd) {
4614 case ECORE_MCP_DRV_ATTR_CMD_READ:
4615 mfw_attr_cmd = ATTRIBUTE_CMD_READ;
4616 break;
4617 case ECORE_MCP_DRV_ATTR_CMD_WRITE:
4618 mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
4619 break;
4620 case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
4621 mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
4622 break;
4623 case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
4624 mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
4625 break;
4626 default:
4627 DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
4628 p_drv_attr->attr_cmd);
4629 return ECORE_INVAL;
4630 }
4631
4632 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4633 mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
4634 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
4635 p_drv_attr->attr_num);
4636 SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
4637 mfw_attr_cmd);
4638 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
4639 OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
4640 attr_cmd_write.val = p_drv_attr->val;
4641 attr_cmd_write.mask = p_drv_attr->mask;
4642 attr_cmd_write.offset = p_drv_attr->offset;
4643
4644 mb_params.p_data_src = &attr_cmd_write;
4645 mb_params.data_src_size = sizeof(attr_cmd_write);
4646 }
4647
4648 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4649 if (rc != ECORE_SUCCESS)
4650 return rc;
4651
4652 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4653 DP_INFO(p_hwfn,
4654 "The attribute command is not supported by the MFW\n");
4655 return ECORE_NOTIMPL;
4656 } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
4657 DP_INFO(p_hwfn,
4658 "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
4659 mb_params.mcp_resp, p_drv_attr->attr_cmd,
4660 p_drv_attr->attr_num);
4661 return ECORE_INVAL;
4662 }
4663
4664 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4665 "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
4666 p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
4667 p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
4668 mb_params.mcp_param);
4669
4670 if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
4671 p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
4672 p_drv_attr->val = mb_params.mcp_param;
4673
4674 return ECORE_SUCCESS;
4675 }
4676
4677 enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
4678 struct ecore_ptt *p_ptt)
4679 {
4680 struct ecore_dev *p_dev = p_hwfn->p_dev;
4681 struct ecore_mcp_mb_params mb_params;
4682 u8 fir_valid, l2_valid;
4683 enum _ecore_status_t rc;
4684
4685 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4686 mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
4687 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4688 if (rc != ECORE_SUCCESS)
4689 return rc;
4690
4691 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4692 DP_INFO(p_hwfn,
4693 "The get_engine_config command is unsupported by the MFW\n");
4694 return ECORE_NOTIMPL;
4695 }
4696
4697 fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
4698 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
4699 if (fir_valid)
4700 p_dev->fir_affin =
4701 GET_MFW_FIELD(mb_params.mcp_param,
4702 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
4703
4704 l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
4705 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
4706 if (l2_valid)
4707 p_dev->l2_affin_hint =
4708 GET_MFW_FIELD(mb_params.mcp_param,
4709 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
4710
4711 DP_INFO(p_hwfn,
4712 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
4713 fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
4714
4715 return ECORE_SUCCESS;
4716 }
4717
4718 enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
4719 struct ecore_ptt *p_ptt)
4720 {
4721 struct ecore_dev *p_dev = p_hwfn->p_dev;
4722 struct ecore_mcp_mb_params mb_params;
4723 enum _ecore_status_t rc;
4724
4725 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4726 mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
4727 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4728 if (rc != ECORE_SUCCESS)
4729 return rc;
4730
4731 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4732 DP_INFO(p_hwfn,
4733 "The get_ppfid_bitmap command is unsupported by the MFW\n");
4734 return ECORE_NOTIMPL;
4735 }
4736
4737 p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
4738 FW_MB_PARAM_PPFID_BITMAP);
4739
4740 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
4741 p_dev->ppfid_bitmap);
4742
4743 return ECORE_SUCCESS;
4744 }
4745
4746 enum _ecore_status_t
4747 ecore_mcp_ind_table_lock(struct ecore_hwfn *p_hwfn,
4748 struct ecore_ptt *p_ptt,
4749 u8 retry_num,
4750 u32 retry_interval)
4751 {
4752 struct ecore_resc_lock_params resc_lock_params;
4753 enum _ecore_status_t rc;
4754
4755 OSAL_MEM_ZERO(&resc_lock_params,
4756 sizeof(struct ecore_resc_lock_params));
4757 resc_lock_params.resource = ECORE_RESC_LOCK_IND_TABLE;
4758 if (!retry_num)
4759 retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
4760 resc_lock_params.retry_num = retry_num;
4761
4762 if (!retry_interval)
4763 retry_interval = ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
4764 resc_lock_params.retry_interval = retry_interval;
4765
4766 rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
4767 if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
4768 DP_NOTICE(p_hwfn, false,
4769 "Failed to acquire the resource lock for IDT access\n");
4770 return ECORE_BUSY;
4771 }
4772 return rc;
4773 }
4774
4775 enum _ecore_status_t
4776 ecore_mcp_ind_table_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
4777 {
4778 struct ecore_resc_unlock_params resc_unlock_params;
4779 enum _ecore_status_t rc;
4780
4781 OSAL_MEM_ZERO(&resc_unlock_params,
4782 sizeof(struct ecore_resc_unlock_params));
4783 resc_unlock_params.resource = ECORE_RESC_LOCK_IND_TABLE;
4784 rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
4785 &resc_unlock_params);
4786 return rc;
4787 }
4788 #ifdef _NTDDK_
4789 #pragma warning(pop)
4790 #endif
Cache object: 16896c22a5c3ea8d0747dab9e0b7f176
|