/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_ops.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

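/* Bind the device to the internal RAM offsets (IRO) array compiled into the
 * firmware values; the rest of ecore uses it to locate firmware structures
 * in internal RAM.
 */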
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

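/* Store a single runtime register value; the entry is marked valid so that
 * ecore_init_rt() will later flush it to the chip.
 */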
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

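/* Store a block of runtime values starting at rt_offset, marking each
 * entry valid.
 */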
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

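/* Flush the runtime entries [rt_offset, rt_offset + size) to the chip at GRC
 * address 'addr'. Only entries previously marked valid are written; for
 * wide-bus registers, each contiguous run of valid entries goes out as a
 * single DMAE transaction.
 */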
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

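/* Allocate the runtime array shadow buffers. VFs don't run the init
 * sequence themselves, so nothing is allocated for them.
 */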
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

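/* Release the runtime array buffers allocated by ecore_init_alloc() */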
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}

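/* Write 'size' dwords from p_buf[dmae_data_offset] to GRC address 'addr',
 * using plain register writes for short sections (or when DMAE is not
 * allowed) and a single DMAE transaction otherwise.
 */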
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr, u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

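/* Zero 'fill_count' dwords at GRC address 'addr' via DMAE. The
 * ECORE_DMAE_FLAG_RW_REPL_SRC flag makes the engine replicate the source
 * buffer instead of advancing through host memory, so a small static zero
 * buffer suffices for any fill size.
 */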
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}

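/* Fill 'fill_count' dwords at GRC address 'addr' with 'fill' using plain
 * register writes.
 */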
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

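/* Handle an INIT_SRC_ARRAY write command: locate the array header inside the
 * firmware's value blob and write the payload out as a zipped, pattern
 * (repeated) or standard array.
 */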
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

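/* Comparison callbacks for the poll variants of the read command */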
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}

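/* Recursively evaluate the mode expression stored in prefix notation in the
 * firmware's modes tree buffer: INIT_MODE_OP_NOT/OR/AND nodes combine
 * sub-expressions, while any other value is a leaf naming a mode bit to test
 * against 'modes'. *p_offset is advanced past the consumed expression.
 */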
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

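/* Handle an IF_MODE op: return 0 if the mode expression matches (continue
 * with the next command), or the number of commands to skip otherwise.
 */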
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

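/* Handle an IF_PHASE op: return 0 if the command's phase/phase-id matches
 * the current one (ANY_PHASE_ID matches any phase-id), or the number of
 * commands to skip otherwise.
 */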
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

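/* Run the firmware's init-ops program for the given phase: iterate over the
 * command array, dispatching write/read/callback/delay ops and letting
 * IF_MODE/IF_PHASE ops skip over commands that don't apply.
 */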
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase,
				    int phase_id,
				    int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif
	return rc;
}

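/* Program the fixed global GTT windows; on FPGA/emulation, first kick off
 * the PGLUE_B PTT/GTT init and poll for its completion.
 */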
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

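/* Parse the firmware data blob and set up the fw_data pointers. With a
 * binary firmware the offsets come from the bin_buffer_hdr table at the
 * start of the blob; otherwise the statically compiled arrays are used.
 */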
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED *fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}