1 /*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File: qlnx_ioctl.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "qlnx_os.h"
37 #include "bcm_osal.h"
38
39 #include "reg_addr.h"
40 #include "ecore_gtt_reg_addr.h"
41 #include "ecore.h"
42 #include "ecore_chain.h"
43 #include "ecore_status.h"
44 #include "ecore_hw.h"
45 #include "ecore_rt_defs.h"
46 #include "ecore_init_ops.h"
47 #include "ecore_int.h"
48 #include "ecore_cxt.h"
49 #include "ecore_spq.h"
50 #include "ecore_init_fw_funcs.h"
51 #include "ecore_sp_commands.h"
52 #include "ecore_dev_api.h"
53 #include "ecore_l2_api.h"
54 #include "ecore_mcp.h"
55 #include "ecore_hw_defs.h"
56 #include "mcp_public.h"
57 #include "ecore_iro.h"
58 #include "nvm_cfg.h"
59 #include "ecore_dev_api.h"
60 #include "ecore_dbg_fw_funcs.h"
61 #include "ecore_dcbx_api.h"
62
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66 #include <sys/smp.h>
67
/* Forward declaration of the ioctl entry point registered in qlnx_cdevsw. */
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td);

/*
 * Character-device switch for the per-interface control node created by
 * qlnx_make_cdev(). Only the ioctl handler is implemented; all other
 * entry points use the kernel defaults.
 */
static struct cdevsw qlnx_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = qlnx_eioctl,
	.d_name = "qlnxioctl",
};
76
77 int
78 qlnx_make_cdev(qlnx_host_t *ha)
79 {
80 ha->ioctl_dev = make_dev(&qlnx_cdevsw,
81 ha->ifp->if_dunit,
82 UID_ROOT,
83 GID_WHEEL,
84 0600,
85 "%s",
86 if_name(ha->ifp));
87
88 if (ha->ioctl_dev == NULL)
89 return (-1);
90
91 ha->ioctl_dev->si_drv1 = ha;
92
93 return (0);
94 }
95
96 void
97 qlnx_del_cdev(qlnx_host_t *ha)
98 {
99 if (ha->ioctl_dev != NULL)
100 destroy_dev(ha->ioctl_dev);
101 return;
102 }
103
/*
 * qlnx_grc_dump
 *	Capture a GRC register dump for HW function hwfn_index into the
 *	pre-allocated ha->grcdump[hwfn_index] buffer. If a dump is already
 *	cached (ha->grcdump_dwords[hwfn_index] != 0, set by a previous call
 *	and cleared when the dump is consumed), only the cached size is
 *	reported and the hardware is not touched.
 *
 * Returns 0 on success with *num_dumped_dwords set, EINVAL if a PTT
 * window cannot be acquired, or the non-zero ecore DBG status code if
 * the dump itself fails.
 */
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* ecore takes the buffer size in dwords; grcdump_size[] is bytes. */
	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
141
142 static void
143 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
144 {
145 int i;
146
147 grcdump->pci_func = ha->pci_func;
148
149 for (i = 0; i < ha->cdev.num_hwfns; i++)
150 grcdump->grcdump_size[i] = ha->grcdump_size[i];
151
152 return;
153 }
154
/*
 * qlnx_get_grc_dump
 *	QLNX_GRC_DUMP ioctl handler: take (or reuse) a GRC dump for every
 *	HW function and copy each one out to the user-supplied buffers in
 *	grcdump->grcdump[].
 *
 * Returns 0 on success, EINVAL if any user buffer is missing or too
 * small, or a qlnx_grc_dump()/copyout() error code.
 */
static int
qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int i;
	int rval = 0;
	uint32_t dwords = 0;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		/* Validate buffers and sizes before touching the hardware. */
		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
			return (EINVAL);

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		/* Tell userland how many dwords of the buffer are valid. */
		grcdump->grcdump_dwords[i] = dwords;

		QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);

		/* The full driver-side buffer is copied out regardless. */
		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
				ha->grcdump_size[i]);

		if (rval)
			break;

		/* Mark this hwfn's cached dump as consumed. */
		ha->grcdump_dwords[i] = 0;
	}

	ha->grcdump_taken = 0;

	return (rval);
}
191
/*
 * qlnx_idle_chk
 *	Run a firmware idle-check dump for HW function hwfn_index into the
 *	pre-allocated ha->idle_chk[hwfn_index] buffer. Mirrors
 *	qlnx_grc_dump(): a previously taken, not-yet-consumed dump
 *	(ha->idle_chk_dwords[hwfn_index] != 0) is reported without
 *	re-running the check.
 *
 * Returns 0 on success with *num_dumped_dwords set, EINVAL if a PTT
 * window cannot be acquired, or the non-zero ecore DBG status code.
 */
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (rval);
	}

	/* Buffer size is passed in dwords; idle_chk_size[] is bytes. */
	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
229
230 static void
231 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
232 {
233 int i;
234
235 idle_chk->pci_func = ha->pci_func;
236
237 for (i = 0; i < ha->cdev.num_hwfns; i++)
238 idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
239
240 return;
241 }
242
/*
 * qlnx_get_idle_chk
 *	QLNX_IDLE_CHK ioctl handler: run (or reuse) the idle check for
 *	every HW function and copy each result out to the user-supplied
 *	buffers in idle_chk->idle_chk[].
 *
 * Returns 0 on success, EINVAL if any user buffer is missing or too
 * small, or a qlnx_idle_chk()/copyout() error code.
 */
static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int i;
	int rval = 0;
	uint32_t dwords = 0;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		/* Validate buffers and sizes before touching the hardware. */
		if ((ha->idle_chk[i] == NULL) ||
			(idle_chk->idle_chk[i] == NULL) ||
			(idle_chk->idle_chk_size[i] <
				ha->idle_chk_size[i]))
			return (EINVAL);

		rval = qlnx_idle_chk(ha, &dwords, i);

		if (rval)
			break;

		/* Tell userland how many dwords of the buffer are valid. */
		idle_chk->idle_chk_dwords[i] = dwords;

		QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);

		/* The full driver-side buffer is copied out regardless. */
		rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
				ha->idle_chk_size[i]);

		if (rval)
			break;

		/* Mark this hwfn's cached dump as consumed. */
		ha->idle_chk_dwords[i] = 0;
	}
	ha->idle_chk_taken = 0;

	return (rval);
}
280
/*
 * qlnx_get_trace_cmd_size
 *	Query the firmware-debug buffer size, in bytes, required to capture
 *	the trace/fifo selected by 'cmd' on the given HW function.
 *
 * Returns the required size in bytes, or 0 on any failure (PTT
 * acquisition failure, unknown cmd, or an ecore DBG error).
 */
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
	/* Start failed; an unhandled cmd falls through to the error path. */
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
			hwfn_index, cmd);
		return (0);
	}

	switch (cmd) {
	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	/* Convert from dwords to bytes for the caller. */
	return ((num_dwords * sizeof (uint32_t)));
}
334
335 static void
336 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
337 {
338 int i;
339
340 trace->pci_func = ha->pci_func;
341
342 for (i = 0; i < ha->cdev.num_hwfns; i++) {
343 trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
344 }
345
346 return;
347 }
348
349 static int
350 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
351 {
352 int rval = -1;
353 struct ecore_hwfn *p_hwfn;
354 struct ecore_ptt *p_ptt;
355 uint32_t num_dwords = 0;
356 void *buffer;
357
358 buffer = qlnx_zalloc(trace->size[hwfn_index]);
359 if (buffer == NULL) {
360 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
361 hwfn_index, trace->cmd);
362 return (ENXIO);
363 }
364 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
365
366 p_hwfn = &ha->cdev.hwfns[hwfn_index];
367 p_ptt = ecore_ptt_acquire(p_hwfn);
368
369 if (!p_ptt) {
370 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
371 hwfn_index, trace->cmd);
372 return (ENXIO);
373 }
374
375 switch (trace->cmd) {
376 case QLNX_MCP_TRACE:
377 rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
378 buffer, (trace->size[hwfn_index] >> 2),
379 &num_dwords);
380 break;
381
382 case QLNX_REG_FIFO:
383 rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
384 buffer, (trace->size[hwfn_index] >> 2),
385 &num_dwords);
386 break;
387
388 case QLNX_IGU_FIFO:
389 rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
390 buffer, (trace->size[hwfn_index] >> 2),
391 &num_dwords);
392 break;
393
394 case QLNX_PROTECTION_OVERRIDE:
395 rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
396 buffer, (trace->size[hwfn_index] >> 2),
397 &num_dwords);
398 break;
399
400 case QLNX_FW_ASSERTS:
401 rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
402 buffer, (trace->size[hwfn_index] >> 2),
403 &num_dwords);
404 break;
405 }
406
407 if (rval != DBG_STATUS_OK) {
408 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
409 num_dwords = 0;
410 }
411
412 ecore_ptt_release(p_hwfn, p_ptt);
413
414 trace->dwords[hwfn_index] = num_dwords;
415
416 if (num_dwords) {
417 rval = copyout(buffer, trace->buffer[hwfn_index],
418 (num_dwords << 2));
419 }
420
421 return (rval);
422 }
423
424 static int
425 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
426 {
427 int rval = 0;
428 struct ecore_hwfn *p_hwfn;
429
430 if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
431 return (EINVAL);
432 }
433
434 p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
435
436 switch (reg_rd_wr->cmd) {
437 case QLNX_REG_READ_CMD:
438 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
439 reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
440 reg_rd_wr->addr);
441 }
442 break;
443
444 case QLNX_REG_WRITE_CMD:
445 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
446 qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
447 reg_rd_wr->val);
448 }
449 break;
450
451 default:
452 rval = EINVAL;
453 break;
454 }
455
456 return (rval);
457 }
458
459 static int
460 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
461 {
462 int rval = 0;
463
464 switch (pci_cfg_rd_wr->cmd) {
465 case QLNX_PCICFG_READ:
466 pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
467 pci_cfg_rd_wr->reg,
468 pci_cfg_rd_wr->width);
469 break;
470
471 case QLNX_PCICFG_WRITE:
472 pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
473 pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
474 break;
475
476 default:
477 rval = EINVAL;
478 break;
479 }
480
481 return (rval);
482 }
483
484 static void
485 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
486 {
487 bzero(mac_addr->addr, sizeof(mac_addr->addr));
488 snprintf(mac_addr->addr, sizeof(mac_addr->addr),
489 "%02x:%02x:%02x:%02x:%02x:%02x",
490 ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
491 ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
492
493 return;
494 }
495
496 static int
497 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
498 {
499 int i;
500 int rval = 0;
501 uint32_t dwords = 0;
502 uint8_t *outb;
503
504 regs->reg_buf_len = 0;
505 outb = regs->reg_buf;
506
507 for (i = 0; i < ha->cdev.num_hwfns; i++) {
508 rval = qlnx_grc_dump(ha, &dwords, i);
509
510 if (rval)
511 break;
512
513 regs->reg_buf_len += (dwords << 2);
514
515 rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
516
517 if (rval)
518 break;
519
520 ha->grcdump_dwords[i] = 0;
521 outb += regs->reg_buf_len;
522 }
523
524 ha->grcdump_taken = 0;
525
526 return (rval);
527 }
528
529 extern char qlnx_name_str[];
530 extern char qlnx_ver_str[];
531
532 static int
533 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
534 {
535 int i;
536
537 bzero(drv_info, sizeof(qlnx_drvinfo_t));
538
539 snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
540 qlnx_name_str);
541 snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
542 qlnx_ver_str);
543 snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
544 ha->mfw_ver);
545 snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
546 "%s", ha->stormfw_ver);
547
548 drv_info->eeprom_dump_len = ha->flash_size;
549
550 for (i = 0; i < ha->cdev.num_hwfns; i++) {
551 drv_info->reg_dump_len += ha->grcdump_size[i];
552 }
553
554 snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
555 "%d:%d:%d", pci_get_bus(ha->pci_dev),
556 pci_get_slot(ha->pci_dev), ha->pci_func);
557
558 return (0);
559 }
560
561 static int
562 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
563 {
564 struct ecore_hwfn *p_hwfn;
565 struct qlnx_link_output if_link;
566
567 p_hwfn = &ha->cdev.hwfns[0];
568
569 qlnx_fill_link(ha, p_hwfn, &if_link);
570
571 dev_info->supported = if_link.supported_caps;
572 dev_info->advertising = if_link.advertised_caps;
573 dev_info->speed = if_link.speed;
574 dev_info->duplex = if_link.duplex;
575 dev_info->port = ha->pci_func & 0x1;
576 dev_info->autoneg = if_link.autoneg;
577
578 return (0);
579 }
580
/*
 * qlnx_write_nvram
 *	Copy a user-supplied data buffer into the kernel and issue the
 *	given MCP NVM write command (ECORE_NVM_WRITE_NVRAM or
 *	ECORE_PUT_FILE_DATA) at nvram->offset.
 *
 * Returns 0 on success, EINVAL for a missing/empty user buffer, or a
 * copyin()/ecore_mcp_nvm_write() error code.
 */
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	/* NOTE(review): qlnx_zalloc() result is not NULL-checked here —
	 * presumably it sleeps until the allocation succeeds; confirm. */
	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
		 data_len = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ret);

	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha, "cmd = 0x%x data = %p \
		 data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	free(buf, M_QLNXBUF);

	return (ret);
}
611
/*
 * qlnx_read_nvram
 *	Read nvram->data_len bytes from the device NVM at nvram->offset
 *	into a temporary kernel buffer, then copy the result out to the
 *	user buffer nvram->data.
 *
 * Returns 0 on success, EINVAL for a missing/empty user buffer, or an
 * ecore_mcp_nvm_read()/copyout() error code.
 */
static int
qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
		nvram->data_len);

	QL_DPRINT9(ha, " data = %p data_len = 0x%x \
		 resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}
638
/*
 * qlnx_get_nvram_resp
 *	Fetch the MCP NVM response for the last NVM command into a
 *	temporary buffer and copy it out to the user buffer nvram->data.
 *
 *	NOTE(review): the buffer is sized from the user-supplied
 *	nvram->data_len; presumably ecore_mcp_nvm_resp() writes no more
 *	than that — confirm against the ecore API.
 *
 * Returns 0 on success, EINVAL for a missing/empty user buffer, or an
 * ecore_mcp_nvm_resp()/copyout() error code.
 */
static int
qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x \
		 resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}
664
/*
 * qlnx_nvram
 *	QLNX_NVRAM ioctl dispatcher: route the sub-command in nvram->cmd
 *	to the matching NVM operation (read, write, put-file, delete,
 *	secure mode, response fetch).
 *
 * Returns 0 on success, EINVAL for an unknown sub-command, or the
 * error code of the underlying operation.
 */
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {
	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		/* For the ecore calls below, nvram->offset carries the
		 * command argument (address / file id). */
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
			 resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
			 resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
			 resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}
718
719 static void
720 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
721 {
722 int i;
723 int index;
724 int ret;
725 int stats_copied = 0;
726
727 s_stats->num_hwfns = ha->cdev.num_hwfns;
728
729 // if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
730 // return;
731
732 s_stats->num_samples = ha->storm_stats_index;
733
734 for (i = 0; i < ha->cdev.num_hwfns; i++) {
735 index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
736
737 if (s_stats->buffer[i]) {
738 ret = copyout(&ha->storm_stats[index],
739 s_stats->buffer[i],
740 QLNX_STORM_STATS_BYTES_PER_HWFN);
741 if (ret) {
742 printf("%s [%d]: failed\n", __func__, i);
743 }
744
745 if (s_stats->num_samples ==
746 QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
747 bzero((void *)&ha->storm_stats[i],
748 QLNX_STORM_STATS_BYTES_PER_HWFN);
749
750 stats_copied = 1;
751 }
752 }
753 }
754
755 if (stats_copied)
756 ha->storm_stats_index = 0;
757
758 return;
759 }
760
761 #ifdef QLNX_USER_LLDP
762
/*
 * qlnx_lldp_configure
 *	Enable or disable the firmware LLDP agent on the nearest-bridge
 *	scope. Builds the mandatory Chassis ID and Port ID TLVs from the
 *	MFW-provided LLDP MAC address and programs them along with the
 *	MFW-suggested timing defaults. When disabling, also clears the
 *	discard_mandatory_tlv flag via an empty system-TLV set.
 *
 * Returns 0 (ECORE_SUCCESS) on success, -1 on MAC-query or
 * set-params failure, or the ecore_lldp_set_system_tlvs() status.
 */
static int
qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt, uint32_t enable)
{
	int ret = 0;
	uint8_t lldp_mac[6] = {0};
	struct ecore_lldp_config_params lldp_params;
	struct ecore_lldp_sys_tlvs tlv_params;

	ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_mcp_get_lldp_mac failed\n", __func__);
		return (-1);
	}

	bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
	bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

	lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
	lldp_params.tx_interval = 30; //Default value used as suggested by MFW
	lldp_params.tx_hold = 4; //Default value used as suggested by MFW
	lldp_params.tx_credit = 5; //Default value used as suggested by MFW
	lldp_params.rx_enable = enable ? 1 : 0;
	lldp_params.tx_enable = enable ? 1 : 0;

	/*
	 * Pack the Chassis ID TLV into the dword array: byte 0 holds the
	 * TLV type (shifted into the length/type header), byte 1 the TLV
	 * length, byte 2 the MAC-address subtype, then the 6 MAC bytes
	 * follow across the remaining dwords.
	 */
	lldp_params.chassis_id_tlv[0] = 0;
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
	lldp_params.chassis_id_tlv[0] |=
		((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
	lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.chassis_id_tlv[2] = lldp_mac[5];

	/* Port ID TLV: same layout as above with the port-id type/subtype. */
	lldp_params.port_id_tlv[0] = 0;
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
	lldp_params.port_id_tlv[0] |=
		((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
	lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.port_id_tlv[2] = lldp_mac[5];

	ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_params failed\n", __func__);
		return (-1);
	}

	//If LLDP is disable then disable discard_mandatory_tlv flag
	if (!enable) {
		tlv_params.discard_mandatory_tlv = false;
		tlv_params.buf_size = 0;
		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
	}

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_system_tlvs failed\n", __func__);
	}

	return (ret);
}
834
835 static int
836 qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
837 struct ecore_ptt *p_ptt)
838 {
839 int ret = 0;
840
841 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
842 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
843 if (ret != ECORE_SUCCESS) {
844 device_printf(ha->pci_dev,
845 "%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
846 goto qlnx_register_default_lldp_tlvs_exit;
847 }
848
849 //register Port ID TLV
850 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
851 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
852 if (ret != ECORE_SUCCESS) {
853 device_printf(ha->pci_dev,
854 "%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
855 goto qlnx_register_default_lldp_tlvs_exit;
856 }
857
858 //register TTL TLV
859 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
860 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
861 if (ret != ECORE_SUCCESS) {
862 device_printf(ha->pci_dev,
863 "%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
864 goto qlnx_register_default_lldp_tlvs_exit;
865 }
866
867 //register Port Description TLV
868 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
869 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
870 if (ret != ECORE_SUCCESS) {
871 device_printf(ha->pci_dev,
872 "%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
873 goto qlnx_register_default_lldp_tlvs_exit;
874 }
875
876 //register System Name TLV
877 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
878 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
879 if (ret != ECORE_SUCCESS) {
880 device_printf(ha->pci_dev,
881 "%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
882 goto qlnx_register_default_lldp_tlvs_exit;
883 }
884
885 //register System Description TLV
886 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
887 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
888 if (ret != ECORE_SUCCESS) {
889 device_printf(ha->pci_dev,
890 "%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
891 goto qlnx_register_default_lldp_tlvs_exit;
892 }
893
894 //register System Capabilities TLV
895 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
896 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
897 if (ret != ECORE_SUCCESS) {
898 device_printf(ha->pci_dev,
899 "%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
900 goto qlnx_register_default_lldp_tlvs_exit;
901 }
902
903 //register Management Address TLV
904 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
905 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
906 if (ret != ECORE_SUCCESS) {
907 device_printf(ha->pci_dev,
908 "%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
909 goto qlnx_register_default_lldp_tlvs_exit;
910 }
911
912 //register Organizationally Specific TLVs
913 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
914 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
915 if (ret != ECORE_SUCCESS) {
916 device_printf(ha->pci_dev,
917 "%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
918 }
919
920 qlnx_register_default_lldp_tlvs_exit:
921 return (ret);
922 }
923
/*
 * qlnx_set_lldp_tlvx
 *	QLNX_SET_LLDP_TLVS ioctl handler: restart the firmware LLDP agent
 *	(disable, re-register the default TLV set, re-enable) and then,
 *	if lldp_tlvs is non-NULL, program the caller-supplied system TLV
 *	buffer.
 *
 * Returns 0 on success, ENXIO if a PTT window cannot be acquired, or
 * the first failing step's error code.
 */
int
qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
{
	int ret = 0;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	struct ecore_lldp_sys_tlvs tlv_params;

	/* All LLDP configuration goes through the first HW function. */
	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		device_printf(ha->pci_dev,
			"%s: ecore_ptt_acquire failed\n", __func__);
		return (ENXIO);
	}

	/* Quiesce the agent before changing its TLV registrations. */
	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure disable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_register_default_lldp_tlvs failed\n",
			__func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure enable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	if (lldp_tlvs != NULL) {
		bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

		tlv_params.discard_mandatory_tlv =
			(lldp_tlvs->discard_mandatory_tlv ? true: false);
		tlv_params.buf_size = lldp_tlvs->buf_size;
		/* NOTE(review): buf_size is caller-controlled; presumably
		 * it was validated against sizeof(tlv_params.buf) before
		 * this point — confirm at the ioctl entry. */
		memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);

		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: ecore_lldp_set_system_tlvs failed\n",
				__func__);
		}
	}
qlnx_set_lldp_tlvx_exit:

	ecore_ptt_release(p_hwfn, p_ptt);
	return (ret);
}
987
988 #endif /* #ifdef QLNX_USER_LLDP */
989
/*
 * qlnx_eioctl
 *	ioctl entry point for the qlnx control device. Recovers the softc
 *	from si_drv1 and dispatches each QLNX_* command to its handler;
 *	'data' is the kernel copy of the per-command argument structure.
 *
 * Returns 0 on success, ENXIO if the device has no softc, EINVAL for
 * an unknown command, or the handler's error code.
 */
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t *ha;
	int rval = 0;
	qlnx_trace_t *trace;
	int i;

	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	switch (cmd) {
	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		/* Capture a trace for each hwfn the caller supplied a
		 * buffer (and a non-zero size/cmd) for; stop on error. */
		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

#ifdef QLNX_USER_LLDP
	case QLNX_SET_LLDP_TLVS:
		rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
		break;
#endif /* #ifdef QLNX_USER_LLDP */

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}