1 /*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File: qlnx_os.c
 * Author: David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "qlnx_os.h"
37 #include "bcm_osal.h"
38 #include "reg_addr.h"
39 #include "ecore_gtt_reg_addr.h"
40 #include "ecore.h"
41 #include "ecore_chain.h"
42 #include "ecore_status.h"
43 #include "ecore_hw.h"
44 #include "ecore_rt_defs.h"
45 #include "ecore_init_ops.h"
46 #include "ecore_int.h"
47 #include "ecore_cxt.h"
48 #include "ecore_spq.h"
49 #include "ecore_init_fw_funcs.h"
50 #include "ecore_sp_commands.h"
51 #include "ecore_dev_api.h"
52 #include "ecore_l2_api.h"
53 #include "ecore_mcp.h"
54 #include "ecore_hw_defs.h"
55 #include "mcp_public.h"
56 #include "ecore_iro.h"
57 #include "nvm_cfg.h"
59 #include "ecore_dbg_fw_funcs.h"
60 #include "ecore_iov_api.h"
61 #include "ecore_vf_api.h"
62
63 #include "qlnx_ioctl.h"
64 #include "qlnx_def.h"
65 #include "qlnx_ver.h"
66
67 #ifdef QLNX_ENABLE_IWARP
68 #include "qlnx_rdma.h"
69 #endif /* #ifdef QLNX_ENABLE_IWARP */
70
71 #include <sys/smp.h>
72
73 /*
74 * static functions
75 */
76 /*
77 * ioctl related functions
78 */
79 static void qlnx_add_sysctls(qlnx_host_t *ha);
80
81 /*
82 * main driver
83 */
84 static void qlnx_release(qlnx_host_t *ha);
85 static void qlnx_fp_isr(void *arg);
86 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
87 static void qlnx_init(void *arg);
88 static void qlnx_init_locked(qlnx_host_t *ha);
89 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
90 static int qlnx_set_promisc(qlnx_host_t *ha);
91 static int qlnx_set_allmulti(qlnx_host_t *ha);
92 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
93 static int qlnx_media_change(struct ifnet *ifp);
94 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
95 static void qlnx_stop(qlnx_host_t *ha);
96 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
97 struct mbuf **m_headp);
98 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
99 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
100 struct qlnx_link_output *if_link);
101 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
102 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
103 struct mbuf *mp);
104 static void qlnx_qflush(struct ifnet *ifp);
105
106 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
107 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
108 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
109 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
110 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
111 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
112
113 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
114 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
115
116 static int qlnx_nic_setup(struct ecore_dev *cdev,
117 struct ecore_pf_params *func_params);
118 static int qlnx_nic_start(struct ecore_dev *cdev);
119 static int qlnx_slowpath_start(qlnx_host_t *ha);
120 static int qlnx_slowpath_stop(qlnx_host_t *ha);
121 static int qlnx_init_hw(qlnx_host_t *ha);
122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
123 char ver_str[VER_SIZE]);
124 static void qlnx_unload(qlnx_host_t *ha);
125 static int qlnx_load(qlnx_host_t *ha);
126 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
127 uint32_t add_mac);
128 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
129 uint32_t len);
130 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
131 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
132 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
133 struct qlnx_rx_queue *rxq);
134 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
135 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
136 int hwfn_index);
137 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
138 int hwfn_index);
139 static void qlnx_timer(void *arg);
140 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
141 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
142 static void qlnx_trigger_dump(qlnx_host_t *ha);
143 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
144 struct qlnx_tx_queue *txq);
145 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
146 struct qlnx_tx_queue *txq);
147 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
148 int lro_enable);
149 static void qlnx_fp_taskqueue(void *context, int pending);
150 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
151 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
152 struct qlnx_agg_info *tpa);
153 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
154
155 #if __FreeBSD_version >= 1100000
156 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
157 #endif
158
159 /*
160 * Hooks to the Operating Systems
161 */
162 static int qlnx_pci_probe (device_t);
163 static int qlnx_pci_attach (device_t);
164 static int qlnx_pci_detach (device_t);
165
166 #ifndef QLNX_VF
167
168 #ifdef CONFIG_ECORE_SRIOV
169
170 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
171 static void qlnx_iov_uninit(device_t dev);
172 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
173 static void qlnx_initialize_sriov(qlnx_host_t *ha);
174 static void qlnx_pf_taskqueue(void *context, int pending);
175 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
176 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
177 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
178
179 #endif /* #ifdef CONFIG_ECORE_SRIOV */
180
181 static device_method_t qlnx_pci_methods[] = {
182 /* Device interface */
183 DEVMETHOD(device_probe, qlnx_pci_probe),
184 DEVMETHOD(device_attach, qlnx_pci_attach),
185 DEVMETHOD(device_detach, qlnx_pci_detach),
186
187 #ifdef CONFIG_ECORE_SRIOV
188 DEVMETHOD(pci_iov_init, qlnx_iov_init),
189 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
190 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
191 #endif /* #ifdef CONFIG_ECORE_SRIOV */
192 { 0, 0 }
193 };
194
195 static driver_t qlnx_pci_driver = {
196 "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
197 };
198
199 MODULE_VERSION(if_qlnxe,1);
200 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
201
202 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
203 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
204
205 #else
206
207 static device_method_t qlnxv_pci_methods[] = {
208 /* Device interface */
209 DEVMETHOD(device_probe, qlnx_pci_probe),
210 DEVMETHOD(device_attach, qlnx_pci_attach),
211 DEVMETHOD(device_detach, qlnx_pci_detach),
212 { 0, 0 }
213 };
214
215 static driver_t qlnxv_pci_driver = {
216 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
217 };
218
219 MODULE_VERSION(if_qlnxev,1);
220 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
221
222 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
223 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
224
225 #endif /* #ifdef QLNX_VF */
226
227 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
228
229 char qlnx_dev_str[128];
230 char qlnx_ver_str[VER_SIZE];
231 char qlnx_name_str[NAME_SIZE];
232
233 /*
234 * Some PCI Configuration Space Related Defines
235 */
236
237 #ifndef PCI_VENDOR_QLOGIC
238 #define PCI_VENDOR_QLOGIC 0x1077
239 #endif
240
241 /* 40G Adapter QLE45xxx*/
242 #ifndef QLOGIC_PCI_DEVICE_ID_1634
243 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634
244 #endif
245
246 /* 100G Adapter QLE45xxx*/
247 #ifndef QLOGIC_PCI_DEVICE_ID_1644
248 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644
249 #endif
250
251 /* 25G Adapter QLE45xxx*/
252 #ifndef QLOGIC_PCI_DEVICE_ID_1656
253 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656
254 #endif
255
256 /* 50G Adapter QLE45xxx*/
257 #ifndef QLOGIC_PCI_DEVICE_ID_1654
258 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654
259 #endif
260
261 /* 10G/25G/40G Adapter QLE41xxx*/
262 #ifndef QLOGIC_PCI_DEVICE_ID_8070
263 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070
264 #endif
265
266 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/
267 #ifndef QLOGIC_PCI_DEVICE_ID_8090
268 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090
269 #endif
270
271 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
272 "qlnxe driver parameters");
273
274 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
275 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
276
277 #if __FreeBSD_version < 1100000
278
279 TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
280
281 #endif
282
283 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
284 &qlnxe_queue_count, 0, "Multi-Queue queue count");
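
/*
 * Since queue_count is a CTLFLAG_RDTUN tunable it can be fixed at boot,
 * e.g. via /boot/loader.conf (the value below is purely illustrative):
 *
 *	hw.qlnxe.queue_count="8"
 *
 * A value of 0 lets the driver size the RSS queue set automatically.
 */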
285
286 /*
287 * Note on RDMA personality setting
288 *
 * Read the personality configured in NVRAM. If it is ETH_ONLY, ETH_IWARP
 * or ETH_ROCE and the personality configured via sysctl is
 * QLNX_PERSONALITY_DEFAULT, use the personality from NVRAM.
 *
 * Otherwise use the personality configured via sysctl.
 *
296 */
297 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */
298 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */
299 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */
300 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */
301 #define QLNX_PERSONALITY_BITS_PER_FUNC 4
302 #define QLNX_PERSONALIY_MASK 0xF
303
/* RDMA configuration; the 64-bit field holds a 4-bit setting for each of up to 16 physical functions */
305 static uint64_t qlnxe_rdma_configuration = 0x22222222;
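
/*
 * Each physical function owns a 4-bit nibble of qlnxe_rdma_configuration
 * (QLNX_PERSONALITY_BITS_PER_FUNC bits, masked with QLNX_PERSONALIY_MASK),
 * consulted only when the driver is built with QLNX_ENABLE_IWARP.
 * As an illustration, the default 0x22222222 assigns
 * QLNX_PERSONALITY_ETH_IWARP (0x2) to PCI functions 0-7 and
 * QLNX_PERSONALITY_DEFAULT (0x0) to functions 8-15. A purely hypothetical
 * value of 0x31 would request ETH_ONLY (0x1) on function 0 and
 * ETH_ROCE (0x3) on function 1, leaving the rest at the NVRAM default.
 */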
306
307 #if __FreeBSD_version < 1100000
308
309 TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);
310
311 SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
312 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
313
314 #else
315
316 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
317 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
318
319 #endif /* #if __FreeBSD_version < 1100000 */
320
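/*
 * qlnx_vf_device: returns 0 if the adapter is the SR-IOV virtual function
 * (device id 0x8090), -1 for the physical-function devices.
 */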
321 int
322 qlnx_vf_device(qlnx_host_t *ha)
323 {
324 uint16_t device_id;
325
326 device_id = ha->device_id;
327
328 if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
329 return 0;
330
331 return -1;
332 }
333
334 static int
335 qlnx_valid_device(qlnx_host_t *ha)
336 {
337 uint16_t device_id;
338
339 device_id = ha->device_id;
340
341 #ifndef QLNX_VF
342 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
343 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
344 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
345 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
346 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
347 return 0;
348 #else
349 if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
350 return 0;
351
352 #endif /* #ifndef QLNX_VF */
353 return -1;
354 }
355
356 #ifdef QLNX_ENABLE_IWARP
357 static int
358 qlnx_rdma_supported(struct qlnx_host *ha)
359 {
360 uint16_t device_id;
361
362 device_id = pci_get_device(ha->pci_dev);
363
364 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
365 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
366 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
367 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
368 return (0);
369
370 return (-1);
371 }
372 #endif /* #ifdef QLNX_ENABLE_IWARP */
373
374 /*
375 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLogic adapter
377 */
378 static int
379 qlnx_pci_probe(device_t dev)
380 {
381 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
382 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
383 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
384
385 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
386 return (ENXIO);
387 }
388
389 switch (pci_get_device(dev)) {
390 #ifndef QLNX_VF
391
392 case QLOGIC_PCI_DEVICE_ID_1644:
393 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
394 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
395 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
396 QLNX_VERSION_BUILD);
397 device_set_desc_copy(dev, qlnx_dev_str);
398
399 break;
400
401 case QLOGIC_PCI_DEVICE_ID_1634:
402 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
403 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
404 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
405 QLNX_VERSION_BUILD);
406 device_set_desc_copy(dev, qlnx_dev_str);
407
408 break;
409
410 case QLOGIC_PCI_DEVICE_ID_1656:
411 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
412 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
413 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
414 QLNX_VERSION_BUILD);
415 device_set_desc_copy(dev, qlnx_dev_str);
416
417 break;
418
419 case QLOGIC_PCI_DEVICE_ID_1654:
420 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
421 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
422 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
423 QLNX_VERSION_BUILD);
424 device_set_desc_copy(dev, qlnx_dev_str);
425
426 break;
427
428 case QLOGIC_PCI_DEVICE_ID_8070:
429 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
430 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
431 " Adapter-Ethernet Function",
432 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
433 QLNX_VERSION_BUILD);
434 device_set_desc_copy(dev, qlnx_dev_str);
435
436 break;
437
438 #else
439 case QLOGIC_PCI_DEVICE_ID_8090:
440 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
441 "Qlogic SRIOV PCI CNA (AH) "
442 "Adapter-Ethernet Function",
443 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
444 QLNX_VERSION_BUILD);
445 device_set_desc_copy(dev, qlnx_dev_str);
446
447 break;
448
449 #endif /* #ifndef QLNX_VF */
450
451 default:
452 return (ENXIO);
453 }
454
455 #ifdef QLNX_ENABLE_IWARP
456 qlnx_rdma_init();
457 #endif /* #ifdef QLNX_ENABLE_IWARP */
458
459 return (BUS_PROBE_DEFAULT);
460 }
461
462 static uint16_t
463 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
464 struct qlnx_tx_queue *txq)
465 {
466 u16 hw_bd_cons;
467 u16 ecore_cons_idx;
468 uint16_t diff;
469
470 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
471
472 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
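	/*
	 * The producer/consumer indices are 16-bit; if the hardware
	 * consumer index has wrapped past the chain's consumer index,
	 * account for the 2^16 roll-over when computing the number of
	 * completed BDs.
	 */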
473 if (hw_bd_cons < ecore_cons_idx) {
474 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
475 } else {
476 diff = hw_bd_cons - ecore_cons_idx;
477 }
478 return diff;
479 }
480
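/*
 * qlnx_sp_intr: slowpath MSI-X handler; it only identifies the hw-function
 * that raised the interrupt and enqueues its slowpath task.
 */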
481 static void
482 qlnx_sp_intr(void *arg)
483 {
484 struct ecore_hwfn *p_hwfn;
485 qlnx_host_t *ha;
486 int i;
487
488 p_hwfn = arg;
489
490 if (p_hwfn == NULL) {
491 printf("%s: spurious slowpath intr\n", __func__);
492 return;
493 }
494
495 ha = (qlnx_host_t *)p_hwfn->p_dev;
496
497 QL_DPRINT2(ha, "enter\n");
498
499 for (i = 0; i < ha->cdev.num_hwfns; i++) {
500 if (&ha->cdev.hwfns[i] == p_hwfn) {
501 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
502 break;
503 }
504 }
505 QL_DPRINT2(ha, "exit\n");
506
507 return;
508 }
509
510 static void
511 qlnx_sp_taskqueue(void *context, int pending)
512 {
513 struct ecore_hwfn *p_hwfn;
514
515 p_hwfn = context;
516
517 if (p_hwfn != NULL) {
518 qlnx_sp_isr(p_hwfn);
519 }
520 return;
521 }
522
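/*
 * One slowpath taskqueue, with a single thread at PI_NET priority, is
 * created per hw-function; the slowpath interrupt handler defers all
 * processing to it.
 */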
523 static int
524 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
525 {
526 int i;
527 uint8_t tq_name[32];
528
529 for (i = 0; i < ha->cdev.num_hwfns; i++) {
530 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
531
532 bzero(tq_name, sizeof (tq_name));
533 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
534
535 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
536
537 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
538 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
539
540 if (ha->sp_taskqueue[i] == NULL)
541 return (-1);
542
543 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
544 tq_name);
545
546 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
547 }
548
549 return (0);
550 }
551
552 static void
553 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
554 {
555 int i;
556
557 for (i = 0; i < ha->cdev.num_hwfns; i++) {
558 if (ha->sp_taskqueue[i] != NULL) {
559 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
560 taskqueue_free(ha->sp_taskqueue[i]);
561 }
562 }
563 return;
564 }
565
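/*
 * Fastpath taskqueue handler: if the interface is running and the
 * per-queue buf_ring has pending frames, try to take the tx mutex and
 * drain them via qlnx_transmit_locked().
 */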
566 static void
567 qlnx_fp_taskqueue(void *context, int pending)
568 {
569 struct qlnx_fastpath *fp;
570 qlnx_host_t *ha;
571 struct ifnet *ifp;
572
573 fp = context;
574
575 if (fp == NULL)
576 return;
577
578 ha = (qlnx_host_t *)fp->edev;
579
580 ifp = ha->ifp;
581
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
585 #ifdef QLNX_TRACE_PERF_DATA
586 tx_pkts = fp->tx_pkts_transmitted;
587 tx_compl = fp->tx_pkts_completed;
588 #endif
589
590 qlnx_transmit_locked(ifp, fp, NULL);
591
592 #ifdef QLNX_TRACE_PERF_DATA
593 fp->tx_pkts_trans_fp +=
594 (fp->tx_pkts_transmitted - tx_pkts);
595 fp->tx_pkts_compl_fp +=
596 (fp->tx_pkts_completed - tx_compl);
597 #endif
598 mtx_unlock(&fp->tx_mtx);
599 }
600 }
601 }
602
603 QL_DPRINT2(ha, "exit \n");
604 return;
605 }
606
607 static int
608 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
609 {
610 int i;
611 uint8_t tq_name[32];
612 struct qlnx_fastpath *fp;
613
614 for (i = 0; i < ha->num_rss; i++) {
615 fp = &ha->fp_array[i];
616
617 bzero(tq_name, sizeof (tq_name));
618 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
619
620 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
621
622 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
623 taskqueue_thread_enqueue,
624 &fp->fp_taskqueue);
625
626 if (fp->fp_taskqueue == NULL)
627 return (-1);
628
629 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
630 tq_name);
631
632 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
633 }
634
635 return (0);
636 }
637
638 static void
639 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
640 {
641 int i;
642 struct qlnx_fastpath *fp;
643
644 for (i = 0; i < ha->num_rss; i++) {
645 fp = &ha->fp_array[i];
646
647 if (fp->fp_taskqueue != NULL) {
648 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
649 taskqueue_free(fp->fp_taskqueue);
650 fp->fp_taskqueue = NULL;
651 }
652 }
653 return;
654 }
655
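/*
 * Drain all fastpath taskqueues, temporarily dropping the driver lock
 * around each taskqueue_drain() call (which may sleep).
 */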
656 static void
657 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
658 {
659 int i;
660 struct qlnx_fastpath *fp;
661
662 for (i = 0; i < ha->num_rss; i++) {
663 fp = &ha->fp_array[i];
664
665 if (fp->fp_taskqueue != NULL) {
666 QLNX_UNLOCK(ha);
667 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
668 QLNX_LOCK(ha);
669 }
670 }
671 return;
672 }
673
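/*
 * Sanity-check the queue_count tunable; out-of-range values fall back to
 * 0 (automatic queue sizing).
 */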
674 static void
675 qlnx_get_params(qlnx_host_t *ha)
676 {
677 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
678 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
679 qlnxe_queue_count);
680 qlnxe_queue_count = 0;
681 }
682 return;
683 }
684
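/*
 * Error recovery task: stop the interface, restart the slowpath (and the
 * RDMA device when iWARP support is enabled), re-initialize the interface
 * and re-arm the periodic timer.
 */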
685 static void
686 qlnx_error_recovery_taskqueue(void *context, int pending)
687 {
688 qlnx_host_t *ha;
689
690 ha = context;
691
692 QL_DPRINT2(ha, "enter\n");
693
694 QLNX_LOCK(ha);
695 qlnx_stop(ha);
696 QLNX_UNLOCK(ha);
697
698 #ifdef QLNX_ENABLE_IWARP
699 qlnx_rdma_dev_remove(ha);
700 #endif /* #ifdef QLNX_ENABLE_IWARP */
701
702 qlnx_slowpath_stop(ha);
703 qlnx_slowpath_start(ha);
704
705 #ifdef QLNX_ENABLE_IWARP
706 qlnx_rdma_dev_add(ha);
707 #endif /* #ifdef QLNX_ENABLE_IWARP */
708
709 qlnx_init(ha);
710
711 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
712
713 QL_DPRINT2(ha, "exit\n");
714
715 return;
716 }
717
718 static int
719 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
720 {
721 uint8_t tq_name[32];
722
723 bzero(tq_name, sizeof (tq_name));
724 snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
725
726 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
727
728 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
729 taskqueue_thread_enqueue, &ha->err_taskqueue);
730
731 if (ha->err_taskqueue == NULL)
732 return (-1);
733
734 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
735
736 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
737
738 return (0);
739 }
740
741 static void
742 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
743 {
744 if (ha->err_taskqueue != NULL) {
745 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
746 taskqueue_free(ha->err_taskqueue);
747 }
748
749 ha->err_taskqueue = NULL;
750
751 return;
752 }
753
754 /*
755 * Name: qlnx_pci_attach
756 * Function: attaches the device to the operating system
757 */
758 static int
759 qlnx_pci_attach(device_t dev)
760 {
761 qlnx_host_t *ha = NULL;
762 uint32_t rsrc_len_reg __unused = 0;
763 uint32_t rsrc_len_dbells = 0;
764 uint32_t rsrc_len_msix __unused = 0;
765 int i;
766 uint32_t mfw_ver;
767 uint32_t num_sp_msix = 0;
768 uint32_t num_rdma_irqs = 0;
769
770 if ((ha = device_get_softc(dev)) == NULL) {
771 device_printf(dev, "cannot get softc\n");
772 return (ENOMEM);
773 }
774
775 memset(ha, 0, sizeof (qlnx_host_t));
776
777 ha->device_id = pci_get_device(dev);
778
779 if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not a valid device\n");
781 return (ENXIO);
782 }
783 ha->pci_func = pci_get_function(dev);
784
785 ha->pci_dev = dev;
786
787 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
788
789 ha->flags.lock_init = 1;
790
791 pci_enable_busmaster(dev);
792
793 /*
794 * map the PCI BARs
795 */
796
797 ha->reg_rid = PCIR_BAR(0);
798 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
799 RF_ACTIVE);
800
801 if (ha->pci_reg == NULL) {
802 device_printf(dev, "unable to map BAR0\n");
803 goto qlnx_pci_attach_err;
804 }
805
806 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
807 ha->reg_rid);
808
809 ha->dbells_rid = PCIR_BAR(2);
810 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
811 SYS_RES_MEMORY,
812 ha->dbells_rid);
813 if (rsrc_len_dbells) {
814 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
815 &ha->dbells_rid, RF_ACTIVE);
816
817 if (ha->pci_dbells == NULL) {
818 device_printf(dev, "unable to map BAR1\n");
819 goto qlnx_pci_attach_err;
820 }
821 ha->dbells_phys_addr = (uint64_t)
822 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
823
824 ha->dbells_size = rsrc_len_dbells;
825 } else {
826 if (qlnx_vf_device(ha) != 0) {
827 device_printf(dev, " BAR1 size is zero\n");
828 goto qlnx_pci_attach_err;
829 }
830 }
831
832 ha->msix_rid = PCIR_BAR(4);
833 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
834 &ha->msix_rid, RF_ACTIVE);
835
836 if (ha->msix_bar == NULL) {
837 device_printf(dev, "unable to map BAR2\n");
838 goto qlnx_pci_attach_err;
839 }
840
841 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
842 ha->msix_rid);
843
844 ha->dbg_level = 0x0000;
845
846 QL_DPRINT1(ha, "\n\t\t\t"
847 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
848 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
849 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
850 " msix_avail = 0x%x "
851 "\n\t\t\t[ncpus = %d]\n",
852 ha->pci_dev, ha->pci_reg, rsrc_len_reg,
853 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
854 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
855 mp_ncpus);
856 /*
857 * allocate dma tags
858 */
859
860 if (qlnx_alloc_parent_dma_tag(ha))
861 goto qlnx_pci_attach_err;
862
863 if (qlnx_alloc_tx_dma_tag(ha))
864 goto qlnx_pci_attach_err;
865
866 if (qlnx_alloc_rx_dma_tag(ha))
867 goto qlnx_pci_attach_err;
868
869
870 if (qlnx_init_hw(ha) != 0)
871 goto qlnx_pci_attach_err;
872
873 ha->flags.hw_init = 1;
874
875 qlnx_get_params(ha);
876
	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
878 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
879 qlnxe_queue_count = QLNX_MAX_RSS;
880 }
881
882 /*
883 * Allocate MSI-x vectors
884 */
885 if (qlnx_vf_device(ha) != 0) {
886 if (qlnxe_queue_count == 0)
887 ha->num_rss = QLNX_DEFAULT_RSS;
888 else
889 ha->num_rss = qlnxe_queue_count;
890
891 num_sp_msix = ha->cdev.num_hwfns;
892 } else {
893 uint8_t max_rxq;
894 uint8_t max_txq;
895
896 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
897 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
898
899 if (max_rxq < max_txq)
900 ha->num_rss = max_rxq;
901 else
902 ha->num_rss = max_txq;
903
904 if (ha->num_rss > QLNX_MAX_VF_RSS)
905 ha->num_rss = QLNX_MAX_VF_RSS;
906
907 num_sp_msix = 0;
908 }
909
910 if (ha->num_rss > mp_ncpus)
911 ha->num_rss = mp_ncpus;
912
913 ha->num_tc = QLNX_MAX_TC;
914
915 ha->msix_count = pci_msix_count(dev);
916
917 #ifdef QLNX_ENABLE_IWARP
918
919 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
920
921 #endif /* #ifdef QLNX_ENABLE_IWARP */
922
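	/*
	 * The attach needs at least one fastpath vector on top of one
	 * slowpath vector per hw-function and any vectors reserved for
	 * RDMA. The request is then capped at (num_rss + num_sp_msix +
	 * num_rdma_irqs), or num_rss is shrunk to fit the vectors
	 * actually available.
	 */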
923 if (!ha->msix_count ||
924 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
925 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
926 ha->msix_count);
927 goto qlnx_pci_attach_err;
928 }
929
930 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
931 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
932 else
933 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
934
935 QL_DPRINT1(ha, "\n\t\t\t"
936 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
937 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
938 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
939 " msix_avail = 0x%x msix_alloc = 0x%x"
940 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
941 ha->pci_reg, rsrc_len_reg,
942 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
943 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
944 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
945
946 if (pci_alloc_msix(dev, &ha->msix_count)) {
947 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
948 ha->msix_count);
949 ha->msix_count = 0;
950 goto qlnx_pci_attach_err;
951 }
952
953 /*
954 * Initialize slow path interrupt and task queue
955 */
956
957 if (num_sp_msix) {
958 if (qlnx_create_sp_taskqueues(ha) != 0)
959 goto qlnx_pci_attach_err;
960
961 for (i = 0; i < ha->cdev.num_hwfns; i++) {
962 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
963
964 ha->sp_irq_rid[i] = i + 1;
965 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
966 &ha->sp_irq_rid[i],
967 (RF_ACTIVE | RF_SHAREABLE));
968 if (ha->sp_irq[i] == NULL) {
969 device_printf(dev,
				    "could not allocate slow path interrupt\n");
971 goto qlnx_pci_attach_err;
972 }
973
974 if (bus_setup_intr(dev, ha->sp_irq[i],
975 (INTR_TYPE_NET | INTR_MPSAFE), NULL,
976 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
977 device_printf(dev,
978 "could not setup slow path interrupt\n");
979 goto qlnx_pci_attach_err;
980 }
981
982 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
983 " sp_irq %p sp_handle %p\n", p_hwfn,
984 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
985 }
986 }
987
988 /*
989 * initialize fast path interrupt
990 */
991 if (qlnx_create_fp_taskqueues(ha) != 0)
992 goto qlnx_pci_attach_err;
993
994 for (i = 0; i < ha->num_rss; i++) {
995 ha->irq_vec[i].rss_idx = i;
996 ha->irq_vec[i].ha = ha;
997 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
998
999 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1000 &ha->irq_vec[i].irq_rid,
1001 (RF_ACTIVE | RF_SHAREABLE));
1002
1003 if (ha->irq_vec[i].irq == NULL) {
1004 device_printf(dev,
1005 "could not allocate interrupt[%d] irq_rid = %d\n",
1006 i, ha->irq_vec[i].irq_rid);
1007 goto qlnx_pci_attach_err;
1008 }
1009
1010 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
1011 device_printf(dev, "could not allocate tx_br[%d]\n", i);
1012 goto qlnx_pci_attach_err;
1013 }
1014 }
1015
1016 if (qlnx_vf_device(ha) != 0) {
1017 callout_init(&ha->qlnx_callout, 1);
1018 ha->flags.callout_init = 1;
1019
1020 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1021 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
1022 goto qlnx_pci_attach_err;
1023 if (ha->grcdump_size[i] == 0)
1024 goto qlnx_pci_attach_err;
1025
1026 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
1027 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
1028 i, ha->grcdump_size[i]);
1029
1030 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
1031 if (ha->grcdump[i] == NULL) {
1032 device_printf(dev, "grcdump alloc[%d] failed\n", i);
1033 goto qlnx_pci_attach_err;
1034 }
1035
1036 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1037 goto qlnx_pci_attach_err;
1038 if (ha->idle_chk_size[i] == 0)
1039 goto qlnx_pci_attach_err;
1040
1041 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1042 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1043 i, ha->idle_chk_size[i]);
1044
1045 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1046
1047 if (ha->idle_chk[i] == NULL) {
1048 device_printf(dev, "idle_chk alloc failed\n");
1049 goto qlnx_pci_attach_err;
1050 }
1051 }
1052
1053 if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1054 goto qlnx_pci_attach_err;
1055 }
1056
1057 if (qlnx_slowpath_start(ha) != 0)
1058 goto qlnx_pci_attach_err;
1059 else
1060 ha->flags.slowpath_start = 1;
1061
1062 if (qlnx_vf_device(ha) != 0) {
1063 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1064 qlnx_mdelay(__func__, 1000);
1065 qlnx_trigger_dump(ha);
1066
1067 goto qlnx_pci_attach_err0;
1068 }
1069
1070 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1071 qlnx_mdelay(__func__, 1000);
1072 qlnx_trigger_dump(ha);
1073
1074 goto qlnx_pci_attach_err0;
1075 }
1076 } else {
1077 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1078 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
1079 }
1080
1081 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1082 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
1083 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
1084 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1085 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1086 FW_ENGINEERING_VERSION);
1087
1088 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1089 ha->stormfw_ver, ha->mfw_ver);
1090
1091 qlnx_init_ifnet(dev, ha);
1092
1093 /*
1094 * add sysctls
1095 */
1096 qlnx_add_sysctls(ha);
1097
1098 qlnx_pci_attach_err0:
1099 /*
1100 * create ioctl device interface
1101 */
1102 if (qlnx_vf_device(ha) != 0) {
1103 if (qlnx_make_cdev(ha)) {
1104 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
1105 goto qlnx_pci_attach_err;
1106 }
1107
1108 #ifdef QLNX_ENABLE_IWARP
1109 qlnx_rdma_dev_add(ha);
1110 #endif /* #ifdef QLNX_ENABLE_IWARP */
1111 }
1112
1113 #ifndef QLNX_VF
1114 #ifdef CONFIG_ECORE_SRIOV
1115
1116 if (qlnx_vf_device(ha) != 0)
1117 qlnx_initialize_sriov(ha);
1118
1119 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1120 #endif /* #ifdef QLNX_VF */
1121
1122 QL_DPRINT2(ha, "success\n");
1123
1124 return (0);
1125
1126 qlnx_pci_attach_err:
1127
1128 qlnx_release(ha);
1129
1130 return (ENXIO);
1131 }
1132
1133 /*
1134 * Name: qlnx_pci_detach
1135 * Function: Unhooks the device from the operating system
1136 */
1137 static int
1138 qlnx_pci_detach(device_t dev)
1139 {
1140 qlnx_host_t *ha = NULL;
1141
1142 if ((ha = device_get_softc(dev)) == NULL) {
1143 device_printf(dev, "%s: cannot get softc\n", __func__);
1144 return (ENOMEM);
1145 }
1146
1147 if (qlnx_vf_device(ha) != 0) {
1148 #ifdef CONFIG_ECORE_SRIOV
1149 int ret;
1150
1151 ret = pci_iov_detach(dev);
1152 if (ret) {
1153 device_printf(dev, "%s: SRIOV in use\n", __func__);
1154 return (ret);
1155 }
1156
1157 #endif /* #ifdef CONFIG_ECORE_SRIOV */
1158
1159 #ifdef QLNX_ENABLE_IWARP
1160 if (qlnx_rdma_dev_remove(ha) != 0)
1161 return (EBUSY);
1162 #endif /* #ifdef QLNX_ENABLE_IWARP */
1163 }
1164
1165 QLNX_LOCK(ha);
1166 qlnx_stop(ha);
1167 QLNX_UNLOCK(ha);
1168
1169 qlnx_release(ha);
1170
1171 return (0);
1172 }
1173
1174 #ifdef QLNX_ENABLE_IWARP
1175
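/*
 * Extract the 4-bit personality nibble for this PCI function from the
 * qlnxe_rdma_configuration tunable (see the encoding example above).
 */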
1176 static uint8_t
1177 qlnx_get_personality(uint8_t pci_func)
1178 {
1179 uint8_t personality;
1180
1181 personality = (qlnxe_rdma_configuration >>
1182 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1183 QLNX_PERSONALIY_MASK;
1184 return (personality);
1185 }
1186
1187 static void
1188 qlnx_set_personality(qlnx_host_t *ha)
1189 {
1190 uint8_t personality;
1191
1192 personality = qlnx_get_personality(ha->pci_func);
1193
1194 switch (personality) {
1195 case QLNX_PERSONALITY_DEFAULT:
1196 device_printf(ha->pci_dev, "%s: DEFAULT\n",
1197 __func__);
1198 ha->personality = ECORE_PCI_DEFAULT;
1199 break;
1200
1201 case QLNX_PERSONALITY_ETH_ONLY:
1202 device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1203 __func__);
1204 ha->personality = ECORE_PCI_ETH;
1205 break;
1206
1207 case QLNX_PERSONALITY_ETH_IWARP:
1208 device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1209 __func__);
1210 ha->personality = ECORE_PCI_ETH_IWARP;
1211 break;
1212
1213 case QLNX_PERSONALITY_ETH_ROCE:
1214 device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1215 __func__);
1216 ha->personality = ECORE_PCI_ETH_ROCE;
1217 break;
1218 }
1219
1220 return;
1221 }
1222
1223 #endif /* #ifdef QLNX_ENABLE_IWARP */
1224
1225 static int
1226 qlnx_init_hw(qlnx_host_t *ha)
1227 {
1228 int rval = 0;
1229 struct ecore_hw_prepare_params params;
1230
1231 ecore_init_struct(&ha->cdev);
1232
1233 /* ha->dp_module = ECORE_MSG_PROBE |
1234 ECORE_MSG_INTR |
1235 ECORE_MSG_SP |
1236 ECORE_MSG_LINK |
1237 ECORE_MSG_SPQ |
1238 ECORE_MSG_RDMA;
1239 ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1240 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1241 ha->dp_level = ECORE_LEVEL_NOTICE;
1242 //ha->dp_level = ECORE_LEVEL_VERBOSE;
1243
1244 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1245
1246 ha->cdev.regview = ha->pci_reg;
1247
1248 ha->personality = ECORE_PCI_DEFAULT;
1249
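	/*
	 * On the SR-IOV VF, mark the ecore device as a VF and fall back to
	 * the register BAR when the doorbell BAR was not mapped. On the
	 * PF, the doorbell BAR is always used and, when iWARP support is
	 * compiled in, the RDMA personality may be overridden via sysctl.
	 */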
1250 if (qlnx_vf_device(ha) == 0) {
1251 ha->cdev.b_is_vf = true;
1252
1253 if (ha->pci_dbells != NULL) {
1254 ha->cdev.doorbells = ha->pci_dbells;
1255 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1256 ha->cdev.db_size = ha->dbells_size;
1257 } else {
1258 ha->pci_dbells = ha->pci_reg;
1259 }
1260 } else {
1261 ha->cdev.doorbells = ha->pci_dbells;
1262 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1263 ha->cdev.db_size = ha->dbells_size;
1264
1265 #ifdef QLNX_ENABLE_IWARP
1266
1267 if (qlnx_rdma_supported(ha) == 0)
1268 qlnx_set_personality(ha);
1269
1270 #endif /* #ifdef QLNX_ENABLE_IWARP */
1271 }
1272 QL_DPRINT2(ha, "%s: %s\n", __func__,
1273 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1274
	bzero(&params, sizeof (struct ecore_hw_prepare_params));
1276
1277 params.personality = ha->personality;
1278
1279 params.drv_resc_alloc = false;
1280 params.chk_reg_fifo = false;
1281 params.initiate_pf_flr = true;
1282 params.epoch = 0;
1283
	ecore_hw_prepare(&ha->cdev, &params);
1285
1286 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1287
1288 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1289 ha, &ha->cdev, &ha->cdev.hwfns[0]);
1290
1291 return (rval);
1292 }
1293
1294 static void
1295 qlnx_release(qlnx_host_t *ha)
1296 {
1297 device_t dev;
1298 int i;
1299
1300 dev = ha->pci_dev;
1301
1302 QL_DPRINT2(ha, "enter\n");
1303
1304 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1305 if (ha->idle_chk[i] != NULL) {
1306 free(ha->idle_chk[i], M_QLNXBUF);
1307 ha->idle_chk[i] = NULL;
1308 }
1309
1310 if (ha->grcdump[i] != NULL) {
1311 free(ha->grcdump[i], M_QLNXBUF);
1312 ha->grcdump[i] = NULL;
1313 }
1314 }
1315
1316 if (ha->flags.callout_init)
1317 callout_drain(&ha->qlnx_callout);
1318
1319 if (ha->flags.slowpath_start) {
1320 qlnx_slowpath_stop(ha);
1321 }
1322
1323 if (ha->flags.hw_init)
1324 ecore_hw_remove(&ha->cdev);
1325
1326 qlnx_del_cdev(ha);
1327
1328 if (ha->ifp != NULL)
1329 ether_ifdetach(ha->ifp);
1330
1331 qlnx_free_tx_dma_tag(ha);
1332
1333 qlnx_free_rx_dma_tag(ha);
1334
1335 qlnx_free_parent_dma_tag(ha);
1336
1337 if (qlnx_vf_device(ha) != 0) {
1338 qlnx_destroy_error_recovery_taskqueue(ha);
1339 }
1340
1341 for (i = 0; i < ha->num_rss; i++) {
1342 struct qlnx_fastpath *fp = &ha->fp_array[i];
1343
1344 if (ha->irq_vec[i].handle) {
1345 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1346 ha->irq_vec[i].handle);
1347 }
1348
1349 if (ha->irq_vec[i].irq) {
1350 (void)bus_release_resource(dev, SYS_RES_IRQ,
1351 ha->irq_vec[i].irq_rid,
1352 ha->irq_vec[i].irq);
1353 }
1354
1355 qlnx_free_tx_br(ha, fp);
1356 }
1357 qlnx_destroy_fp_taskqueues(ha);
1358
1359 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1360 if (ha->sp_handle[i])
1361 (void)bus_teardown_intr(dev, ha->sp_irq[i],
1362 ha->sp_handle[i]);
1363
1364 if (ha->sp_irq[i])
1365 (void) bus_release_resource(dev, SYS_RES_IRQ,
1366 ha->sp_irq_rid[i], ha->sp_irq[i]);
1367 }
1368
1369 qlnx_destroy_sp_taskqueues(ha);
1370
1371 if (ha->msix_count)
1372 pci_release_msi(dev);
1373
1374 if (ha->flags.lock_init) {
1375 mtx_destroy(&ha->hw_lock);
1376 }
1377
1378 if (ha->pci_reg)
1379 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1380 ha->pci_reg);
1381
1382 if (ha->dbells_size && ha->pci_dbells)
1383 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1384 ha->pci_dbells);
1385
1386 if (ha->msix_bar)
1387 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1388 ha->msix_bar);
1389
1390 QL_DPRINT2(ha, "exit\n");
1391 return;
1392 }
1393
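/*
 * Take the interface down, flag error recovery and capture a GRC dump and
 * idle-check data for every hw-function. Beyond marking the interface
 * down, this is a no-op on the SR-IOV VF.
 */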
1394 static void
1395 qlnx_trigger_dump(qlnx_host_t *ha)
1396 {
1397 int i;
1398
1399 if (ha->ifp != NULL)
1400 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1401
1402 QL_DPRINT2(ha, "enter\n");
1403
1404 if (qlnx_vf_device(ha) == 0)
1405 return;
1406
1407 ha->error_recovery = 1;
1408
1409 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1410 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1411 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1412 }
1413
1414 QL_DPRINT2(ha, "exit\n");
1415
1416 return;
1417 }
1418
1419 static int
1420 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1421 {
1422 int err, ret = 0;
1423 qlnx_host_t *ha;
1424
1425 err = sysctl_handle_int(oidp, &ret, 0, req);
1426
1427 if (err || !req->newptr)
1428 return (err);
1429
1430 if (ret == 1) {
1431 ha = (qlnx_host_t *)arg1;
1432 qlnx_trigger_dump(ha);
1433 }
1434 return (err);
1435 }
1436
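/*
 * Sysctl handler for the transmit interrupt coalescing interval; accepts
 * 1-255 microseconds and applies the value to tx queue 0 of every
 * fastpath (physical function only).
 */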
1437 static int
1438 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1439 {
1440 int err, i, ret = 0, usecs = 0;
1441 qlnx_host_t *ha;
1442 struct ecore_hwfn *p_hwfn;
1443 struct qlnx_fastpath *fp;
1444
1445 err = sysctl_handle_int(oidp, &usecs, 0, req);
1446
1447 if (err || !req->newptr || !usecs || (usecs > 255))
1448 return (err);
1449
1450 ha = (qlnx_host_t *)arg1;
1451
1452 if (qlnx_vf_device(ha) == 0)
1453 return (-1);
1454
1455 for (i = 0; i < ha->num_rss; i++) {
1456 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1457
1458 fp = &ha->fp_array[i];
1459
1460 if (fp->txq[0]->handle != NULL) {
1461 ret = ecore_set_queue_coalesce(p_hwfn, 0,
1462 (uint16_t)usecs, fp->txq[0]->handle);
1463 }
1464 }
1465
1466 if (!ret)
1467 ha->tx_coalesce_usecs = (uint8_t)usecs;
1468
1469 return (err);
1470 }
1471
1472 static int
1473 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1474 {
1475 int err, i, ret = 0, usecs = 0;
1476 qlnx_host_t *ha;
1477 struct ecore_hwfn *p_hwfn;
1478 struct qlnx_fastpath *fp;
1479
1480 err = sysctl_handle_int(oidp, &usecs, 0, req);
1481
1482 if (err || !req->newptr || !usecs || (usecs > 255))
1483 return (err);
1484
1485 ha = (qlnx_host_t *)arg1;
1486
1487 if (qlnx_vf_device(ha) == 0)
1488 return (-1);
1489
1490 for (i = 0; i < ha->num_rss; i++) {
1491 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1492
1493 fp = &ha->fp_array[i];
1494
1495 if (fp->rxq->handle != NULL) {
1496 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1497 0, fp->rxq->handle);
1498 }
1499 }
1500
1501 if (!ret)
1502 ha->rx_coalesce_usecs = (uint8_t)usecs;
1503
1504 return (err);
1505 }
1506
1507 static void
1508 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1509 {
1510 struct sysctl_ctx_list *ctx;
1511 struct sysctl_oid_list *children;
1512 struct sysctl_oid *ctx_oid;
1513
1514 ctx = device_get_sysctl_ctx(ha->pci_dev);
1515 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1516
1517 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1518 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1519 children = SYSCTL_CHILDREN(ctx_oid);
1520
1521 SYSCTL_ADD_QUAD(ctx, children,
1522 OID_AUTO, "sp_interrupts",
1523 CTLFLAG_RD, &ha->sp_interrupts,
1524 "No. of slowpath interrupts");
1525
1526 return;
1527 }
1528
1529 static void
1530 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1531 {
1532 struct sysctl_ctx_list *ctx;
1533 struct sysctl_oid_list *children;
1534 struct sysctl_oid_list *node_children;
1535 struct sysctl_oid *ctx_oid;
1536 int i, j;
1537 uint8_t name_str[16];
1538
1539 ctx = device_get_sysctl_ctx(ha->pci_dev);
1540 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1541
1542 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1543 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1544 children = SYSCTL_CHILDREN(ctx_oid);
1545
1546 for (i = 0; i < ha->num_rss; i++) {
1547 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1548 snprintf(name_str, sizeof(name_str), "%d", i);
1549
1550 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1551 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1552 node_children = SYSCTL_CHILDREN(ctx_oid);
1553
1554 /* Tx Related */
1555
1556 SYSCTL_ADD_QUAD(ctx, node_children,
1557 OID_AUTO, "tx_pkts_processed",
1558 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1559 "No. of packets processed for transmission");
1560
1561 SYSCTL_ADD_QUAD(ctx, node_children,
1562 OID_AUTO, "tx_pkts_freed",
1563 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1564 "No. of freed packets");
1565
1566 SYSCTL_ADD_QUAD(ctx, node_children,
1567 OID_AUTO, "tx_pkts_transmitted",
1568 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1569 "No. of transmitted packets");
1570
1571 SYSCTL_ADD_QUAD(ctx, node_children,
1572 OID_AUTO, "tx_pkts_completed",
1573 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1574 "No. of transmit completions");
1575
1576 SYSCTL_ADD_QUAD(ctx, node_children,
1577 OID_AUTO, "tx_non_tso_pkts",
1578 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non-LSO transmitted packets");
1580
1581 #ifdef QLNX_TRACE_PERF_DATA
1582
1583 SYSCTL_ADD_QUAD(ctx, node_children,
1584 OID_AUTO, "tx_pkts_trans_ctx",
1585 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1586 "No. of transmitted packets in transmit context");
1587
1588 SYSCTL_ADD_QUAD(ctx, node_children,
1589 OID_AUTO, "tx_pkts_compl_ctx",
1590 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1591 "No. of transmit completions in transmit context");
1592
1593 SYSCTL_ADD_QUAD(ctx, node_children,
1594 OID_AUTO, "tx_pkts_trans_fp",
1595 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1596 "No. of transmitted packets in taskqueue");
1597
1598 SYSCTL_ADD_QUAD(ctx, node_children,
1599 OID_AUTO, "tx_pkts_compl_fp",
1600 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1601 "No. of transmit completions in taskqueue");
1602
1603 SYSCTL_ADD_QUAD(ctx, node_children,
1604 OID_AUTO, "tx_pkts_compl_intr",
1605 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1606 "No. of transmit completions in interrupt ctx");
1607 #endif
1608
1609 SYSCTL_ADD_QUAD(ctx, node_children,
1610 OID_AUTO, "tx_tso_pkts",
1611 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmitted packets");
1613
1614 SYSCTL_ADD_QUAD(ctx, node_children,
1615 OID_AUTO, "tx_lso_wnd_min_len",
1616 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1617 "tx_lso_wnd_min_len");
1618
1619 SYSCTL_ADD_QUAD(ctx, node_children,
1620 OID_AUTO, "tx_defrag",
1621 CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1622 "tx_defrag");
1623
1624 SYSCTL_ADD_QUAD(ctx, node_children,
1625 OID_AUTO, "tx_nsegs_gt_elem_left",
1626 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1627 "tx_nsegs_gt_elem_left");
1628
1629 SYSCTL_ADD_UINT(ctx, node_children,
1630 OID_AUTO, "tx_tso_max_nsegs",
1631 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1632 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1633
1634 SYSCTL_ADD_UINT(ctx, node_children,
1635 OID_AUTO, "tx_tso_min_nsegs",
1636 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1637 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1638
1639 SYSCTL_ADD_UINT(ctx, node_children,
1640 OID_AUTO, "tx_tso_max_pkt_len",
1641 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1642 ha->fp_array[i].tx_tso_max_pkt_len,
1643 "tx_tso_max_pkt_len");
1644
1645 SYSCTL_ADD_UINT(ctx, node_children,
1646 OID_AUTO, "tx_tso_min_pkt_len",
1647 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1648 ha->fp_array[i].tx_tso_min_pkt_len,
1649 "tx_tso_min_pkt_len");
1650
1651 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1652 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1653 snprintf(name_str, sizeof(name_str),
1654 "tx_pkts_nseg_%02d", (j+1));
1655
1656 SYSCTL_ADD_QUAD(ctx, node_children,
1657 OID_AUTO, name_str, CTLFLAG_RD,
1658 &ha->fp_array[i].tx_pkts[j], name_str);
1659 }
1660
1661 #ifdef QLNX_TRACE_PERF_DATA
1662 for (j = 0; j < 18; j++) {
1663 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1664 snprintf(name_str, sizeof(name_str),
1665 "tx_pkts_hist_%02d", (j+1));
1666
1667 SYSCTL_ADD_QUAD(ctx, node_children,
1668 OID_AUTO, name_str, CTLFLAG_RD,
1669 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1670 }
1671 for (j = 0; j < 5; j++) {
1672 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1673 snprintf(name_str, sizeof(name_str),
1674 "tx_comInt_%02d", (j+1));
1675
1676 SYSCTL_ADD_QUAD(ctx, node_children,
1677 OID_AUTO, name_str, CTLFLAG_RD,
1678 &ha->fp_array[i].tx_comInt[j], name_str);
1679 }
1680 for (j = 0; j < 18; j++) {
1681 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1682 snprintf(name_str, sizeof(name_str),
1683 "tx_pkts_q_%02d", (j+1));
1684
1685 SYSCTL_ADD_QUAD(ctx, node_children,
1686 OID_AUTO, name_str, CTLFLAG_RD,
1687 &ha->fp_array[i].tx_pkts_q[j], name_str);
1688 }
1689 #endif
1690
1691 SYSCTL_ADD_QUAD(ctx, node_children,
1692 OID_AUTO, "err_tx_nsegs_gt_elem_left",
1693 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1694 "err_tx_nsegs_gt_elem_left");
1695
1696 SYSCTL_ADD_QUAD(ctx, node_children,
1697 OID_AUTO, "err_tx_dmamap_create",
1698 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1699 "err_tx_dmamap_create");
1700
1701 SYSCTL_ADD_QUAD(ctx, node_children,
1702 OID_AUTO, "err_tx_defrag_dmamap_load",
1703 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1704 "err_tx_defrag_dmamap_load");
1705
1706 SYSCTL_ADD_QUAD(ctx, node_children,
1707 OID_AUTO, "err_tx_non_tso_max_seg",
1708 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1709 "err_tx_non_tso_max_seg");
1710
1711 SYSCTL_ADD_QUAD(ctx, node_children,
1712 OID_AUTO, "err_tx_dmamap_load",
1713 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1714 "err_tx_dmamap_load");
1715
1716 SYSCTL_ADD_QUAD(ctx, node_children,
1717 OID_AUTO, "err_tx_defrag",
1718 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1719 "err_tx_defrag");
1720
1721 SYSCTL_ADD_QUAD(ctx, node_children,
1722 OID_AUTO, "err_tx_free_pkt_null",
1723 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1724 "err_tx_free_pkt_null");
1725
1726 SYSCTL_ADD_QUAD(ctx, node_children,
1727 OID_AUTO, "err_tx_cons_idx_conflict",
1728 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1729 "err_tx_cons_idx_conflict");
1730
1731 SYSCTL_ADD_QUAD(ctx, node_children,
1732 OID_AUTO, "lro_cnt_64",
1733 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1734 "lro_cnt_64");
1735
1736 SYSCTL_ADD_QUAD(ctx, node_children,
1737 OID_AUTO, "lro_cnt_128",
1738 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1739 "lro_cnt_128");
1740
1741 SYSCTL_ADD_QUAD(ctx, node_children,
1742 OID_AUTO, "lro_cnt_256",
1743 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1744 "lro_cnt_256");
1745
1746 SYSCTL_ADD_QUAD(ctx, node_children,
1747 OID_AUTO, "lro_cnt_512",
1748 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1749 "lro_cnt_512");
1750
1751 SYSCTL_ADD_QUAD(ctx, node_children,
1752 OID_AUTO, "lro_cnt_1024",
1753 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1754 "lro_cnt_1024");
1755
1756 /* Rx Related */
1757
1758 SYSCTL_ADD_QUAD(ctx, node_children,
1759 OID_AUTO, "rx_pkts",
1760 CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1761 "No. of received packets");
1762
1763 SYSCTL_ADD_QUAD(ctx, node_children,
1764 OID_AUTO, "tpa_start",
1765 CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1766 "No. of tpa_start packets");
1767
1768 SYSCTL_ADD_QUAD(ctx, node_children,
1769 OID_AUTO, "tpa_cont",
1770 CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1771 "No. of tpa_cont packets");
1772
1773 SYSCTL_ADD_QUAD(ctx, node_children,
1774 OID_AUTO, "tpa_end",
1775 CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1776 "No. of tpa_end packets");
1777
1778 SYSCTL_ADD_QUAD(ctx, node_children,
1779 OID_AUTO, "err_m_getcl",
1780 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1781 "err_m_getcl");
1782
1783 SYSCTL_ADD_QUAD(ctx, node_children,
1784 OID_AUTO, "err_m_getjcl",
1785 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1786 "err_m_getjcl");
1787
1788 SYSCTL_ADD_QUAD(ctx, node_children,
1789 OID_AUTO, "err_rx_hw_errors",
1790 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1791 "err_rx_hw_errors");
1792
1793 SYSCTL_ADD_QUAD(ctx, node_children,
1794 OID_AUTO, "err_rx_alloc_errors",
1795 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1796 "err_rx_alloc_errors");
1797 }
1798
1799 return;
1800 }
1801
1802 static void
1803 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1804 {
1805 struct sysctl_ctx_list *ctx;
1806 struct sysctl_oid_list *children;
1807 struct sysctl_oid *ctx_oid;
1808
1809 ctx = device_get_sysctl_ctx(ha->pci_dev);
1810 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1811
1812 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1813 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
1814 children = SYSCTL_CHILDREN(ctx_oid);
1815
1816 SYSCTL_ADD_QUAD(ctx, children,
1817 OID_AUTO, "no_buff_discards",
1818 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1819 "No. of packets discarded due to lack of buffer");
1820
1821 SYSCTL_ADD_QUAD(ctx, children,
1822 OID_AUTO, "packet_too_big_discard",
1823 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1824 "No. of packets discarded because packet was too big");
1825
1826 SYSCTL_ADD_QUAD(ctx, children,
1827 OID_AUTO, "ttl0_discard",
1828 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1829 "ttl0_discard");
1830
1831 SYSCTL_ADD_QUAD(ctx, children,
1832 OID_AUTO, "rx_ucast_bytes",
1833 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1834 "rx_ucast_bytes");
1835
1836 SYSCTL_ADD_QUAD(ctx, children,
1837 OID_AUTO, "rx_mcast_bytes",
1838 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1839 "rx_mcast_bytes");
1840
1841 SYSCTL_ADD_QUAD(ctx, children,
1842 OID_AUTO, "rx_bcast_bytes",
1843 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1844 "rx_bcast_bytes");
1845
1846 SYSCTL_ADD_QUAD(ctx, children,
1847 OID_AUTO, "rx_ucast_pkts",
1848 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1849 "rx_ucast_pkts");
1850
1851 SYSCTL_ADD_QUAD(ctx, children,
1852 OID_AUTO, "rx_mcast_pkts",
1853 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1854 "rx_mcast_pkts");
1855
1856 SYSCTL_ADD_QUAD(ctx, children,
1857 OID_AUTO, "rx_bcast_pkts",
1858 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1859 "rx_bcast_pkts");
1860
1861 SYSCTL_ADD_QUAD(ctx, children,
1862 OID_AUTO, "mftag_filter_discards",
1863 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1864 "mftag_filter_discards");
1865
1866 SYSCTL_ADD_QUAD(ctx, children,
1867 OID_AUTO, "mac_filter_discards",
1868 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1869 "mac_filter_discards");
1870
1871 SYSCTL_ADD_QUAD(ctx, children,
1872 OID_AUTO, "tx_ucast_bytes",
1873 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1874 "tx_ucast_bytes");
1875
1876 SYSCTL_ADD_QUAD(ctx, children,
1877 OID_AUTO, "tx_mcast_bytes",
1878 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1879 "tx_mcast_bytes");
1880
1881 SYSCTL_ADD_QUAD(ctx, children,
1882 OID_AUTO, "tx_bcast_bytes",
1883 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1884 "tx_bcast_bytes");
1885
1886 SYSCTL_ADD_QUAD(ctx, children,
1887 OID_AUTO, "tx_ucast_pkts",
1888 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1889 "tx_ucast_pkts");
1890
1891 SYSCTL_ADD_QUAD(ctx, children,
1892 OID_AUTO, "tx_mcast_pkts",
1893 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1894 "tx_mcast_pkts");
1895
1896 SYSCTL_ADD_QUAD(ctx, children,
1897 OID_AUTO, "tx_bcast_pkts",
1898 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1899 "tx_bcast_pkts");
1900
1901 SYSCTL_ADD_QUAD(ctx, children,
1902 OID_AUTO, "tx_err_drop_pkts",
1903 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1904 "tx_err_drop_pkts");
1905
1906 SYSCTL_ADD_QUAD(ctx, children,
1907 OID_AUTO, "tpa_coalesced_pkts",
1908 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1909 "tpa_coalesced_pkts");
1910
1911 SYSCTL_ADD_QUAD(ctx, children,
1912 OID_AUTO, "tpa_coalesced_events",
1913 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1914 "tpa_coalesced_events");
1915
1916 SYSCTL_ADD_QUAD(ctx, children,
1917 OID_AUTO, "tpa_aborts_num",
1918 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1919 "tpa_aborts_num");
1920
1921 SYSCTL_ADD_QUAD(ctx, children,
1922 OID_AUTO, "tpa_not_coalesced_pkts",
1923 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1924 "tpa_not_coalesced_pkts");
1925
1926 SYSCTL_ADD_QUAD(ctx, children,
1927 OID_AUTO, "tpa_coalesced_bytes",
1928 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1929 "tpa_coalesced_bytes");
1930
1931 SYSCTL_ADD_QUAD(ctx, children,
1932 OID_AUTO, "rx_64_byte_packets",
1933 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1934 "rx_64_byte_packets");
1935
1936 SYSCTL_ADD_QUAD(ctx, children,
1937 OID_AUTO, "rx_65_to_127_byte_packets",
1938 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1939 "rx_65_to_127_byte_packets");
1940
1941 SYSCTL_ADD_QUAD(ctx, children,
1942 OID_AUTO, "rx_128_to_255_byte_packets",
1943 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1944 "rx_128_to_255_byte_packets");
1945
1946 SYSCTL_ADD_QUAD(ctx, children,
1947 OID_AUTO, "rx_256_to_511_byte_packets",
1948 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1949 "rx_256_to_511_byte_packets");
1950
1951 SYSCTL_ADD_QUAD(ctx, children,
1952 OID_AUTO, "rx_512_to_1023_byte_packets",
1953 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1954 "rx_512_to_1023_byte_packets");
1955
1956 SYSCTL_ADD_QUAD(ctx, children,
1957 OID_AUTO, "rx_1024_to_1518_byte_packets",
1958 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1959 "rx_1024_to_1518_byte_packets");
1960
1961 SYSCTL_ADD_QUAD(ctx, children,
1962 OID_AUTO, "rx_1519_to_1522_byte_packets",
1963 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1964 "rx_1519_to_1522_byte_packets");
1965
1966 SYSCTL_ADD_QUAD(ctx, children,
1967 OID_AUTO, "rx_1523_to_2047_byte_packets",
1968 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1969 "rx_1523_to_2047_byte_packets");
1970
1971 SYSCTL_ADD_QUAD(ctx, children,
1972 OID_AUTO, "rx_2048_to_4095_byte_packets",
1973 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1974 "rx_2048_to_4095_byte_packets");
1975
1976 SYSCTL_ADD_QUAD(ctx, children,
1977 OID_AUTO, "rx_4096_to_9216_byte_packets",
1978 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1979 "rx_4096_to_9216_byte_packets");
1980
1981 SYSCTL_ADD_QUAD(ctx, children,
1982 OID_AUTO, "rx_9217_to_16383_byte_packets",
1983 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1984 "rx_9217_to_16383_byte_packets");
1985
1986 SYSCTL_ADD_QUAD(ctx, children,
1987 OID_AUTO, "rx_crc_errors",
1988 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1989 "rx_crc_errors");
1990
1991 SYSCTL_ADD_QUAD(ctx, children,
1992 OID_AUTO, "rx_mac_crtl_frames",
1993 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1994 "rx_mac_crtl_frames");
1995
1996 SYSCTL_ADD_QUAD(ctx, children,
1997 OID_AUTO, "rx_pause_frames",
1998 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1999 "rx_pause_frames");
2000
2001 SYSCTL_ADD_QUAD(ctx, children,
2002 OID_AUTO, "rx_pfc_frames",
2003 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
2004 "rx_pfc_frames");
2005
2006 SYSCTL_ADD_QUAD(ctx, children,
2007 OID_AUTO, "rx_align_errors",
2008 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
2009 "rx_align_errors");
2010
2011 SYSCTL_ADD_QUAD(ctx, children,
2012 OID_AUTO, "rx_carrier_errors",
2013 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
2014 "rx_carrier_errors");
2015
2016 SYSCTL_ADD_QUAD(ctx, children,
2017 OID_AUTO, "rx_oversize_packets",
2018 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
2019 "rx_oversize_packets");
2020
2021 SYSCTL_ADD_QUAD(ctx, children,
2022 OID_AUTO, "rx_jabbers",
2023 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
2024 "rx_jabbers");
2025
2026 SYSCTL_ADD_QUAD(ctx, children,
2027 OID_AUTO, "rx_undersize_packets",
2028 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2029 "rx_undersize_packets");
2030
2031 SYSCTL_ADD_QUAD(ctx, children,
2032 OID_AUTO, "rx_fragments",
2033 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2034 "rx_fragments");
2035
2036 SYSCTL_ADD_QUAD(ctx, children,
2037 OID_AUTO, "tx_64_byte_packets",
2038 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2039 "tx_64_byte_packets");
2040
2041 SYSCTL_ADD_QUAD(ctx, children,
2042 OID_AUTO, "tx_65_to_127_byte_packets",
2043 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2044 "tx_65_to_127_byte_packets");
2045
2046 SYSCTL_ADD_QUAD(ctx, children,
2047 OID_AUTO, "tx_128_to_255_byte_packets",
2048 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2049 "tx_128_to_255_byte_packets");
2050
2051 SYSCTL_ADD_QUAD(ctx, children,
2052 OID_AUTO, "tx_256_to_511_byte_packets",
2053 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2054 "tx_256_to_511_byte_packets");
2055
2056 SYSCTL_ADD_QUAD(ctx, children,
2057 OID_AUTO, "tx_512_to_1023_byte_packets",
2058 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2059 "tx_512_to_1023_byte_packets");
2060
2061 SYSCTL_ADD_QUAD(ctx, children,
2062 OID_AUTO, "tx_1024_to_1518_byte_packets",
2063 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2064 "tx_1024_to_1518_byte_packets");
2065
2066 SYSCTL_ADD_QUAD(ctx, children,
2067 OID_AUTO, "tx_1519_to_2047_byte_packets",
2068 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2069 "tx_1519_to_2047_byte_packets");
2070
2071 SYSCTL_ADD_QUAD(ctx, children,
2072 OID_AUTO, "tx_2048_to_4095_byte_packets",
2073 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2074 "tx_2048_to_4095_byte_packets");
2075
2076 SYSCTL_ADD_QUAD(ctx, children,
2077 OID_AUTO, "tx_4096_to_9216_byte_packets",
2078 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2079 "tx_4096_to_9216_byte_packets");
2080
2081 SYSCTL_ADD_QUAD(ctx, children,
2082 OID_AUTO, "tx_9217_to_16383_byte_packets",
2083 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2084 "tx_9217_to_16383_byte_packets");
2085
2086 SYSCTL_ADD_QUAD(ctx, children,
2087 OID_AUTO, "tx_pause_frames",
2088 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2089 "tx_pause_frames");
2090
2091 SYSCTL_ADD_QUAD(ctx, children,
2092 OID_AUTO, "tx_pfc_frames",
2093 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2094 "tx_pfc_frames");
2095
2096 SYSCTL_ADD_QUAD(ctx, children,
2097 OID_AUTO, "tx_lpi_entry_count",
2098 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2099 "tx_lpi_entry_count");
2100
2101 SYSCTL_ADD_QUAD(ctx, children,
2102 OID_AUTO, "tx_total_collisions",
2103 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2104 "tx_total_collisions");
2105
2106 SYSCTL_ADD_QUAD(ctx, children,
2107 OID_AUTO, "brb_truncates",
2108 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2109 "brb_truncates");
2110
2111 SYSCTL_ADD_QUAD(ctx, children,
2112 OID_AUTO, "brb_discards",
2113 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2114 "brb_discards");
2115
2116 SYSCTL_ADD_QUAD(ctx, children,
2117 OID_AUTO, "rx_mac_bytes",
2118 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2119 "rx_mac_bytes");
2120
2121 SYSCTL_ADD_QUAD(ctx, children,
2122 OID_AUTO, "rx_mac_uc_packets",
2123 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2124 "rx_mac_uc_packets");
2125
2126 SYSCTL_ADD_QUAD(ctx, children,
2127 OID_AUTO, "rx_mac_mc_packets",
2128 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2129 "rx_mac_mc_packets");
2130
2131 SYSCTL_ADD_QUAD(ctx, children,
2132 OID_AUTO, "rx_mac_bc_packets",
2133 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2134 "rx_mac_bc_packets");
2135
2136 SYSCTL_ADD_QUAD(ctx, children,
2137 OID_AUTO, "rx_mac_frames_ok",
2138 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2139 "rx_mac_frames_ok");
2140
2141 SYSCTL_ADD_QUAD(ctx, children,
2142 OID_AUTO, "tx_mac_bytes",
2143 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2144 "tx_mac_bytes");
2145
2146 SYSCTL_ADD_QUAD(ctx, children,
2147 OID_AUTO, "tx_mac_uc_packets",
2148 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2149 "tx_mac_uc_packets");
2150
2151 SYSCTL_ADD_QUAD(ctx, children,
2152 OID_AUTO, "tx_mac_mc_packets",
2153 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2154 "tx_mac_mc_packets");
2155
2156 SYSCTL_ADD_QUAD(ctx, children,
2157 OID_AUTO, "tx_mac_bc_packets",
2158 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2159 "tx_mac_bc_packets");
2160
2161 SYSCTL_ADD_QUAD(ctx, children,
2162 OID_AUTO, "tx_mac_ctrl_frames",
2163 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2164 "tx_mac_ctrl_frames");
2165 return;
2166 }
2167
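/*
 * Name: qlnx_add_sysctls
 * Function: Registers the driver level sysctl nodes (version strings, debug
 *	and trace knobs, coalesce settings, error counters) and pulls in the
 *	fastpath, slowpath and hardware statistics sysctls.
 */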
2168 static void
2169 qlnx_add_sysctls(qlnx_host_t *ha)
2170 {
2171 device_t dev = ha->pci_dev;
2172 struct sysctl_ctx_list *ctx;
2173 struct sysctl_oid_list *children;
2174
2175 ctx = device_get_sysctl_ctx(dev);
2176 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2177
2178 qlnx_add_fp_stats_sysctls(ha);
2179 qlnx_add_sp_stats_sysctls(ha);
2180
2181 if (qlnx_vf_device(ha) != 0)
2182 qlnx_add_hw_stats_sysctls(ha);
2183
2184 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2185 CTLFLAG_RD, qlnx_ver_str, 0,
2186 "Driver Version");
2187
2188 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2189 CTLFLAG_RD, ha->stormfw_ver, 0,
2190 "STORM Firmware Version");
2191
2192 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2193 CTLFLAG_RD, ha->mfw_ver, 0,
2194 "Management Firmware Version");
2195
2196 SYSCTL_ADD_UINT(ctx, children,
2197 OID_AUTO, "personality", CTLFLAG_RD,
2198 &ha->personality, ha->personality,
2199 "\tpersonality = 0 => Ethernet Only\n"
2200 "\tpersonality = 3 => Ethernet and RoCE\n"
2201 "\tpersonality = 4 => Ethernet and iWARP\n"
2202 "\tpersonality = 6 => Default in Shared Memory\n");
2203
2204 ha->dbg_level = 0;
2205 SYSCTL_ADD_UINT(ctx, children,
2206 OID_AUTO, "debug", CTLFLAG_RW,
2207 &ha->dbg_level, ha->dbg_level, "Debug Level");
2208
2209 ha->dp_level = 0x01;
2210 SYSCTL_ADD_UINT(ctx, children,
2211 OID_AUTO, "dp_level", CTLFLAG_RW,
2212 &ha->dp_level, ha->dp_level, "DP Level");
2213
2214 ha->dbg_trace_lro_cnt = 0;
2215 SYSCTL_ADD_UINT(ctx, children,
2216 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2217 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2218 "Trace LRO Counts");
2219
2220 ha->dbg_trace_tso_pkt_len = 0;
2221 SYSCTL_ADD_UINT(ctx, children,
2222 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2223 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2224 "Trace TSO packet lengths");
2225
2226 ha->dp_module = 0;
2227 SYSCTL_ADD_UINT(ctx, children,
2228 OID_AUTO, "dp_module", CTLFLAG_RW,
2229 &ha->dp_module, ha->dp_module, "DP Module");
2230
2231 ha->err_inject = 0;
2232
2233 SYSCTL_ADD_UINT(ctx, children,
2234 OID_AUTO, "err_inject", CTLFLAG_RW,
2235 &ha->err_inject, ha->err_inject, "Error Inject");
2236
2237 ha->storm_stats_enable = 0;
2238
2239 SYSCTL_ADD_UINT(ctx, children,
2240 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2241 &ha->storm_stats_enable, ha->storm_stats_enable,
2242 "Enable Storm Statistics Gathering");
2243
2244 ha->storm_stats_index = 0;
2245
2246 SYSCTL_ADD_UINT(ctx, children,
2247 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2248 &ha->storm_stats_index, ha->storm_stats_index,
2249 "Enable Storm Statistics Gathering Current Index");
2250
2251 ha->grcdump_taken = 0;
2252 SYSCTL_ADD_UINT(ctx, children,
2253 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2254 &ha->grcdump_taken, ha->grcdump_taken,
2255 "grcdump_taken");
2256
2257 ha->idle_chk_taken = 0;
2258 SYSCTL_ADD_UINT(ctx, children,
2259 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2260 &ha->idle_chk_taken, ha->idle_chk_taken,
2261 "idle_chk_taken");
2262
2263 SYSCTL_ADD_UINT(ctx, children,
2264 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2265 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2266 "rx_coalesce_usecs");
2267
2268 SYSCTL_ADD_UINT(ctx, children,
2269 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2270 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2271 "tx_coalesce_usecs");
2272
2273 SYSCTL_ADD_PROC(ctx, children,
2274 OID_AUTO, "trigger_dump",
2275 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2276 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2277
2278 SYSCTL_ADD_PROC(ctx, children,
2279 OID_AUTO, "set_rx_coalesce_usecs",
2280 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2281 (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2282 "rx interrupt coalesce period microseconds");
2283
2284 SYSCTL_ADD_PROC(ctx, children,
2285 OID_AUTO, "set_tx_coalesce_usecs",
2286 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2287 (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2288 "tx interrupt coalesce period microseconds");
2289
2290 ha->rx_pkt_threshold = 128;
2291 SYSCTL_ADD_UINT(ctx, children,
2292 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2293 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2294 "No. of Rx Pkts to process at a time");
2295
2296 ha->rx_jumbo_buf_eq_mtu = 0;
2297 SYSCTL_ADD_UINT(ctx, children,
2298 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2299 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2300 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2301 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2302
2303 SYSCTL_ADD_QUAD(ctx, children,
2304 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2305 &ha->err_illegal_intr, "err_illegal_intr");
2306
2307 SYSCTL_ADD_QUAD(ctx, children,
2308 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2309 &ha->err_fp_null, "err_fp_null");
2310
2311 SYSCTL_ADD_QUAD(ctx, children,
2312 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2313 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2314 return;
2315 }
2316
2317 /*****************************************************************************
2318 * Operating System Network Interface Functions
2319 *****************************************************************************/
2320
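/*
 * Name: qlnx_init_ifnet
 * Function: Allocates and initializes the ifnet for the port: sets the
 *	driver entry points, capabilities and TSO limits, programs the MAC
 *	address and registers the ifmedia types based on the PCI device id.
 */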
2321 static void
2322 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2323 {
2324 uint16_t device_id;
2325 struct ifnet *ifp;
2326
2327 ifp = ha->ifp = if_alloc(IFT_ETHER);
2328
2329 if (ifp == NULL)
2330 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2331
2332 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2333
2334 device_id = pci_get_device(ha->pci_dev);
2335
2336 #if __FreeBSD_version >= 1000000
2337
2338 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2339 ifp->if_baudrate = IF_Gbps(40);
2340 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2341 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2342 ifp->if_baudrate = IF_Gbps(25);
2343 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2344 ifp->if_baudrate = IF_Gbps(50);
2345 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2346 ifp->if_baudrate = IF_Gbps(100);
2347
2348 ifp->if_capabilities = IFCAP_LINKSTATE;
2349 #else
2350 ifp->if_mtu = ETHERMTU;
2351 ifp->if_baudrate = (1 * 1000 * 1000 *1000);
2352
2353 #endif /* #if __FreeBSD_version >= 1000000 */
2354
2355 ifp->if_init = qlnx_init;
2356 ifp->if_softc = ha;
2357 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2358 ifp->if_ioctl = qlnx_ioctl;
2359 ifp->if_transmit = qlnx_transmit;
2360 ifp->if_qflush = qlnx_qflush;
2361
2362 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2363 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2364 IFQ_SET_READY(&ifp->if_snd);
2365
2366 #if __FreeBSD_version >= 1100036
2367 if_setgetcounterfn(ifp, qlnx_get_counter);
2368 #endif
2369
2370 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2371
2372 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2373
2374 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2375 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2376 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2377 uint32_t rnd;
2378
2379 rnd = arc4random();
2380
2381 ha->primary_mac[0] = 0x00;
2382 ha->primary_mac[1] = 0x0e;
2383 ha->primary_mac[2] = 0x1e;
2384 ha->primary_mac[3] = rnd & 0xFF;
2385 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2386 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2387 }
2388
2389 ether_ifattach(ifp, ha->primary_mac);
2390 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2391
2392 ifp->if_capabilities = IFCAP_HWCSUM;
2393 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2394
2395 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2396 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2397 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2398 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2399 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2400 ifp->if_capabilities |= IFCAP_TSO4;
2401 ifp->if_capabilities |= IFCAP_TSO6;
2402 ifp->if_capabilities |= IFCAP_LRO;
2403
2404 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2405 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2406 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2407 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2408
2409 ifp->if_capenable = ifp->if_capabilities;
2410
2411 ifp->if_hwassist = CSUM_IP;
2412 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2413 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2414 ifp->if_hwassist |= CSUM_TSO;
2415
2416 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2417
2418 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2419 qlnx_media_status);
2420
2421 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2422 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2423 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2424 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2425 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2426 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2427 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2428 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2429 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2430 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2431 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2432 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2433 ifmedia_add(&ha->media,
2434 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2435 ifmedia_add(&ha->media,
2436 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2437 ifmedia_add(&ha->media,
2438 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2439 }
2440
2441 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2442 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2443
2444 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2445
2446 QL_DPRINT2(ha, "exit\n");
2447
2448 return;
2449 }
2450
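/*
 * Name: qlnx_init_locked
 * Function: Stops the interface if it is running and then (re)loads the
 *	adapter. Must be called with the driver lock held.
 */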
2451 static void
2452 qlnx_init_locked(qlnx_host_t *ha)
2453 {
2454 struct ifnet *ifp = ha->ifp;
2455
2456 QL_DPRINT1(ha, "Driver Initialization start\n");
2457
2458 qlnx_stop(ha);
2459
2460 if (qlnx_load(ha) == 0) {
2461 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2462 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2463
2464 #ifdef QLNX_ENABLE_IWARP
2465 if (qlnx_vf_device(ha) != 0) {
2466 qlnx_rdma_dev_open(ha);
2467 }
2468 #endif /* #ifdef QLNX_ENABLE_IWARP */
2469 }
2470
2471 return;
2472 }
2473
2474 static void
2475 qlnx_init(void *arg)
2476 {
2477 qlnx_host_t *ha;
2478
2479 ha = (qlnx_host_t *)arg;
2480
2481 QL_DPRINT2(ha, "enter\n");
2482
2483 QLNX_LOCK(ha);
2484 qlnx_init_locked(ha);
2485 QLNX_UNLOCK(ha);
2486
2487 QL_DPRINT2(ha, "exit\n");
2488
2489 return;
2490 }
2491
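/*
 * Name: qlnx_config_mcast_mac_addr
 * Function: Adds or removes a single multicast MAC address in the hardware
 *	filter via an ecore multicast filter command.
 */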
2492 static int
2493 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2494 {
2495 struct ecore_filter_mcast *mcast;
2496 struct ecore_dev *cdev;
2497 int rc;
2498
2499 cdev = &ha->cdev;
2500
2501 mcast = &ha->ecore_mcast;
2502 bzero(mcast, sizeof(struct ecore_filter_mcast));
2503
2504 if (add_mac)
2505 mcast->opcode = ECORE_FILTER_ADD;
2506 else
2507 mcast->opcode = ECORE_FILTER_REMOVE;
2508
2509 mcast->num_mc_addrs = 1;
2510 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2511
2512 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2513
2514 return (rc);
2515 }
2516
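/*
 * Name: qlnx_hw_add_mcast
 * Function: Programs the given multicast address into the hardware filter
 *	and records it in the first free slot of the software multicast table,
 *	unless it is already present.
 */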
2517 static int
2518 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2519 {
2520 int i;
2521
2522 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2523 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2524 return 0; /* it has already been added */
2525 }
2526
2527 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2528 if ((ha->mcast[i].addr[0] == 0) &&
2529 (ha->mcast[i].addr[1] == 0) &&
2530 (ha->mcast[i].addr[2] == 0) &&
2531 (ha->mcast[i].addr[3] == 0) &&
2532 (ha->mcast[i].addr[4] == 0) &&
2533 (ha->mcast[i].addr[5] == 0)) {
2534 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2535 return (-1);
2536
2537 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2538 ha->nmcast++;
2539
2540 return 0;
2541 }
2542 }
2543 return 0;
2544 }
2545
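/*
 * Name: qlnx_hw_del_mcast
 * Function: Removes the given multicast address from the hardware filter and
 *	clears its entry in the software multicast table.
 */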
2546 static int
2547 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2548 {
2549 int i;
2550
2551 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2552 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2553 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2554 return (-1);
2555
2556 ha->mcast[i].addr[0] = 0;
2557 ha->mcast[i].addr[1] = 0;
2558 ha->mcast[i].addr[2] = 0;
2559 ha->mcast[i].addr[3] = 0;
2560 ha->mcast[i].addr[4] = 0;
2561 ha->mcast[i].addr[5] = 0;
2562
2563 ha->nmcast--;
2564
2565 return 0;
2566 }
2567 }
2568 return 0;
2569 }
2570
2571 /*
2572 * Name: qlnx_hw_set_multi
2573 * Function: Sets the multicast addresses provided by the host O.S. into the
2574 * hardware (for the given interface)
2575 */
2576 static void
2577 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2578 uint32_t add_mac)
2579 {
2580 int i;
2581
2582 for (i = 0; i < mcnt; i++) {
2583 if (add_mac) {
2584 if (qlnx_hw_add_mcast(ha, mta))
2585 break;
2586 } else {
2587 if (qlnx_hw_del_mcast(ha, mta))
2588 break;
2589 }
2590
2591 mta += ETHER_HDR_LEN;
2592 }
2593 return;
2594 }
2595
2596 static u_int
2597 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2598 {
2599 uint8_t *mta = arg;
2600
2601 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2602 return (0);
2603
2604 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2605
2606 return (1);
2607 }
2608
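/*
 * Name: qlnx_set_multi
 * Function: Collects the interface's link level multicast addresses and adds
 *	them to (or removes them from) the hardware filter.
 */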
2609 static int
2610 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2611 {
2612 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2613 struct ifnet *ifp = ha->ifp;
2614 u_int mcnt;
2615
2616 if (qlnx_vf_device(ha) == 0)
2617 return (0);
2618
2619 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2620
2621 QLNX_LOCK(ha);
2622 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2623 QLNX_UNLOCK(ha);
2624
2625 return (0);
2626 }
2627
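/*
 * Name: qlnx_set_promisc
 * Function: Enables acceptance of unmatched unicast and multicast frames
 *	(promiscuous mode) in the rx accept filter.
 */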
2628 static int
2629 qlnx_set_promisc(qlnx_host_t *ha)
2630 {
2631 int rc = 0;
2632 uint8_t filter;
2633
2634 if (qlnx_vf_device(ha) == 0)
2635 return (0);
2636
2637 filter = ha->filter;
2638 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2639 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2640
2641 rc = qlnx_set_rx_accept_filter(ha, filter);
2642 return (rc);
2643 }
2644
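/*
 * Name: qlnx_set_allmulti
 * Function: Enables acceptance of unmatched multicast frames in the rx
 *	accept filter.
 */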
2645 static int
2646 qlnx_set_allmulti(qlnx_host_t *ha)
2647 {
2648 int rc = 0;
2649 uint8_t filter;
2650
2651 if (qlnx_vf_device(ha) == 0)
2652 return (0);
2653
2654 filter = ha->filter;
2655 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2656 rc = qlnx_set_rx_accept_filter(ha, filter);
2657
2658 return (rc);
2659 }
2660
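/*
 * Name: qlnx_ioctl
 * Function: Handles the interface ioctls (address, MTU, flags, multicast,
 *	media, capabilities and SFP I2C reads), deferring everything else to
 *	ether_ioctl().
 */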
2661 static int
2662 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2663 {
2664 int ret = 0, mask;
2665 struct ifreq *ifr = (struct ifreq *)data;
2666 struct ifaddr *ifa = (struct ifaddr *)data;
2667 qlnx_host_t *ha;
2668
2669 ha = (qlnx_host_t *)ifp->if_softc;
2670
2671 switch (cmd) {
2672 case SIOCSIFADDR:
2673 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2674
2675 if (ifa->ifa_addr->sa_family == AF_INET) {
2676 ifp->if_flags |= IFF_UP;
2677 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2678 QLNX_LOCK(ha);
2679 qlnx_init_locked(ha);
2680 QLNX_UNLOCK(ha);
2681 }
2682 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2683 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2684
2685 arp_ifinit(ifp, ifa);
2686 } else {
2687 ether_ioctl(ifp, cmd, data);
2688 }
2689 break;
2690
2691 case SIOCSIFMTU:
2692 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2693
2694 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2695 ret = EINVAL;
2696 } else {
2697 QLNX_LOCK(ha);
2698 ifp->if_mtu = ifr->ifr_mtu;
2699 ha->max_frame_size =
2700 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2701 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2702 qlnx_init_locked(ha);
2703 }
2704
2705 QLNX_UNLOCK(ha);
2706 }
2707
2708 break;
2709
2710 case SIOCSIFFLAGS:
2711 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2712
2713 QLNX_LOCK(ha);
2714
2715 if (ifp->if_flags & IFF_UP) {
2716 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2717 if ((ifp->if_flags ^ ha->if_flags) &
2718 IFF_PROMISC) {
2719 ret = qlnx_set_promisc(ha);
2720 } else if ((ifp->if_flags ^ ha->if_flags) &
2721 IFF_ALLMULTI) {
2722 ret = qlnx_set_allmulti(ha);
2723 }
2724 } else {
2725 ha->max_frame_size = ifp->if_mtu +
2726 ETHER_HDR_LEN + ETHER_CRC_LEN;
2727 qlnx_init_locked(ha);
2728 }
2729 } else {
2730 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2731 qlnx_stop(ha);
2732 ha->if_flags = ifp->if_flags;
2733 }
2734
2735 QLNX_UNLOCK(ha);
2736 break;
2737
2738 case SIOCADDMULTI:
2739 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2740
2741 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2742 if (qlnx_set_multi(ha, 1))
2743 ret = EINVAL;
2744 }
2745 break;
2746
2747 case SIOCDELMULTI:
2748 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2749
2750 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2751 if (qlnx_set_multi(ha, 0))
2752 ret = EINVAL;
2753 }
2754 break;
2755
2756 case SIOCSIFMEDIA:
2757 case SIOCGIFMEDIA:
2758 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2759
2760 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2761 break;
2762
2763 case SIOCSIFCAP:
2764
2765 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2766
2767 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2768
2769 if (mask & IFCAP_HWCSUM)
2770 ifp->if_capenable ^= IFCAP_HWCSUM;
2771 if (mask & IFCAP_TSO4)
2772 ifp->if_capenable ^= IFCAP_TSO4;
2773 if (mask & IFCAP_TSO6)
2774 ifp->if_capenable ^= IFCAP_TSO6;
2775 if (mask & IFCAP_VLAN_HWTAGGING)
2776 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2777 if (mask & IFCAP_VLAN_HWTSO)
2778 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2779 if (mask & IFCAP_LRO)
2780 ifp->if_capenable ^= IFCAP_LRO;
2781
2782 QLNX_LOCK(ha);
2783
2784 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2785 qlnx_init_locked(ha);
2786
2787 QLNX_UNLOCK(ha);
2788
2789 VLAN_CAPABILITIES(ifp);
2790 break;
2791
2792 #if (__FreeBSD_version >= 1100101)
2793
2794 case SIOCGI2C:
2795 {
2796 struct ifi2creq i2c;
2797 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2798 struct ecore_ptt *p_ptt;
2799
2800 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2801
2802 if (ret)
2803 break;
2804
2805 if ((i2c.len > sizeof (i2c.data)) ||
2806 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2807 ret = EINVAL;
2808 break;
2809 }
2810
2811 p_ptt = ecore_ptt_acquire(p_hwfn);
2812
2813 if (!p_ptt) {
2814 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2815 ret = -1;
2816 break;
2817 }
2818
2819 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2820 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2821 i2c.len, &i2c.data[0]);
2822
2823 ecore_ptt_release(p_hwfn, p_ptt);
2824
2825 if (ret) {
2826 ret = -1;
2827 break;
2828 }
2829
2830 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2831
2832 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2833 len = %d addr = 0x%02x offset = 0x%04x \
2834 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2835 0x%02x 0x%02x 0x%02x\n",
2836 ret, i2c.len, i2c.dev_addr, i2c.offset,
2837 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2838 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2839 break;
2840 }
2841 #endif /* #if (__FreeBSD_version >= 1100101) */
2842
2843 default:
2844 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2845 ret = ether_ioctl(ifp, cmd, data);
2846 break;
2847 }
2848
2849 return (ret);
2850 }
2851
2852 static int
2853 qlnx_media_change(struct ifnet *ifp)
2854 {
2855 qlnx_host_t *ha;
2856 struct ifmedia *ifm;
2857 int ret = 0;
2858
2859 ha = (qlnx_host_t *)ifp->if_softc;
2860
2861 QL_DPRINT2(ha, "enter\n");
2862
2863 ifm = &ha->media;
2864
2865 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2866 ret = EINVAL;
2867
2868 QL_DPRINT2(ha, "exit\n");
2869
2870 return (ret);
2871 }
2872
2873 static void
2874 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2875 {
2876 qlnx_host_t *ha;
2877
2878 ha = (qlnx_host_t *)ifp->if_softc;
2879
2880 QL_DPRINT2(ha, "enter\n");
2881
2882 ifmr->ifm_status = IFM_AVALID;
2883 ifmr->ifm_active = IFM_ETHER;
2884
2885 if (ha->link_up) {
2886 ifmr->ifm_status |= IFM_ACTIVE;
2887 ifmr->ifm_active |=
2888 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2889
2890 if (ha->if_link.link_partner_caps &
2891 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2892 ifmr->ifm_active |=
2893 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2894 }
2895
2896 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2897
2898 return;
2899 }
2900
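/*
 * Name: qlnx_free_tx_pkt
 * Function: Completes the transmit packet at the software consumer index:
 *	unmaps its DMA buffer, frees the mbuf, consumes its buffer descriptors
 *	from the tx chain and clears the software ring entry.
 */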
2901 static void
2902 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2903 struct qlnx_tx_queue *txq)
2904 {
2905 u16 idx;
2906 struct mbuf *mp;
2907 bus_dmamap_t map;
2908 int i;
2909 // struct eth_tx_bd *tx_data_bd;
2910 struct eth_tx_1st_bd *first_bd;
2911 int nbds = 0;
2912
2913 idx = txq->sw_tx_cons;
2914 mp = txq->sw_tx_ring[idx].mp;
2915 map = txq->sw_tx_ring[idx].map;
2916
2917 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2918 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2919
2920 QL_DPRINT1(ha, "(mp == NULL) "
2921 " tx_idx = 0x%x"
2922 " ecore_prod_idx = 0x%x"
2923 " ecore_cons_idx = 0x%x"
2924 " hw_bd_cons = 0x%x"
2925 " txq_db_last = 0x%x"
2926 " elem_left = 0x%x\n",
2927 fp->rss_id,
2928 ecore_chain_get_prod_idx(&txq->tx_pbl),
2929 ecore_chain_get_cons_idx(&txq->tx_pbl),
2930 le16toh(*txq->hw_cons_ptr),
2931 txq->tx_db.raw,
2932 ecore_chain_get_elem_left(&txq->tx_pbl));
2933
2934 fp->err_tx_free_pkt_null++;
2935
2936 //DEBUG
2937 qlnx_trigger_dump(ha);
2938
2939 return;
2940 } else {
2941 QLNX_INC_OPACKETS((ha->ifp));
2942 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2943
2944 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2945 bus_dmamap_unload(ha->tx_tag, map);
2946
2947 fp->tx_pkts_freed++;
2948 fp->tx_pkts_completed++;
2949
2950 m_freem(mp);
2951 }
2952
2953 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2954 nbds = first_bd->data.nbds;
2955
2956 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2957
2958 for (i = 1; i < nbds; i++) {
2959 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2960 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2961 }
2962 txq->sw_tx_ring[idx].flags = 0;
2963 txq->sw_tx_ring[idx].mp = NULL;
2964 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2965
2966 return;
2967 }
2968
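/*
 * Name: qlnx_tx_int
 * Function: Processes transmit completions, freeing packets until the
 *	software consumer index catches up with the hardware consumer index.
 */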
2969 static void
2970 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2971 struct qlnx_tx_queue *txq)
2972 {
2973 u16 hw_bd_cons;
2974 u16 ecore_cons_idx;
2975 uint16_t diff;
2976 uint16_t idx, idx2;
2977
2978 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2979
2980 while (hw_bd_cons !=
2981 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2982 if (hw_bd_cons < ecore_cons_idx) {
2983 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2984 } else {
2985 diff = hw_bd_cons - ecore_cons_idx;
2986 }
2987 if ((diff > TX_RING_SIZE) ||
2988 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2989 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2990
2991 QL_DPRINT1(ha, "(diff = 0x%x) "
2992 " tx_idx = 0x%x"
2993 " ecore_prod_idx = 0x%x"
2994 " ecore_cons_idx = 0x%x"
2995 " hw_bd_cons = 0x%x"
2996 " txq_db_last = 0x%x"
2997 " elem_left = 0x%x\n",
2998 diff,
2999 fp->rss_id,
3000 ecore_chain_get_prod_idx(&txq->tx_pbl),
3001 ecore_chain_get_cons_idx(&txq->tx_pbl),
3002 le16toh(*txq->hw_cons_ptr),
3003 txq->tx_db.raw,
3004 ecore_chain_get_elem_left(&txq->tx_pbl));
3005
3006 fp->err_tx_cons_idx_conflict++;
3007
3008 //DEBUG
3009 qlnx_trigger_dump(ha);
3010 }
3011
3012 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3013 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3014 prefetch(txq->sw_tx_ring[idx].mp);
3015 prefetch(txq->sw_tx_ring[idx2].mp);
3016
3017 qlnx_free_tx_pkt(ha, fp, txq);
3018
3019 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3020 }
3021 return;
3022 }
3023
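/*
 * Name: qlnx_transmit_locked
 * Function: Enqueues the mbuf on the fastpath's buf_ring and drains the ring
 *	into the hardware tx queue. Called with the fastpath tx lock held.
 */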
3024 static int
3025 qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
3026 {
3027 int ret = 0;
3028 struct qlnx_tx_queue *txq;
3029 qlnx_host_t * ha;
3030 uint16_t elem_left;
3031
3032 txq = fp->txq[0];
3033 ha = (qlnx_host_t *)fp->edev;
3034
3035 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3036 if(mp != NULL)
3037 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3038 return (ret);
3039 }
3040
3041 if(mp != NULL)
3042 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3043
3044 mp = drbr_peek(ifp, fp->tx_br);
3045
3046 while (mp != NULL) {
3047 if (qlnx_send(ha, fp, &mp)) {
3048 if (mp != NULL) {
3049 drbr_putback(ifp, fp->tx_br, mp);
3050 } else {
3051 fp->tx_pkts_processed++;
3052 drbr_advance(ifp, fp->tx_br);
3053 }
3054 goto qlnx_transmit_locked_exit;
3055
3056 } else {
3057 drbr_advance(ifp, fp->tx_br);
3058 fp->tx_pkts_transmitted++;
3059 fp->tx_pkts_processed++;
3060 }
3061
3062 mp = drbr_peek(ifp, fp->tx_br);
3063 }
3064
3065 qlnx_transmit_locked_exit:
3066 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3067 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3068 < QLNX_TX_ELEM_MAX_THRESH))
3069 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3070
3071 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3072 return ret;
3073 }
3074
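/*
 * Name: qlnx_transmit
 * Function: if_transmit entry point. Selects a fastpath from the mbuf's flow
 *	id and either transmits directly when the tx lock is available or
 *	defers the work to the fastpath taskqueue.
 */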
3075 static int
3076 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
3077 {
3078 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
3079 struct qlnx_fastpath *fp;
3080 int rss_id = 0, ret = 0;
3081
3082 #ifdef QLNX_TRACEPERF_DATA
3083 uint64_t tx_pkts = 0, tx_compl = 0;
3084 #endif
3085
3086 QL_DPRINT2(ha, "enter\n");
3087
3088 #if __FreeBSD_version >= 1100000
3089 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3090 #else
3091 if (mp->m_flags & M_FLOWID)
3092 #endif
3093 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3094 ha->num_rss;
3095
3096 fp = &ha->fp_array[rss_id];
3097
3098 if (fp->tx_br == NULL) {
3099 ret = EINVAL;
3100 goto qlnx_transmit_exit;
3101 }
3102
3103 if (mtx_trylock(&fp->tx_mtx)) {
3104 #ifdef QLNX_TRACEPERF_DATA
3105 tx_pkts = fp->tx_pkts_transmitted;
3106 tx_compl = fp->tx_pkts_completed;
3107 #endif
3108
3109 ret = qlnx_transmit_locked(ifp, fp, mp);
3110
3111 #ifdef QLNX_TRACEPERF_DATA
3112 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3113 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3114 #endif
3115 mtx_unlock(&fp->tx_mtx);
3116 } else {
3117 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3118 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3119 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3120 }
3121 }
3122
3123 qlnx_transmit_exit:
3124
3125 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3126 return ret;
3127 }
3128
3129 static void
3130 qlnx_qflush(struct ifnet *ifp)
3131 {
3132 int rss_id;
3133 struct qlnx_fastpath *fp;
3134 struct mbuf *mp;
3135 qlnx_host_t *ha;
3136
3137 ha = (qlnx_host_t *)ifp->if_softc;
3138
3139 QL_DPRINT2(ha, "enter\n");
3140
3141 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3142 fp = &ha->fp_array[rss_id];
3143
3144 if (fp == NULL)
3145 continue;
3146
3147 if (fp->tx_br) {
3148 mtx_lock(&fp->tx_mtx);
3149
3150 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3151 fp->tx_pkts_freed++;
3152 m_freem(mp);
3153 }
3154 mtx_unlock(&fp->tx_mtx);
3155 }
3156 }
3157 QL_DPRINT2(ha, "exit\n");
3158
3159 return;
3160 }
3161
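/*
 * Name: qlnx_txq_doorbell_wr32
 * Function: Writes the tx producer update to the doorbell BAR and issues bus
 *	barriers to ensure the doorbell write is posted.
 */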
3162 static void
3163 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3164 {
3165 uint32_t offset;
3166
3167 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3168
3169 bus_write_4(ha->pci_dbells, offset, value);
3170 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3171 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
3172
3173 return;
3174 }
3175
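/*
 * Name: qlnx_tcp_offset
 * Function: Returns the offset of the TCP payload (Ethernet + IP/IPv6 + TCP
 *	header lengths) within the given mbuf; used to locate the end of the
 *	headers for LSO processing.
 */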
3176 static uint32_t
3177 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3178 {
3179 struct ether_vlan_header *eh = NULL;
3180 struct ip *ip = NULL;
3181 struct ip6_hdr *ip6 = NULL;
3182 struct tcphdr *th = NULL;
3183 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3184 uint16_t etype = 0;
3185 uint8_t buf[sizeof(struct ip6_hdr)];
3186
3187 eh = mtod(mp, struct ether_vlan_header *);
3188
3189 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3190 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3191 etype = ntohs(eh->evl_proto);
3192 } else {
3193 ehdrlen = ETHER_HDR_LEN;
3194 etype = ntohs(eh->evl_encap_proto);
3195 }
3196
3197 switch (etype) {
3198 case ETHERTYPE_IP:
3199 ip = (struct ip *)(mp->m_data + ehdrlen);
3200
3201 ip_hlen = sizeof (struct ip);
3202
3203 if (mp->m_len < (ehdrlen + ip_hlen)) {
3204 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3205 ip = (struct ip *)buf;
3206 }
3207
3208 th = (struct tcphdr *)(ip + 1);
3209 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3210 break;
3211
3212 case ETHERTYPE_IPV6:
3213 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3214
3215 ip_hlen = sizeof(struct ip6_hdr);
3216
3217 if (mp->m_len < (ehdrlen + ip_hlen)) {
3218 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3219 buf);
3220 ip6 = (struct ip6_hdr *)buf;
3221 }
3222 th = (struct tcphdr *)(ip6 + 1);
3223 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3224 break;
3225
3226 default:
3227 break;
3228 }
3229
3230 return (offset);
3231 }
3232
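/*
 * Name: qlnx_tso_check
 * Function: Verifies that every ETH_TX_LSO_WINDOW_BDS_NUM window of payload
 *	segments carries at least ETH_TX_LSO_WINDOW_MIN_LEN bytes as required
 *	for LSO; returns -1 if the check fails so the caller can defragment.
 */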
3233 static __inline int
3234 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3235 uint32_t offset)
3236 {
3237 int i;
3238 uint32_t sum, nbds_in_hdr = 1;
3239 uint32_t window;
3240 bus_dma_segment_t *s_seg;
3241
3242 /* If the header spans multiple segments, skip those segments */
3243
3244 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3245 return (0);
3246
3247 i = 0;
3248
3249 while ((i < nsegs) && (offset >= segs->ds_len)) {
3250 offset = offset - segs->ds_len;
3251 segs++;
3252 i++;
3253 nbds_in_hdr++;
3254 }
3255
3256 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3257
3258 nsegs = nsegs - i;
3259
3260 while (nsegs >= window) {
3261 sum = 0;
3262 s_seg = segs;
3263
3264 for (i = 0; i < window; i++){
3265 sum += s_seg->ds_len;
3266 s_seg++;
3267 }
3268
3269 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3270 fp->tx_lso_wnd_min_len++;
3271 return (-1);
3272 }
3273
3274 nsegs = nsegs - 1;
3275 segs++;
3276 }
3277
3278 return (0);
3279 }
3280
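/*
 * Name: qlnx_send
 * Function: Maps the mbuf chain for DMA (defragmenting if necessary), builds
 *	the first/second/third and data buffer descriptors including checksum,
 *	VLAN and LSO offload settings, and rings the tx doorbell.
 */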
3281 static int
3282 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3283 {
3284 bus_dma_segment_t *segs;
3285 bus_dmamap_t map = 0;
3286 uint32_t nsegs = 0;
3287 int ret = -1;
3288 struct mbuf *m_head = *m_headp;
3289 uint16_t idx = 0;
3290 uint16_t elem_left;
3291
3292 uint8_t nbd = 0;
3293 struct qlnx_tx_queue *txq;
3294
3295 struct eth_tx_1st_bd *first_bd;
3296 struct eth_tx_2nd_bd *second_bd;
3297 struct eth_tx_3rd_bd *third_bd;
3298 struct eth_tx_bd *tx_data_bd;
3299
3300 int seg_idx = 0;
3301 uint32_t nbds_in_hdr = 0;
3302 uint32_t offset = 0;
3303
3304 #ifdef QLNX_TRACE_PERF_DATA
3305 uint16_t bd_used;
3306 #endif
3307
3308 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3309
3310 if (!ha->link_up)
3311 return (-1);
3312
3313 first_bd = NULL;
3314 second_bd = NULL;
3315 third_bd = NULL;
3316 tx_data_bd = NULL;
3317
3318 txq = fp->txq[0];
3319
3320 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3321 QLNX_TX_ELEM_MIN_THRESH) {
3322 fp->tx_nsegs_gt_elem_left++;
3323 fp->err_tx_nsegs_gt_elem_left++;
3324
3325 return (ENOBUFS);
3326 }
3327
3328 idx = txq->sw_tx_prod;
3329
3330 map = txq->sw_tx_ring[idx].map;
3331 segs = txq->segs;
3332
3333 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3334 BUS_DMA_NOWAIT);
3335
3336 if (ha->dbg_trace_tso_pkt_len) {
3337 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3338 if (!fp->tx_tso_min_pkt_len) {
3339 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3340 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3341 } else {
3342 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3343 fp->tx_tso_min_pkt_len =
3344 m_head->m_pkthdr.len;
3345 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3346 fp->tx_tso_max_pkt_len =
3347 m_head->m_pkthdr.len;
3348 }
3349 }
3350 }
3351
3352 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3353 offset = qlnx_tcp_offset(ha, m_head);
3354
3355 if ((ret == EFBIG) ||
3356 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3357 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3358 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3359 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3360 struct mbuf *m;
3361
3362 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3363
3364 fp->tx_defrag++;
3365
3366 m = m_defrag(m_head, M_NOWAIT);
3367 if (m == NULL) {
3368 fp->err_tx_defrag++;
3369 fp->tx_pkts_freed++;
3370 m_freem(m_head);
3371 *m_headp = NULL;
3372 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3373 return (ENOBUFS);
3374 }
3375
3376 m_head = m;
3377 *m_headp = m_head;
3378
3379 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3380 segs, &nsegs, BUS_DMA_NOWAIT))) {
3381 fp->err_tx_defrag_dmamap_load++;
3382
3383 QL_DPRINT1(ha,
3384 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3385 ret, m_head->m_pkthdr.len);
3386
3387 fp->tx_pkts_freed++;
3388 m_freem(m_head);
3389 *m_headp = NULL;
3390
3391 return (ret);
3392 }
3393
3394 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3395 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3396 fp->err_tx_non_tso_max_seg++;
3397
3398 QL_DPRINT1(ha,
3399 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3400 ret, nsegs, m_head->m_pkthdr.len);
3401
3402 fp->tx_pkts_freed++;
3403 m_freem(m_head);
3404 *m_headp = NULL;
3405
3406 return (ret);
3407 }
3408 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3409 offset = qlnx_tcp_offset(ha, m_head);
3410
3411 } else if (ret) {
3412 fp->err_tx_dmamap_load++;
3413
3414 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3415 ret, m_head->m_pkthdr.len);
3416 fp->tx_pkts_freed++;
3417 m_freem(m_head);
3418 *m_headp = NULL;
3419 return (ret);
3420 }
3421
3422 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3423
3424 if (ha->dbg_trace_tso_pkt_len) {
3425 if (nsegs < QLNX_FP_MAX_SEGS)
3426 fp->tx_pkts[(nsegs - 1)]++;
3427 else
3428 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3429 }
3430
3431 #ifdef QLNX_TRACE_PERF_DATA
3432 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3433 if(m_head->m_pkthdr.len <= 2048)
3434 fp->tx_pkts_hist[0]++;
3435 else if((m_head->m_pkthdr.len > 2048) &&
3436 (m_head->m_pkthdr.len <= 4096))
3437 fp->tx_pkts_hist[1]++;
3438 else if((m_head->m_pkthdr.len > 4096) &&
3439 (m_head->m_pkthdr.len <= 8192))
3440 fp->tx_pkts_hist[2]++;
3441 else if((m_head->m_pkthdr.len > 8192) &&
3442 (m_head->m_pkthdr.len <= 12288 ))
3443 fp->tx_pkts_hist[3]++;
3444 else if((m_head->m_pkthdr.len > 12288) &&
3445 (m_head->m_pkthdr.len <= 16384))
3446 fp->tx_pkts_hist[4]++;
3447 else if((m_head->m_pkthdr.len > 16384) &&
3448 (m_head->m_pkthdr.len <= 20480))
3449 fp->tx_pkts_hist[5]++;
3450 else if((m_head->m_pkthdr.len > 20480) &&
3451 (m_head->m_pkthdr.len <= 24576))
3452 fp->tx_pkts_hist[6]++;
3453 else if((m_head->m_pkthdr.len > 24576) &&
3454 (m_head->m_pkthdr.len <= 28672))
3455 fp->tx_pkts_hist[7]++;
3456 else if((m_head->m_pkthdr.len > 28672) &&
3457 (m_head->m_pkthdr.len <= 32768))
3458 fp->tx_pkts_hist[8]++;
3459 else if((m_head->m_pkthdr.len > 32768) &&
3460 (m_head->m_pkthdr.len <= 36864))
3461 fp->tx_pkts_hist[9]++;
3462 else if((m_head->m_pkthdr.len > 36864) &&
3463 (m_head->m_pkthdr.len <= 40960))
3464 fp->tx_pkts_hist[10]++;
3465 else if((m_head->m_pkthdr.len > 40960) &&
3466 (m_head->m_pkthdr.len <= 45056))
3467 fp->tx_pkts_hist[11]++;
3468 else if((m_head->m_pkthdr.len > 45056) &&
3469 (m_head->m_pkthdr.len <= 49152))
3470 fp->tx_pkts_hist[12]++;
3471 else if((m_head->m_pkthdr.len > 49152) &&
3472 (m_head->m_pkthdr.len <= 53248))
3473 fp->tx_pkts_hist[13]++;
3474 else if((m_head->m_pkthdr.len > 53248) &&
3475 (m_head->m_pkthdr.len <= 57344))
3476 fp->tx_pkts_hist[14]++;
3477 else if((m_head->m_pkthdr.len > 53248) &&
3478 (m_head->m_pkthdr.len <= 57344))
3479 fp->tx_pkts_hist[15]++;
3480 else if((m_head->m_pkthdr.len > 57344) &&
3481 (m_head->m_pkthdr.len <= 61440))
3482 fp->tx_pkts_hist[16]++;
3483 else
3484 fp->tx_pkts_hist[17]++;
3485 }
3486
3487 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3488 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3489 bd_used = TX_RING_SIZE - elem_left;
3490
3491 if(bd_used <= 100)
3492 fp->tx_pkts_q[0]++;
3493 else if((bd_used > 100) && (bd_used <= 500))
3494 fp->tx_pkts_q[1]++;
3495 else if((bd_used > 500) && (bd_used <= 1000))
3496 fp->tx_pkts_q[2]++;
3497 else if((bd_used > 1000) && (bd_used <= 2000))
3498 fp->tx_pkts_q[3]++;
3499 else if((bd_used > 2000) && (bd_used <= 4000))
3500 fp->tx_pkts_q[4]++;
3501 else if((bd_used > 4000) && (bd_used <= 5000))
3502 fp->tx_pkts_q[5]++;
3503 else if((bd_used > 5000) && (bd_used <= 7000))
3504 fp->tx_pkts_q[6]++;
3505 else if((bd_used > 7000) && (bd_used <= 8000))
3506 fp->tx_pkts_q[7]++;
3507 else if((bd_used > 8000) && (bd_used <= 9000))
3508 fp->tx_pkts_q[8]++;
3509 else if((bd_used > 9000) && (bd_used <= 10000))
3510 fp->tx_pkts_q[9]++;
3511 else if((bd_used > 10000) && (bd_used <= 11000))
3512 fp->tx_pkts_q[10]++;
3513 else if((bd_used > 11000) && (bd_used <= 12000))
3514 fp->tx_pkts_q[11]++;
3515 else if((bd_used > 12000) && (bd_used <= 13000))
3516 fp->tx_pkts_q[12]++;
3517 else if((bd_used > 13000) && (bd_used <= 14000))
3518 fp->tx_pkts_q[13]++;
3519 else if((bd_used > 14000) && (bd_used <= 15000))
3520 fp->tx_pkts_q[14]++;
3521 else if((bd_used > 15000) && (bd_used <= 16000))
3522 fp->tx_pkts_q[15]++;
3523 else
3524 fp->tx_pkts_q[16]++;
3525 }
3526
3527 #endif /* end of QLNX_TRACE_PERF_DATA */
3528
3529 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3530 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3531 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3532 " in chain[%d] trying to free packets\n",
3533 nsegs, elem_left, fp->rss_id);
3534
3535 fp->tx_nsegs_gt_elem_left++;
3536
3537 (void)qlnx_tx_int(ha, fp, txq);
3538
3539 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3540 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3541 QL_DPRINT1(ha,
3542 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3543 nsegs, elem_left, fp->rss_id);
3544
3545 fp->err_tx_nsegs_gt_elem_left++;
3546 fp->tx_ring_full = 1;
3547 if (ha->storm_stats_enable)
3548 ha->storm_stats_gather = 1;
3549 return (ENOBUFS);
3550 }
3551 }
3552
3553 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3554
3555 txq->sw_tx_ring[idx].mp = m_head;
3556
3557 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3558
3559 memset(first_bd, 0, sizeof(*first_bd));
3560
3561 first_bd->data.bd_flags.bitfields =
3562 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3563
3564 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3565
3566 nbd++;
3567
3568 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3569 first_bd->data.bd_flags.bitfields |=
3570 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3571 }
3572
3573 if (m_head->m_pkthdr.csum_flags &
3574 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3575 first_bd->data.bd_flags.bitfields |=
3576 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3577 }
3578
3579 if (m_head->m_flags & M_VLANTAG) {
3580 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3581 first_bd->data.bd_flags.bitfields |=
3582 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3583 }
3584
3585 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3586 first_bd->data.bd_flags.bitfields |=
3587 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3588 first_bd->data.bd_flags.bitfields |=
3589 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3590
3591 nbds_in_hdr = 1;
3592
3593 if (offset == segs->ds_len) {
3594 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3595 segs++;
3596 seg_idx++;
3597
3598 second_bd = (struct eth_tx_2nd_bd *)
3599 ecore_chain_produce(&txq->tx_pbl);
3600 memset(second_bd, 0, sizeof(*second_bd));
3601 nbd++;
3602
3603 if (seg_idx < nsegs) {
3604 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3605 (segs->ds_addr), (segs->ds_len));
3606 segs++;
3607 seg_idx++;
3608 }
3609
3610 third_bd = (struct eth_tx_3rd_bd *)
3611 ecore_chain_produce(&txq->tx_pbl);
3612 memset(third_bd, 0, sizeof(*third_bd));
3613 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3614 third_bd->data.bitfields |=
3615 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3616 nbd++;
3617
3618 if (seg_idx < nsegs) {
3619 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3620 (segs->ds_addr), (segs->ds_len));
3621 segs++;
3622 seg_idx++;
3623 }
3624
3625 for (; seg_idx < nsegs; seg_idx++) {
3626 tx_data_bd = (struct eth_tx_bd *)
3627 ecore_chain_produce(&txq->tx_pbl);
3628 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3629 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3630 segs->ds_addr,\
3631 segs->ds_len);
3632 segs++;
3633 nbd++;
3634 }
3635
3636 } else if (offset < segs->ds_len) {
3637 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3638
3639 second_bd = (struct eth_tx_2nd_bd *)
3640 ecore_chain_produce(&txq->tx_pbl);
3641 memset(second_bd, 0, sizeof(*second_bd));
3642 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3643 (segs->ds_addr + offset),\
3644 (segs->ds_len - offset));
3645 nbd++;
3646 segs++;
3647
3648 third_bd = (struct eth_tx_3rd_bd *)
3649 ecore_chain_produce(&txq->tx_pbl);
3650 memset(third_bd, 0, sizeof(*third_bd));
3651
3652 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3653 segs->ds_addr,\
3654 segs->ds_len);
3655 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3656 third_bd->data.bitfields |=
3657 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3658 segs++;
3659 nbd++;
3660
3661 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3662 tx_data_bd = (struct eth_tx_bd *)
3663 ecore_chain_produce(&txq->tx_pbl);
3664 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3665 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3666 segs->ds_addr,\
3667 segs->ds_len);
3668 segs++;
3669 nbd++;
3670 }
3671
3672 } else {
3673 offset = offset - segs->ds_len;
3674 segs++;
3675
3676 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3677 if (offset)
3678 nbds_in_hdr++;
3679
3680 tx_data_bd = (struct eth_tx_bd *)
3681 ecore_chain_produce(&txq->tx_pbl);
3682 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3683
3684 if (second_bd == NULL) {
3685 second_bd = (struct eth_tx_2nd_bd *)
3686 tx_data_bd;
3687 } else if (third_bd == NULL) {
3688 third_bd = (struct eth_tx_3rd_bd *)
3689 tx_data_bd;
3690 }
3691
3692 if (offset && (offset < segs->ds_len)) {
3693 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3694 segs->ds_addr, offset);
3695
3696 tx_data_bd = (struct eth_tx_bd *)
3697 ecore_chain_produce(&txq->tx_pbl);
3698
3699 memset(tx_data_bd, 0,
3700 sizeof(*tx_data_bd));
3701
3702 if (second_bd == NULL) {
3703 second_bd =
3704 (struct eth_tx_2nd_bd *)tx_data_bd;
3705 } else if (third_bd == NULL) {
3706 third_bd =
3707 (struct eth_tx_3rd_bd *)tx_data_bd;
3708 }
3709 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3710 (segs->ds_addr + offset), \
3711 (segs->ds_len - offset));
3712 nbd++;
3713 offset = 0;
3714 } else {
3715 if (offset)
3716 offset = offset - segs->ds_len;
3717 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3718 segs->ds_addr, segs->ds_len);
3719 }
3720 segs++;
3721 nbd++;
3722 }
3723
3724 if (third_bd == NULL) {
3725 third_bd = (struct eth_tx_3rd_bd *)
3726 ecore_chain_produce(&txq->tx_pbl);
3727 memset(third_bd, 0, sizeof(*third_bd));
3728 }
3729
3730 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3731 third_bd->data.bitfields |=
3732 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3733 }
3734 fp->tx_tso_pkts++;
3735 } else {
3736 segs++;
3737 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3738 tx_data_bd = (struct eth_tx_bd *)
3739 ecore_chain_produce(&txq->tx_pbl);
3740 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3741 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3742 segs->ds_len);
3743 segs++;
3744 nbd++;
3745 }
3746 first_bd->data.bitfields =
3747 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3748 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3749 first_bd->data.bitfields =
3750 htole16(first_bd->data.bitfields);
3751 fp->tx_non_tso_pkts++;
3752 }
3753
3754 first_bd->data.nbds = nbd;
3755
3756 if (ha->dbg_trace_tso_pkt_len) {
3757 if (fp->tx_tso_max_nsegs < nsegs)
3758 fp->tx_tso_max_nsegs = nsegs;
3759
3760 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3761 fp->tx_tso_min_nsegs = nsegs;
3762 }
3763
3764 txq->sw_tx_ring[idx].nsegs = nsegs;
3765 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3766
3767 txq->tx_db.data.bd_prod =
3768 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3769
3770 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3771
3772 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3773 return (0);
3774 }
3775
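/*
 * Name: qlnx_stop
 * Function: Marks the interface down, wakes the per fastpath tx tasks so
 *	they observe the new state, and unloads the adapter.
 */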
3776 static void
3777 qlnx_stop(qlnx_host_t *ha)
3778 {
3779 struct ifnet *ifp = ha->ifp;
3780 int i;
3781
3782 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3783
3784 /*
3785 * We simply lock and unlock each fp->tx_mtx to
3786 * propagate the if_drv_flags
3787 * state to each tx thread
3788 */
3789 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3790
3791 if (ha->state == QLNX_STATE_OPEN) {
3792 for (i = 0; i < ha->num_rss; i++) {
3793 struct qlnx_fastpath *fp = &ha->fp_array[i];
3794
3795 mtx_lock(&fp->tx_mtx);
3796 mtx_unlock(&fp->tx_mtx);
3797
3798 if (fp->fp_taskqueue != NULL)
3799 taskqueue_enqueue(fp->fp_taskqueue,
3800 &fp->fp_task);
3801 }
3802 }
3803 #ifdef QLNX_ENABLE_IWARP
3804 if (qlnx_vf_device(ha) != 0) {
3805 qlnx_rdma_dev_close(ha);
3806 }
3807 #endif /* #ifdef QLNX_ENABLE_IWARP */
3808
3809 qlnx_unload(ha);
3810
3811 return;
3812 }
3813
3814 static int
3815 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3816 {
3817 return(TX_RING_SIZE - 1);
3818 }
3819
3820 uint8_t *
3821 qlnx_get_mac_addr(qlnx_host_t *ha)
3822 {
3823 struct ecore_hwfn *p_hwfn;
3824 unsigned char mac[ETHER_ADDR_LEN];
3825 uint8_t p_is_forced;
3826
3827 p_hwfn = &ha->cdev.hwfns[0];
3828
3829 if (qlnx_vf_device(ha) != 0)
3830 return (p_hwfn->hw_info.hw_mac_addr);
3831
3832 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3833 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3834 true) {
3835 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3836 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3837 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3838 memcpy(ha->primary_mac, mac, ETH_ALEN);
3839 }
3840
3841 return (ha->primary_mac);
3842 }
3843
3844 static uint32_t
3845 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3846 {
3847 uint32_t ifm_type = 0;
3848
3849 switch (if_link->media_type) {
3850 case MEDIA_MODULE_FIBER:
3851 case MEDIA_UNSPECIFIED:
3852 if (if_link->speed == (100 * 1000))
3853 ifm_type = QLNX_IFM_100G_SR4;
3854 else if (if_link->speed == (40 * 1000))
3855 ifm_type = IFM_40G_SR4;
3856 else if (if_link->speed == (25 * 1000))
3857 ifm_type = QLNX_IFM_25G_SR;
3858 else if (if_link->speed == (10 * 1000))
3859 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3860 else if (if_link->speed == (1 * 1000))
3861 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3862
3863 break;
3864
3865 case MEDIA_DA_TWINAX:
3866 if (if_link->speed == (100 * 1000))
3867 ifm_type = QLNX_IFM_100G_CR4;
3868 else if (if_link->speed == (40 * 1000))
3869 ifm_type = IFM_40G_CR4;
3870 else if (if_link->speed == (25 * 1000))
3871 ifm_type = QLNX_IFM_25G_CR;
3872 else if (if_link->speed == (10 * 1000))
3873 ifm_type = IFM_10G_TWINAX;
3874
3875 break;
3876
3877 default :
3878 ifm_type = IFM_UNKNOWN;
3879 break;
3880 }
3881 return (ifm_type);
3882 }
3883
3884 /*****************************************************************************
3885 * Interrupt Service Functions
3886 *****************************************************************************/
3887
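/*
 * Name: qlnx_rx_jumbo_chain
 * Function: Pulls the additional receive buffers that make up a jumbo frame
 *	off the rx ring and chains them onto the head mbuf.
 */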
3888 static int
3889 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3890 struct mbuf *mp_head, uint16_t len)
3891 {
3892 struct mbuf *mp, *mpf, *mpl;
3893 struct sw_rx_data *sw_rx_data;
3894 struct qlnx_rx_queue *rxq;
3895 uint16_t len_in_buffer;
3896
3897 rxq = fp->rxq;
3898 mpf = mpl = mp = NULL;
3899
3900 while (len) {
3901 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3902
3903 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3904 mp = sw_rx_data->data;
3905
3906 if (mp == NULL) {
3907 QL_DPRINT1(ha, "mp = NULL\n");
3908 fp->err_rx_mp_null++;
3909 rxq->sw_rx_cons =
3910 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3911
3912 if (mpf != NULL)
3913 m_freem(mpf);
3914
3915 return (-1);
3916 }
3917 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3918 BUS_DMASYNC_POSTREAD);
3919
3920 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3921 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3922 " incoming packet and reusing its buffer\n");
3923
3924 qlnx_reuse_rx_data(rxq);
3925 fp->err_rx_alloc_errors++;
3926
3927 if (mpf != NULL)
3928 m_freem(mpf);
3929
3930 return (-1);
3931 }
3932 ecore_chain_consume(&rxq->rx_bd_ring);
3933
3934 if (len > rxq->rx_buf_size)
3935 len_in_buffer = rxq->rx_buf_size;
3936 else
3937 len_in_buffer = len;
3938
3939 len = len - len_in_buffer;
3940
3941 mp->m_flags &= ~M_PKTHDR;
3942 mp->m_next = NULL;
3943 mp->m_len = len_in_buffer;
3944
3945 if (mpf == NULL)
3946 mpf = mpl = mp;
3947 else {
3948 mpl->m_next = mp;
3949 mpl = mp;
3950 }
3951 }
3952
3953 if (mpf != NULL)
3954 mp_head->m_next = mpf;
3955
3956 return (0);
3957 }
3958
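/*
 * Name: qlnx_tpa_start
 * Function: Handles a TPA (LRO aggregation) start completion: validates the
 *	aggregation, replenishes the receive buffer and starts building the
 *	mbuf chain for the aggregation.
 */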
3959 static void
3960 qlnx_tpa_start(qlnx_host_t *ha,
3961 struct qlnx_fastpath *fp,
3962 struct qlnx_rx_queue *rxq,
3963 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3964 {
3965 uint32_t agg_index;
3966 struct ifnet *ifp = ha->ifp;
3967 struct mbuf *mp;
3968 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3969 struct sw_rx_data *sw_rx_data;
3970 dma_addr_t addr;
3971 bus_dmamap_t map;
3972 struct eth_rx_bd *rx_bd;
3973 int i;
3974 #if __FreeBSD_version >= 1100000
3975 uint8_t hash_type;
3976 #endif /* #if __FreeBSD_version >= 1100000 */
3977
3978 agg_index = cqe->tpa_agg_index;
3979
3980 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3981 \t type = 0x%x\n \
3982 \t bitfields = 0x%x\n \
3983 \t seg_len = 0x%x\n \
3984 \t pars_flags = 0x%x\n \
3985 \t vlan_tag = 0x%x\n \
3986 \t rss_hash = 0x%x\n \
3987 \t len_on_first_bd = 0x%x\n \
3988 \t placement_offset = 0x%x\n \
3989 \t tpa_agg_index = 0x%x\n \
3990 \t header_len = 0x%x\n \
3991 \t ext_bd_len_list[0] = 0x%x\n \
3992 \t ext_bd_len_list[1] = 0x%x\n \
3993 \t ext_bd_len_list[2] = 0x%x\n \
3994 \t ext_bd_len_list[3] = 0x%x\n \
3995 \t ext_bd_len_list[4] = 0x%x\n",
3996 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3997 cqe->pars_flags.flags, cqe->vlan_tag,
3998 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3999 cqe->tpa_agg_index, cqe->header_len,
4000 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4001 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4002 cqe->ext_bd_len_list[4]);
4003
4004 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4005 fp->err_rx_tpa_invalid_agg_num++;
4006 return;
4007 }
4008
4009 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4010 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4011 mp = sw_rx_data->data;
4012
4013 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4014
4015 if (mp == NULL) {
4016 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4017 fp->err_rx_mp_null++;
4018 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4019
4020 return;
4021 }
4022
4023 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4024 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4025 " flags = %x, dropping incoming packet\n", fp->rss_id,
4026 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4027
4028 fp->err_rx_hw_errors++;
4029
4030 qlnx_reuse_rx_data(rxq);
4031
4032 QLNX_INC_IERRORS(ifp);
4033
4034 return;
4035 }
4036
4037 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4038 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4039 " dropping incoming packet and reusing its buffer\n",
4040 fp->rss_id);
4041
4042 fp->err_rx_alloc_errors++;
4043 QLNX_INC_IQDROPS(ifp);
4044
4045 /*
4046 * Load the tpa mbuf into the rx ring and save the
4047 * posted mbuf
4048 */
4049
4050 map = sw_rx_data->map;
4051 addr = sw_rx_data->dma_addr;
4052
4053 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4054
4055 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4056 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4057 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4058
4059 rxq->tpa_info[agg_index].rx_buf.data = mp;
4060 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4061 rxq->tpa_info[agg_index].rx_buf.map = map;
4062
4063 rx_bd = (struct eth_rx_bd *)
4064 ecore_chain_produce(&rxq->rx_bd_ring);
4065
4066 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4067 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4068
4069 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4070 BUS_DMASYNC_PREREAD);
4071
4072 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4073 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4074
4075 ecore_chain_consume(&rxq->rx_bd_ring);
4076
4077 /* Now reuse any buffers posted in ext_bd_len_list */
4078 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4079 if (cqe->ext_bd_len_list[i] == 0)
4080 break;
4081
4082 qlnx_reuse_rx_data(rxq);
4083 }
4084
4085 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4086 return;
4087 }
4088
4089 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4090 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4091 " dropping incoming packet and reusing its buffer\n",
4092 fp->rss_id);
4093
4094 QLNX_INC_IQDROPS(ifp);
4095
4096 		/* if we already have an mbuf head for this aggregation, free it */
4097 if (rxq->tpa_info[agg_index].mpf) {
4098 m_freem(rxq->tpa_info[agg_index].mpf);
4099 rxq->tpa_info[agg_index].mpl = NULL;
4100 }
4101 rxq->tpa_info[agg_index].mpf = mp;
4102 rxq->tpa_info[agg_index].mpl = NULL;
4103
4104 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4105 ecore_chain_consume(&rxq->rx_bd_ring);
4106
4107 /* Now reuse any buffers posted in ext_bd_len_list */
4108 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4109 if (cqe->ext_bd_len_list[i] == 0)
4110 break;
4111
4112 qlnx_reuse_rx_data(rxq);
4113 }
4114 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4115
4116 return;
4117 }
4118
4119 	/*
4120 	 * First process the ext_bd_len_list;
4121 	 * if this fails we simply drop the packet.
4122 	 */
4123 ecore_chain_consume(&rxq->rx_bd_ring);
4124 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4125
4126 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4127 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4128
4129 if (cqe->ext_bd_len_list[i] == 0)
4130 break;
4131
4132 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4133 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4134 BUS_DMASYNC_POSTREAD);
4135
4136 mpc = sw_rx_data->data;
4137
4138 if (mpc == NULL) {
4139 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4140 fp->err_rx_mp_null++;
4141 if (mpf != NULL)
4142 m_freem(mpf);
4143 mpf = mpl = NULL;
4144 rxq->tpa_info[agg_index].agg_state =
4145 QLNX_AGG_STATE_ERROR;
4146 ecore_chain_consume(&rxq->rx_bd_ring);
4147 rxq->sw_rx_cons =
4148 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4149 continue;
4150 }
4151
4152 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4153 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4154 " dropping incoming packet and reusing its"
4155 " buffer\n", fp->rss_id);
4156
4157 qlnx_reuse_rx_data(rxq);
4158
4159 if (mpf != NULL)
4160 m_freem(mpf);
4161 mpf = mpl = NULL;
4162
4163 rxq->tpa_info[agg_index].agg_state =
4164 QLNX_AGG_STATE_ERROR;
4165
4166 ecore_chain_consume(&rxq->rx_bd_ring);
4167 rxq->sw_rx_cons =
4168 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4169
4170 continue;
4171 }
4172
4173 mpc->m_flags &= ~M_PKTHDR;
4174 mpc->m_next = NULL;
4175 mpc->m_len = cqe->ext_bd_len_list[i];
4176
4177 if (mpf == NULL) {
4178 mpf = mpl = mpc;
4179 } else {
4180 mpl->m_len = ha->rx_buf_size;
4181 mpl->m_next = mpc;
4182 mpl = mpc;
4183 }
4184
4185 ecore_chain_consume(&rxq->rx_bd_ring);
4186 rxq->sw_rx_cons =
4187 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4188 }
4189
4190 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4191 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4192 " incoming packet and reusing its buffer\n",
4193 fp->rss_id);
4194
4195 QLNX_INC_IQDROPS(ifp);
4196
4197 rxq->tpa_info[agg_index].mpf = mp;
4198 rxq->tpa_info[agg_index].mpl = NULL;
4199
4200 return;
4201 }
4202
4203 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4204
4205 if (mpf != NULL) {
4206 mp->m_len = ha->rx_buf_size;
4207 mp->m_next = mpf;
4208 rxq->tpa_info[agg_index].mpf = mp;
4209 rxq->tpa_info[agg_index].mpl = mpl;
4210 } else {
4211 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4212 rxq->tpa_info[agg_index].mpf = mp;
4213 rxq->tpa_info[agg_index].mpl = mp;
4214 mp->m_next = NULL;
4215 }
4216
4217 mp->m_flags |= M_PKTHDR;
4218
4219 	/* assign packet to this interface */
4220 mp->m_pkthdr.rcvif = ifp;
4221
4222 	/* assume no hardware checksum has been computed */
4223 mp->m_pkthdr.csum_flags = 0;
4224
4225 //mp->m_pkthdr.flowid = fp->rss_id;
4226 mp->m_pkthdr.flowid = cqe->rss_hash;
4227
4228 #if __FreeBSD_version >= 1100000
4229
4230 hash_type = cqe->bitfields &
4231 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4232 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4233
4234 switch (hash_type) {
4235 case RSS_HASH_TYPE_IPV4:
4236 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4237 break;
4238
4239 case RSS_HASH_TYPE_TCP_IPV4:
4240 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4241 break;
4242
4243 case RSS_HASH_TYPE_IPV6:
4244 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4245 break;
4246
4247 case RSS_HASH_TYPE_TCP_IPV6:
4248 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4249 break;
4250
4251 default:
4252 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4253 break;
4254 }
4255
4256 #else
4257 mp->m_flags |= M_FLOWID;
4258 #endif
4259
4260 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4261 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4262
4263 mp->m_pkthdr.csum_data = 0xFFFF;
4264
4265 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4266 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4267 mp->m_flags |= M_VLANTAG;
4268 }
4269
4270 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4271
4272 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4273 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4274 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4275
4276 return;
4277 }
4278
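/*
 * Process a TPA continuation CQE: append the buffers listed in len_list[]
 * to the aggregation opened by the matching TPA_START CQE.
 */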
4279 static void
4280 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4281 struct qlnx_rx_queue *rxq,
4282 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4283 {
4284 struct sw_rx_data *sw_rx_data;
4285 int i;
4286 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4287 struct mbuf *mp;
4288 uint32_t agg_index;
4289
4290 QL_DPRINT7(ha, "[%d]: enter\n \
4291 \t type = 0x%x\n \
4292 \t tpa_agg_index = 0x%x\n \
4293 \t len_list[0] = 0x%x\n \
4294 \t len_list[1] = 0x%x\n \
4295 \t len_list[2] = 0x%x\n \
4296 \t len_list[3] = 0x%x\n \
4297 \t len_list[4] = 0x%x\n \
4298 \t len_list[5] = 0x%x\n",
4299 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4300 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4301 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4302
4303 agg_index = cqe->tpa_agg_index;
4304
4305 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4306 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4307 fp->err_rx_tpa_invalid_agg_num++;
4308 return;
4309 }
4310
4311 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4312 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4313
4314 if (cqe->len_list[i] == 0)
4315 break;
4316
4317 if (rxq->tpa_info[agg_index].agg_state !=
4318 QLNX_AGG_STATE_START) {
4319 qlnx_reuse_rx_data(rxq);
4320 continue;
4321 }
4322
4323 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4324 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4325 BUS_DMASYNC_POSTREAD);
4326
4327 mpc = sw_rx_data->data;
4328
4329 if (mpc == NULL) {
4330 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4331
4332 fp->err_rx_mp_null++;
4333 if (mpf != NULL)
4334 m_freem(mpf);
4335 mpf = mpl = NULL;
4336 rxq->tpa_info[agg_index].agg_state =
4337 QLNX_AGG_STATE_ERROR;
4338 ecore_chain_consume(&rxq->rx_bd_ring);
4339 rxq->sw_rx_cons =
4340 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4341 continue;
4342 }
4343
4344 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4345 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4346 " dropping incoming packet and reusing its"
4347 " buffer\n", fp->rss_id);
4348
4349 qlnx_reuse_rx_data(rxq);
4350
4351 if (mpf != NULL)
4352 m_freem(mpf);
4353 mpf = mpl = NULL;
4354
4355 rxq->tpa_info[agg_index].agg_state =
4356 QLNX_AGG_STATE_ERROR;
4357
4358 ecore_chain_consume(&rxq->rx_bd_ring);
4359 rxq->sw_rx_cons =
4360 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4361
4362 continue;
4363 }
4364
4365 mpc->m_flags &= ~M_PKTHDR;
4366 mpc->m_next = NULL;
4367 mpc->m_len = cqe->len_list[i];
4368
4369 if (mpf == NULL) {
4370 mpf = mpl = mpc;
4371 } else {
4372 mpl->m_len = ha->rx_buf_size;
4373 mpl->m_next = mpc;
4374 mpl = mpc;
4375 }
4376
4377 ecore_chain_consume(&rxq->rx_bd_ring);
4378 rxq->sw_rx_cons =
4379 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4380 }
4381
4382 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4383 fp->rss_id, mpf, mpl);
4384
4385 if (mpf != NULL) {
4386 mp = rxq->tpa_info[agg_index].mpl;
4387 mp->m_len = ha->rx_buf_size;
4388 mp->m_next = mpf;
4389 rxq->tpa_info[agg_index].mpl = mpl;
4390 }
4391
4392 return;
4393 }
4394
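/*
 * Process a TPA end CQE: chain any trailing buffers, fix up the total
 * packet length and hand the completed aggregation to the stack. Returns
 * the number of coalesced segments so the caller can charge them against
 * its receive budget.
 */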
4395 static int
4396 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4397 struct qlnx_rx_queue *rxq,
4398 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4399 {
4400 struct sw_rx_data *sw_rx_data;
4401 int i;
4402 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4403 struct mbuf *mp;
4404 uint32_t agg_index;
4405 uint32_t len = 0;
4406 struct ifnet *ifp = ha->ifp;
4407
4408 QL_DPRINT7(ha, "[%d]: enter\n \
4409 \t type = 0x%x\n \
4410 \t tpa_agg_index = 0x%x\n \
4411 \t total_packet_len = 0x%x\n \
4412 \t num_of_bds = 0x%x\n \
4413 \t end_reason = 0x%x\n \
4414 \t num_of_coalesced_segs = 0x%x\n \
4415 \t ts_delta = 0x%x\n \
4416 \t len_list[0] = 0x%x\n \
4417 \t len_list[1] = 0x%x\n \
4418 \t len_list[2] = 0x%x\n \
4419 \t len_list[3] = 0x%x\n",
4420 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4421 cqe->total_packet_len, cqe->num_of_bds,
4422 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4423 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4424 cqe->len_list[3]);
4425
4426 agg_index = cqe->tpa_agg_index;
4427
4428 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4429 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4430
4431 fp->err_rx_tpa_invalid_agg_num++;
4432 return (0);
4433 }
4434
4435 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4436 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4437
4438 if (cqe->len_list[i] == 0)
4439 break;
4440
4441 if (rxq->tpa_info[agg_index].agg_state !=
4442 QLNX_AGG_STATE_START) {
4443 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4444
4445 qlnx_reuse_rx_data(rxq);
4446 continue;
4447 }
4448
4449 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4450 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4451 BUS_DMASYNC_POSTREAD);
4452
4453 mpc = sw_rx_data->data;
4454
4455 if (mpc == NULL) {
4456 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4457
4458 fp->err_rx_mp_null++;
4459 if (mpf != NULL)
4460 m_freem(mpf);
4461 mpf = mpl = NULL;
4462 rxq->tpa_info[agg_index].agg_state =
4463 QLNX_AGG_STATE_ERROR;
4464 ecore_chain_consume(&rxq->rx_bd_ring);
4465 rxq->sw_rx_cons =
4466 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4467 continue;
4468 }
4469
4470 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4471 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4472 " dropping incoming packet and reusing its"
4473 " buffer\n", fp->rss_id);
4474
4475 qlnx_reuse_rx_data(rxq);
4476
4477 if (mpf != NULL)
4478 m_freem(mpf);
4479 mpf = mpl = NULL;
4480
4481 rxq->tpa_info[agg_index].agg_state =
4482 QLNX_AGG_STATE_ERROR;
4483
4484 ecore_chain_consume(&rxq->rx_bd_ring);
4485 rxq->sw_rx_cons =
4486 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4487
4488 continue;
4489 }
4490
4491 mpc->m_flags &= ~M_PKTHDR;
4492 mpc->m_next = NULL;
4493 mpc->m_len = cqe->len_list[i];
4494
4495 if (mpf == NULL) {
4496 mpf = mpl = mpc;
4497 } else {
4498 mpl->m_len = ha->rx_buf_size;
4499 mpl->m_next = mpc;
4500 mpl = mpc;
4501 }
4502
4503 ecore_chain_consume(&rxq->rx_bd_ring);
4504 rxq->sw_rx_cons =
4505 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4506 }
4507
4508 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4509
4510 if (mpf != NULL) {
4511 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4512
4513 mp = rxq->tpa_info[agg_index].mpl;
4514 mp->m_len = ha->rx_buf_size;
4515 mp->m_next = mpf;
4516 }
4517
4518 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4519 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4520
4521 if (rxq->tpa_info[agg_index].mpf != NULL)
4522 m_freem(rxq->tpa_info[agg_index].mpf);
4523 rxq->tpa_info[agg_index].mpf = NULL;
4524 rxq->tpa_info[agg_index].mpl = NULL;
4525 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4526 return (0);
4527 }
4528
4529 mp = rxq->tpa_info[agg_index].mpf;
4530 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4531 mp->m_pkthdr.len = cqe->total_packet_len;
4532
4533 if (mp->m_next == NULL)
4534 mp->m_len = mp->m_pkthdr.len;
4535 else {
4536 /* compute the total packet length */
4537 mpf = mp;
4538 while (mpf != NULL) {
4539 len += mpf->m_len;
4540 mpf = mpf->m_next;
4541 }
4542
4543 if (cqe->total_packet_len > len) {
4544 mpl = rxq->tpa_info[agg_index].mpl;
4545 mpl->m_len += (cqe->total_packet_len - len);
4546 }
4547 }
4548
4549 QLNX_INC_IPACKETS(ifp);
4550 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4551
4552 	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4553 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4554 fp->rss_id, mp->m_pkthdr.csum_data,
4555 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4556
4557 (*ifp->if_input)(ifp, mp);
4558
4559 rxq->tpa_info[agg_index].mpf = NULL;
4560 rxq->tpa_info[agg_index].mpl = NULL;
4561 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4562
4563 return (cqe->num_of_coalesced_segs);
4564 }
4565
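/*
 * Service the Rx completion ring of one fast path queue. Handles regular,
 * slow path and TPA CQEs, stops once 'budget' packets have been indicated
 * to the stack, and returns the number of packets processed.
 */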
4566 static int
4567 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4568 int lro_enable)
4569 {
4570 uint16_t hw_comp_cons, sw_comp_cons;
4571 int rx_pkt = 0;
4572 struct qlnx_rx_queue *rxq = fp->rxq;
4573 struct ifnet *ifp = ha->ifp;
4574 struct ecore_dev *cdev = &ha->cdev;
4575 struct ecore_hwfn *p_hwfn;
4576
4577 #ifdef QLNX_SOFT_LRO
4578 struct lro_ctrl *lro;
4579
4580 lro = &rxq->lro;
4581 #endif /* #ifdef QLNX_SOFT_LRO */
4582
4583 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4584 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4585
4586 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4587
4588 	/* Memory barrier to keep the CPU from speculatively reading the CQE/BD
4589 	 * in the while-loop below before hw_comp_cons is read. Otherwise the
4590 	 * firmware could write the CQE and the status block after the speculative
4591 	 * read but before hw_comp_cons is sampled, and a stale CQE would be used.
4592 	 */
4593
4594 /* Loop to complete all indicated BDs */
4595 while (sw_comp_cons != hw_comp_cons) {
4596 union eth_rx_cqe *cqe;
4597 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4598 struct sw_rx_data *sw_rx_data;
4599 register struct mbuf *mp;
4600 enum eth_rx_cqe_type cqe_type;
4601 uint16_t len, pad, len_on_first_bd;
4602 uint8_t *data;
4603 #if __FreeBSD_version >= 1100000
4604 uint8_t hash_type;
4605 #endif /* #if __FreeBSD_version >= 1100000 */
4606
4607 /* Get the CQE from the completion ring */
4608 cqe = (union eth_rx_cqe *)
4609 ecore_chain_consume(&rxq->rx_comp_ring);
4610 cqe_type = cqe->fast_path_regular.type;
4611
4612 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4613 			QL_DPRINT3(ha, "Got a slowpath CQE\n");
4614
4615 ecore_eth_cqe_completion(p_hwfn,
4616 (struct eth_slow_path_rx_cqe *)cqe);
4617 goto next_cqe;
4618 }
4619
4620 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4621 switch (cqe_type) {
4622 case ETH_RX_CQE_TYPE_TPA_START:
4623 qlnx_tpa_start(ha, fp, rxq,
4624 &cqe->fast_path_tpa_start);
4625 fp->tpa_start++;
4626 break;
4627
4628 case ETH_RX_CQE_TYPE_TPA_CONT:
4629 qlnx_tpa_cont(ha, fp, rxq,
4630 &cqe->fast_path_tpa_cont);
4631 fp->tpa_cont++;
4632 break;
4633
4634 case ETH_RX_CQE_TYPE_TPA_END:
4635 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4636 &cqe->fast_path_tpa_end);
4637 fp->tpa_end++;
4638 break;
4639
4640 default:
4641 break;
4642 }
4643
4644 goto next_cqe;
4645 }
4646
4647 /* Get the data from the SW ring */
4648 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4649 mp = sw_rx_data->data;
4650
4651 if (mp == NULL) {
4652 QL_DPRINT1(ha, "mp = NULL\n");
4653 fp->err_rx_mp_null++;
4654 rxq->sw_rx_cons =
4655 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4656 goto next_cqe;
4657 }
4658 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4659 BUS_DMASYNC_POSTREAD);
4660
4661 /* non GRO */
4662 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4663 len = le16toh(fp_cqe->pkt_len);
4664 pad = fp_cqe->placement_offset;
4665 #if 0
4666 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4667 " len %u, parsing flags = %d pad = %d\n",
4668 cqe_type, fp_cqe->bitfields,
4669 le16toh(fp_cqe->vlan_tag),
4670 len, le16toh(fp_cqe->pars_flags.flags), pad);
4671 #endif
4672 data = mtod(mp, uint8_t *);
4673 data = data + pad;
4674
4675 if (0)
4676 qlnx_dump_buf8(ha, __func__, data, len);
4677
4678 		/* For every Rx BD consumed, we allocate a new BD so the BD ring
4679 		 * always keeps a fixed size. If allocation fails, we take the
4680 		 * consumed BD and return it to the ring in the PROD position.
4681 		 * The packet that was received on that BD will be dropped (and
4682 		 * not passed to the upper stack).
4683 		 */
4684 /* If this is an error packet then drop it */
4685 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4686 CQE_FLAGS_ERR) {
4687 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4688 " dropping incoming packet\n", sw_comp_cons,
4689 le16toh(cqe->fast_path_regular.pars_flags.flags));
4690 fp->err_rx_hw_errors++;
4691
4692 qlnx_reuse_rx_data(rxq);
4693
4694 QLNX_INC_IERRORS(ifp);
4695
4696 goto next_cqe;
4697 }
4698
4699 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4700 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4701 " incoming packet and reusing its buffer\n");
4702 qlnx_reuse_rx_data(rxq);
4703
4704 fp->err_rx_alloc_errors++;
4705
4706 QLNX_INC_IQDROPS(ifp);
4707
4708 goto next_cqe;
4709 }
4710
4711 ecore_chain_consume(&rxq->rx_bd_ring);
4712
4713 len_on_first_bd = fp_cqe->len_on_first_bd;
4714 m_adj(mp, pad);
4715 mp->m_pkthdr.len = len;
4716
4717 if ((len > 60 ) && (len > len_on_first_bd)) {
4718 mp->m_len = len_on_first_bd;
4719
4720 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4721 (len - len_on_first_bd)) != 0) {
4722 m_freem(mp);
4723
4724 QLNX_INC_IQDROPS(ifp);
4725
4726 goto next_cqe;
4727 }
4728
4729 } else if (len_on_first_bd < len) {
4730 fp->err_rx_jumbo_chain_pkts++;
4731 } else {
4732 mp->m_len = len;
4733 }
4734
4735 mp->m_flags |= M_PKTHDR;
4736
4737 		/* assign packet to this interface */
4738 mp->m_pkthdr.rcvif = ifp;
4739
4740 		/* assume no hardware checksum has been computed */
4741 mp->m_pkthdr.csum_flags = 0;
4742
4743 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4744
4745 #if __FreeBSD_version >= 1100000
4746
4747 hash_type = fp_cqe->bitfields &
4748 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4749 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4750
4751 switch (hash_type) {
4752 case RSS_HASH_TYPE_IPV4:
4753 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4754 break;
4755
4756 case RSS_HASH_TYPE_TCP_IPV4:
4757 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4758 break;
4759
4760 case RSS_HASH_TYPE_IPV6:
4761 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4762 break;
4763
4764 case RSS_HASH_TYPE_TCP_IPV6:
4765 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4766 break;
4767
4768 default:
4769 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4770 break;
4771 }
4772
4773 #else
4774 mp->m_flags |= M_FLOWID;
4775 #endif
4776
4777 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4778 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4779 }
4780
4781 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4782 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4783 }
4784
4785 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4786 mp->m_pkthdr.csum_data = 0xFFFF;
4787 mp->m_pkthdr.csum_flags |=
4788 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4789 }
4790
4791 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4792 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4793 mp->m_flags |= M_VLANTAG;
4794 }
4795
4796 QLNX_INC_IPACKETS(ifp);
4797 QLNX_INC_IBYTES(ifp, len);
4798
4799 #ifdef QLNX_SOFT_LRO
4800
4801 if (lro_enable) {
4802 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4803
4804 tcp_lro_queue_mbuf(lro, mp);
4805
4806 #else
4807
4808 if (tcp_lro_rx(lro, mp, 0))
4809 (*ifp->if_input)(ifp, mp);
4810
4811 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4812
4813 } else {
4814 (*ifp->if_input)(ifp, mp);
4815 }
4816 #else
4817
4818 (*ifp->if_input)(ifp, mp);
4819
4820 #endif /* #ifdef QLNX_SOFT_LRO */
4821
4822 rx_pkt++;
4823
4824 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4825
4826 next_cqe: /* don't consume bd rx buffer */
4827 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4828 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4829
4830 /* CR TPA - revisit how to handle budget in TPA perhaps
4831 increase on "end" */
4832 if (rx_pkt == budget)
4833 break;
4834 } /* repeat while sw_comp_cons != hw_comp_cons... */
4835
4836 /* Update producers */
4837 qlnx_update_rx_prod(p_hwfn, rxq);
4838
4839 return rx_pkt;
4840 }
4841
4842 /*
4843 * fast path interrupt
4844 */
4845
4846 static void
4847 qlnx_fp_isr(void *arg)
4848 {
4849 qlnx_ivec_t *ivec = arg;
4850 qlnx_host_t *ha;
4851 struct qlnx_fastpath *fp = NULL;
4852 int idx;
4853
4854 ha = ivec->ha;
4855
4856 if (ha->state != QLNX_STATE_OPEN) {
4857 return;
4858 }
4859
4860 	idx = ivec->rss_idx;
4861 
4862 	if (idx >= ha->num_rss) {
4863 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4864 ha->err_illegal_intr++;
4865 return;
4866 }
4867 fp = &ha->fp_array[idx];
4868
4869 if (fp == NULL) {
4870 ha->err_fp_null++;
4871 } else {
4872 int rx_int = 0;
4873 #ifdef QLNX_SOFT_LRO
4874 int total_rx_count = 0;
4875 #endif
4876 int lro_enable, tc;
4877 struct qlnx_tx_queue *txq;
4878 		uint16_t elem_left;
#ifdef QLNX_TRACE_PERF_DATA
		uint64_t tx_compl;	/* snapshot of fp->tx_pkts_completed for the trace accounting below */
#endif
4879
4880 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4881
4882 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4883
4884 do {
4885 for (tc = 0; tc < ha->num_tc; tc++) {
4886 txq = fp->txq[tc];
4887
4888 if((int)(elem_left =
4889 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4890 QLNX_TX_ELEM_THRESH) {
4891 if (mtx_trylock(&fp->tx_mtx)) {
4892 #ifdef QLNX_TRACE_PERF_DATA
4893 tx_compl = fp->tx_pkts_completed;
4894 #endif
4895
4896 qlnx_tx_int(ha, fp, fp->txq[tc]);
4897 #ifdef QLNX_TRACE_PERF_DATA
4898 fp->tx_pkts_compl_intr +=
4899 (fp->tx_pkts_completed - tx_compl);
4900 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4901 fp->tx_comInt[0]++;
4902 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4903 ((fp->tx_pkts_completed - tx_compl) <= 64))
4904 fp->tx_comInt[1]++;
4905 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4906 ((fp->tx_pkts_completed - tx_compl) <= 128))
4907 fp->tx_comInt[2]++;
4908 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4909 fp->tx_comInt[3]++;
4910 #endif
4911 mtx_unlock(&fp->tx_mtx);
4912 }
4913 }
4914 }
4915
4916 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4917 lro_enable);
4918
4919 if (rx_int) {
4920 fp->rx_pkts += rx_int;
4921 #ifdef QLNX_SOFT_LRO
4922 total_rx_count += rx_int;
4923 #endif
4924 }
4925
4926 } while (rx_int);
4927
4928 #ifdef QLNX_SOFT_LRO
4929 {
4930 struct lro_ctrl *lro;
4931
4932 lro = &fp->rxq->lro;
4933
4934 if (lro_enable && total_rx_count) {
4935 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4936
4937 #ifdef QLNX_TRACE_LRO_CNT
4938 if (lro->lro_mbuf_count & ~1023)
4939 fp->lro_cnt_1024++;
4940 else if (lro->lro_mbuf_count & ~511)
4941 fp->lro_cnt_512++;
4942 else if (lro->lro_mbuf_count & ~255)
4943 fp->lro_cnt_256++;
4944 else if (lro->lro_mbuf_count & ~127)
4945 fp->lro_cnt_128++;
4946 else if (lro->lro_mbuf_count & ~63)
4947 fp->lro_cnt_64++;
4948 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4949
4950 tcp_lro_flush_all(lro);
4951
4952 #else
4953 struct lro_entry *queued;
4954
4955 while ((!SLIST_EMPTY(&lro->lro_active))) {
4956 queued = SLIST_FIRST(&lro->lro_active);
4957 SLIST_REMOVE_HEAD(&lro->lro_active, \
4958 next);
4959 tcp_lro_flush(lro, queued);
4960 }
4961 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4962 }
4963 }
4964 #endif /* #ifdef QLNX_SOFT_LRO */
4965
4966 ecore_sb_update_sb_idx(fp->sb_info);
4967 rmb();
4968 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4969 }
4970
4971 return;
4972 }
4973
4974 /*
4975 * slow path interrupt processing function
4976 * can be invoked in polled mode or in interrupt mode via taskqueue.
4977 */
4978 void
4979 qlnx_sp_isr(void *arg)
4980 {
4981 struct ecore_hwfn *p_hwfn;
4982 qlnx_host_t *ha;
4983
4984 p_hwfn = arg;
4985
4986 ha = (qlnx_host_t *)p_hwfn->p_dev;
4987
4988 ha->sp_interrupts++;
4989
4990 QL_DPRINT2(ha, "enter\n");
4991
4992 ecore_int_sp_dpc(p_hwfn);
4993
4994 QL_DPRINT2(ha, "exit\n");
4995
4996 return;
4997 }
4998
4999 /*****************************************************************************
5000 * Support Functions for DMA'able Memory
5001 *****************************************************************************/
5002
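/* bus_dmamap_load() callback: report the single segment's bus address via arg (0 on error) */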
5003 static void
5004 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5005 {
5006 *((bus_addr_t *)arg) = 0;
5007
5008 if (error) {
5009 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5010 return;
5011 }
5012
5013 *((bus_addr_t *)arg) = segs[0].ds_addr;
5014
5015 return;
5016 }
5017
5018 static int
5019 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5020 {
5021 int ret = 0;
5022 bus_addr_t b_addr;
5023
5024 ret = bus_dma_tag_create(
5025 ha->parent_tag,/* parent */
5026 dma_buf->alignment,
5027 ((bus_size_t)(1ULL << 32)),/* boundary */
5028 BUS_SPACE_MAXADDR, /* lowaddr */
5029 BUS_SPACE_MAXADDR, /* highaddr */
5030 NULL, NULL, /* filter, filterarg */
5031 dma_buf->size, /* maxsize */
5032 1, /* nsegments */
5033 dma_buf->size, /* maxsegsize */
5034 0, /* flags */
5035 NULL, NULL, /* lockfunc, lockarg */
5036 &dma_buf->dma_tag);
5037
5038 if (ret) {
5039 QL_DPRINT1(ha, "could not create dma tag\n");
5040 goto qlnx_alloc_dmabuf_exit;
5041 }
5042 ret = bus_dmamem_alloc(dma_buf->dma_tag,
5043 (void **)&dma_buf->dma_b,
5044 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5045 &dma_buf->dma_map);
5046 if (ret) {
5047 bus_dma_tag_destroy(dma_buf->dma_tag);
5048 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5049 goto qlnx_alloc_dmabuf_exit;
5050 }
5051
5052 ret = bus_dmamap_load(dma_buf->dma_tag,
5053 dma_buf->dma_map,
5054 dma_buf->dma_b,
5055 dma_buf->size,
5056 qlnx_dmamap_callback,
5057 &b_addr, BUS_DMA_NOWAIT);
5058
5059 if (ret || !b_addr) {
5060 bus_dma_tag_destroy(dma_buf->dma_tag);
5061 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5062 dma_buf->dma_map);
5063 ret = -1;
5064 goto qlnx_alloc_dmabuf_exit;
5065 }
5066
5067 dma_buf->dma_addr = b_addr;
5068
5069 qlnx_alloc_dmabuf_exit:
5070
5071 return ret;
5072 }
5073
5074 static void
5075 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5076 {
5077 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5078 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5079 bus_dma_tag_destroy(dma_buf->dma_tag);
5080 return;
5081 }
5082
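/*
 * Allocate DMA'able memory for the ecore layer. The request is rounded up to
 * a page and one extra page is added so that a copy of the qlnx_dma_t
 * descriptor can be stashed right after the requested size; this lets
 * qlnx_dma_free_coherent() recover the tag and map from the virtual address.
 */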
5083 void *
5084 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5085 {
5086 qlnx_dma_t dma_buf;
5087 qlnx_dma_t *dma_p;
5088 qlnx_host_t *ha __unused;
5089
5090 ha = (qlnx_host_t *)ecore_dev;
5091
5092 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5093
5094 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5095
5096 dma_buf.size = size + PAGE_SIZE;
5097 dma_buf.alignment = 8;
5098
5099 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5100 return (NULL);
5101 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5102
5103 *phys = dma_buf.dma_addr;
5104
5105 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5106
5107 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5108
5109 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5110 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5111 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5112
5113 return (dma_buf.dma_b);
5114 }
5115
5116 void
5117 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5118 uint32_t size)
5119 {
5120 qlnx_dma_t dma_buf, *dma_p;
5121 qlnx_host_t *ha;
5122
5123 ha = (qlnx_host_t *)ecore_dev;
5124
5125 if (v_addr == NULL)
5126 return;
5127
5128 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5129
5130 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5131
5132 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5133 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5134 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5135
5136 dma_buf = *dma_p;
5137
5138 if (!ha->qlnxr_debug)
5139 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5140 return;
5141 }
5142
5143 static int
5144 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5145 {
5146 int ret;
5147 device_t dev;
5148
5149 dev = ha->pci_dev;
5150
5151 /*
5152 * Allocate parent DMA Tag
5153 */
5154 ret = bus_dma_tag_create(
5155 bus_get_dma_tag(dev), /* parent */
5156 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5157 BUS_SPACE_MAXADDR, /* lowaddr */
5158 BUS_SPACE_MAXADDR, /* highaddr */
5159 NULL, NULL, /* filter, filterarg */
5160 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5161 0, /* nsegments */
5162 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5163 0, /* flags */
5164 NULL, NULL, /* lockfunc, lockarg */
5165 &ha->parent_tag);
5166
5167 if (ret) {
5168 QL_DPRINT1(ha, "could not create parent dma tag\n");
5169 return (-1);
5170 }
5171
5172 ha->flags.parent_tag = 1;
5173
5174 return (0);
5175 }
5176
5177 static void
5178 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5179 {
5180 if (ha->parent_tag != NULL) {
5181 bus_dma_tag_destroy(ha->parent_tag);
5182 ha->parent_tag = NULL;
5183 }
5184 return;
5185 }
5186
5187 static int
5188 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5189 {
5190 if (bus_dma_tag_create(NULL, /* parent */
5191 1, 0, /* alignment, bounds */
5192 BUS_SPACE_MAXADDR, /* lowaddr */
5193 BUS_SPACE_MAXADDR, /* highaddr */
5194 NULL, NULL, /* filter, filterarg */
5195 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5196 QLNX_MAX_SEGMENTS, /* nsegments */
5197 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5198 0, /* flags */
5199 NULL, /* lockfunc */
5200 NULL, /* lockfuncarg */
5201 &ha->tx_tag)) {
5202 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5203 return (-1);
5204 }
5205
5206 return (0);
5207 }
5208
5209 static void
5210 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5211 {
5212 if (ha->tx_tag != NULL) {
5213 bus_dma_tag_destroy(ha->tx_tag);
5214 ha->tx_tag = NULL;
5215 }
5216 return;
5217 }
5218
5219 static int
5220 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5221 {
5222 if (bus_dma_tag_create(NULL, /* parent */
5223 1, 0, /* alignment, bounds */
5224 BUS_SPACE_MAXADDR, /* lowaddr */
5225 BUS_SPACE_MAXADDR, /* highaddr */
5226 NULL, NULL, /* filter, filterarg */
5227 MJUM9BYTES, /* maxsize */
5228 1, /* nsegments */
5229 MJUM9BYTES, /* maxsegsize */
5230 0, /* flags */
5231 NULL, /* lockfunc */
5232 NULL, /* lockfuncarg */
5233 &ha->rx_tag)) {
5234 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5235
5236 return (-1);
5237 }
5238 return (0);
5239 }
5240
5241 static void
5242 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5243 {
5244 if (ha->rx_tag != NULL) {
5245 bus_dma_tag_destroy(ha->rx_tag);
5246 ha->rx_tag = NULL;
5247 }
5248 return;
5249 }
5250
5251 /*********************************
5252 * Exported functions
5253 *********************************/
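/*
 * The device uses 64-bit BARs, so each BAR spans two 32-bit BAR registers;
 * hence the bar_id * 2 below.
 */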
5254 uint32_t
5255 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5256 {
5257 uint32_t bar_size;
5258
5259 bar_id = bar_id * 2;
5260
5261 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5262 SYS_RES_MEMORY,
5263 PCIR_BAR(bar_id));
5264
5265 return (bar_size);
5266 }
5267
5268 uint32_t
5269 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5270 {
5271 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5272 pci_reg, 1);
5273 return 0;
5274 }
5275
5276 uint32_t
5277 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5278 uint16_t *reg_value)
5279 {
5280 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5281 pci_reg, 2);
5282 return 0;
5283 }
5284
5285 uint32_t
5286 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5287 uint32_t *reg_value)
5288 {
5289 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5290 pci_reg, 4);
5291 return 0;
5292 }
5293
5294 void
5295 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5296 {
5297 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5298 pci_reg, reg_value, 1);
5299 return;
5300 }
5301
5302 void
5303 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5304 uint16_t reg_value)
5305 {
5306 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5307 pci_reg, reg_value, 2);
5308 return;
5309 }
5310
5311 void
5312 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5313 uint32_t reg_value)
5314 {
5315 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5316 pci_reg, reg_value, 4);
5317 return;
5318 }
5319
5320 int
5321 qlnx_pci_find_capability(void *ecore_dev, int cap)
5322 {
5323 int reg;
5324 qlnx_host_t *ha;
5325
5326 ha = ecore_dev;
5327
5328 	if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5329 return reg;
5330 else {
5331 QL_DPRINT1(ha, "failed\n");
5332 return 0;
5333 }
5334 }
5335
5336 int
5337 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5338 {
5339 int reg;
5340 qlnx_host_t *ha;
5341
5342 ha = ecore_dev;
5343
5344 	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5345 return reg;
5346 else {
5347 QL_DPRINT1(ha, "failed\n");
5348 return 0;
5349 }
5350 }
5351
5352 uint32_t
5353 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5354 {
5355 uint32_t data32;
5356 struct ecore_hwfn *p_hwfn;
5357
5358 p_hwfn = hwfn;
5359
5360 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5361 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5362
5363 return (data32);
5364 }
5365
5366 void
5367 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5368 {
5369 struct ecore_hwfn *p_hwfn = hwfn;
5370
5371 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5372 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5373
5374 return;
5375 }
5376
5377 void
5378 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5379 {
5380 struct ecore_hwfn *p_hwfn = hwfn;
5381
5382 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5383 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5384 return;
5385 }
5386
5387 void
5388 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5389 {
5390 struct ecore_dev *cdev;
5391 struct ecore_hwfn *p_hwfn;
5392 uint32_t offset;
5393
5394 p_hwfn = hwfn;
5395
5396 cdev = p_hwfn->p_dev;
5397
5398 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5399 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5400
5401 return;
5402 }
5403
5404 void
5405 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5406 {
5407 struct ecore_hwfn *p_hwfn = hwfn;
5408
5409 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5410 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5411
5412 return;
5413 }
5414
5415 uint32_t
5416 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5417 {
5418 uint32_t data32;
5419 bus_size_t offset;
5420 struct ecore_dev *cdev;
5421
5422 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5423 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5424
5425 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5426
5427 return (data32);
5428 }
5429
5430 void
5431 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5432 {
5433 bus_size_t offset;
5434 struct ecore_dev *cdev;
5435
5436 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5437 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5438
5439 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5440
5441 return;
5442 }
5443
5444 void
5445 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5446 {
5447 bus_size_t offset;
5448 struct ecore_dev *cdev;
5449
5450 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5451 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5452
5453 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5454 return;
5455 }
5456
5457 void *
5458 qlnx_zalloc(uint32_t size)
5459 {
5460 caddr_t va;
5461
5462 	va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5463 
5464 	return ((void *)va);
5465 }
5466
5467 void
5468 qlnx_barrier(void *p_hwfn)
5469 {
5470 qlnx_host_t *ha;
5471
5472 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5473 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5474 }
5475
5476 void
5477 qlnx_link_update(void *p_hwfn)
5478 {
5479 qlnx_host_t *ha;
5480 int prev_link_state;
5481
5482 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5483
5484 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5485
5486 prev_link_state = ha->link_up;
5487 ha->link_up = ha->if_link.link_up;
5488
5489 if (prev_link_state != ha->link_up) {
5490 if (ha->link_up) {
5491 if_link_state_change(ha->ifp, LINK_STATE_UP);
5492 } else {
5493 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5494 }
5495 }
5496 #ifndef QLNX_VF
5497 #ifdef CONFIG_ECORE_SRIOV
5498
5499 if (qlnx_vf_device(ha) != 0) {
5500 if (ha->sriov_initialized)
5501 qlnx_inform_vf_link_state(p_hwfn, ha);
5502 }
5503
5504 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5505 #endif /* #ifdef QLNX_VF */
5506
5507 return;
5508 }
5509
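/*
 * OSAL hook invoked during the VF acquire handshake: report the driver
 * version and OS type to the PF.
 */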
5510 static void
5511 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5512 struct ecore_vf_acquire_sw_info *p_sw_info)
5513 {
5514 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5515 (QLNX_VERSION_MINOR << 16) |
5516 QLNX_VERSION_BUILD;
5517 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5518
5519 return;
5520 }
5521
5522 void
5523 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5524 void *p_sw_info)
5525 {
5526 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5527
5528 return;
5529 }
5530
5531 void
5532 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5533 struct qlnx_link_output *if_link)
5534 {
5535 struct ecore_mcp_link_params link_params;
5536 struct ecore_mcp_link_state link_state;
5537 uint8_t p_change;
5538 struct ecore_ptt *p_ptt = NULL;
5539
5540 memset(if_link, 0, sizeof(*if_link));
5541 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5542 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5543
5544 ha = (qlnx_host_t *)hwfn->p_dev;
5545
5546 	/* Prepare source inputs */
5547 	/* a PF reads link info from the MFW; a VF reads it from its bulletin board */
5548 if (qlnx_vf_device(ha) != 0) {
5549 p_ptt = ecore_ptt_acquire(hwfn);
5550
5551 if (p_ptt == NULL) {
5552 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5553 return;
5554 }
5555
5556 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5557 ecore_ptt_release(hwfn, p_ptt);
5558
5559 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5560 sizeof(link_params));
5561 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5562 sizeof(link_state));
5563 } else {
5564 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5565 ecore_vf_read_bulletin(hwfn, &p_change);
5566 ecore_vf_get_link_params(hwfn, &link_params);
5567 ecore_vf_get_link_state(hwfn, &link_state);
5568 }
5569
5570 /* Set the link parameters to pass to protocol driver */
5571 if (link_state.link_up) {
5572 if_link->link_up = true;
5573 if_link->speed = link_state.speed;
5574 }
5575
5576 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5577
5578 if (link_params.speed.autoneg)
5579 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5580
5581 if (link_params.pause.autoneg ||
5582 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5583 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5584
5585 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5586 link_params.pause.forced_tx)
5587 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5588
5589 if (link_params.speed.advertised_speeds &
5590 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5591 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5592 QLNX_LINK_CAP_1000baseT_Full;
5593
5594 if (link_params.speed.advertised_speeds &
5595 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5596 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5597
5598 if (link_params.speed.advertised_speeds &
5599 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5600 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5601
5602 if (link_params.speed.advertised_speeds &
5603 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5604 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5605
5606 if (link_params.speed.advertised_speeds &
5607 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5608 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5609
5610 if (link_params.speed.advertised_speeds &
5611 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5612 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5613
5614 if_link->advertised_caps = if_link->supported_caps;
5615
5616 if_link->autoneg = link_params.speed.autoneg;
5617 if_link->duplex = QLNX_LINK_DUPLEX;
5618
5619 /* Link partner capabilities */
5620
5621 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5622 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5623
5624 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5625 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5626
5627 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5628 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5629
5630 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5631 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5632
5633 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5634 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5635
5636 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5637 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5638
5639 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5640 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5641
5642 if (link_state.an_complete)
5643 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5644
5645 if (link_state.partner_adv_pause)
5646 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5647
5648 if ((link_state.partner_adv_pause ==
5649 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5650 (link_state.partner_adv_pause ==
5651 ECORE_LINK_PARTNER_BOTH_PAUSE))
5652 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5653
5654 return;
5655 }
5656
5657 void
5658 qlnx_schedule_recovery(void *p_hwfn)
5659 {
5660 qlnx_host_t *ha;
5661
5662 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5663
5664 if (qlnx_vf_device(ha) != 0) {
5665 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5666 }
5667
5668 return;
5669 }
5670
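/*
 * Copy the PF parameters into every hw-function, then allocate and set up
 * the ecore resources.
 */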
5671 static int
5672 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5673 {
5674 int rc, i;
5675
5676 for (i = 0; i < cdev->num_hwfns; i++) {
5677 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5678 p_hwfn->pf_params = *func_params;
5679
5680 #ifdef QLNX_ENABLE_IWARP
5681 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5682 p_hwfn->using_ll2 = true;
5683 }
5684 #endif /* #ifdef QLNX_ENABLE_IWARP */
5685 }
5686
5687 rc = ecore_resc_alloc(cdev);
5688 if (rc)
5689 goto qlnx_nic_setup_exit;
5690
5691 ecore_resc_setup(cdev);
5692
5693 qlnx_nic_setup_exit:
5694
5695 return rc;
5696 }
5697
5698 static int
5699 qlnx_nic_start(struct ecore_dev *cdev)
5700 {
5701 int rc;
5702 struct ecore_hw_init_params params;
5703
5704 	bzero(&params, sizeof (struct ecore_hw_init_params));
5705
5706 params.p_tunn = NULL;
5707 params.b_hw_start = true;
5708 params.int_mode = cdev->int_mode;
5709 params.allow_npar_tx_switch = true;
5710 params.bin_fw_data = NULL;
5711
5712 	rc = ecore_hw_init(cdev, &params);
5713 if (rc) {
5714 ecore_resc_free(cdev);
5715 return rc;
5716 }
5717
5718 return 0;
5719 }
5720
5721 static int
5722 qlnx_slowpath_start(qlnx_host_t *ha)
5723 {
5724 struct ecore_dev *cdev;
5725 struct ecore_pf_params pf_params;
5726 int rc;
5727
5728 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5729 pf_params.eth_pf_params.num_cons =
5730 (ha->num_rss) * (ha->num_tc + 1);
5731
5732 #ifdef QLNX_ENABLE_IWARP
5733 if (qlnx_vf_device(ha) != 0) {
5734 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5735 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5736 pf_params.rdma_pf_params.num_qps = 1024;
5737 pf_params.rdma_pf_params.num_srqs = 1024;
5738 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5739 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5740 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5741 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5742 pf_params.rdma_pf_params.num_qps = 8192;
5743 pf_params.rdma_pf_params.num_srqs = 8192;
5744 //pf_params.rdma_pf_params.min_dpis = 0;
5745 pf_params.rdma_pf_params.min_dpis = 8;
5746 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5747 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5748 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5749 }
5750 }
5751 #endif /* #ifdef QLNX_ENABLE_IWARP */
5752
5753 cdev = &ha->cdev;
5754
5755 rc = qlnx_nic_setup(cdev, &pf_params);
5756 if (rc)
5757 goto qlnx_slowpath_start_exit;
5758
5759 cdev->int_mode = ECORE_INT_MODE_MSIX;
5760 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5761
5762 #ifdef QLNX_MAX_COALESCE
5763 cdev->rx_coalesce_usecs = 255;
5764 cdev->tx_coalesce_usecs = 255;
5765 #endif
5766
5767 rc = qlnx_nic_start(cdev);
5768
5769 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5770 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5771
5772 #ifdef QLNX_USER_LLDP
5773 (void)qlnx_set_lldp_tlvx(ha, NULL);
5774 #endif /* #ifdef QLNX_USER_LLDP */
5775
5776 qlnx_slowpath_start_exit:
5777
5778 return (rc);
5779 }
5780
5781 static int
5782 qlnx_slowpath_stop(qlnx_host_t *ha)
5783 {
5784 struct ecore_dev *cdev;
5785 device_t dev = ha->pci_dev;
5786 int i;
5787
5788 cdev = &ha->cdev;
5789
5790 ecore_hw_stop(cdev);
5791
5792 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5793 if (ha->sp_handle[i])
5794 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5795 ha->sp_handle[i]);
5796
5797 ha->sp_handle[i] = NULL;
5798
5799 if (ha->sp_irq[i])
5800 (void) bus_release_resource(dev, SYS_RES_IRQ,
5801 ha->sp_irq_rid[i], ha->sp_irq[i]);
5802 ha->sp_irq[i] = NULL;
5803 }
5804
5805 ecore_resc_free(cdev);
5806
5807 return 0;
5808 }
5809
5810 static void
5811 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5812 char ver_str[VER_SIZE])
5813 {
5814 int i;
5815
5816 memcpy(cdev->name, name, NAME_SIZE);
5817
5818 for_each_hwfn(cdev, i) {
5819 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5820 }
5821
5822 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5823
5824 return ;
5825 }
5826
5827 void
5828 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5829 {
5830 enum ecore_mcp_protocol_type type;
5831 union ecore_mcp_protocol_stats *stats;
5832 struct ecore_eth_stats eth_stats;
5833 qlnx_host_t *ha;
5834
5835 ha = cdev;
5836 stats = proto_stats;
5837 type = proto_type;
5838
5839 switch (type) {
5840 case ECORE_MCP_LAN_STATS:
5841 		ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5842 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5843 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5844 stats->lan_stats.fcs_err = -1;
5845 break;
5846
5847 default:
5848 ha->err_get_proto_invalid_type++;
5849
5850 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5851 break;
5852 }
5853 return;
5854 }
5855
5856 static int
5857 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5858 {
5859 struct ecore_hwfn *p_hwfn;
5860 struct ecore_ptt *p_ptt;
5861
5862 p_hwfn = &ha->cdev.hwfns[0];
5863 p_ptt = ecore_ptt_acquire(p_hwfn);
5864
5865 if (p_ptt == NULL) {
5866 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5867 return (-1);
5868 }
5869 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5870
5871 ecore_ptt_release(p_hwfn, p_ptt);
5872
5873 return (0);
5874 }
5875
5876 static int
5877 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5878 {
5879 struct ecore_hwfn *p_hwfn;
5880 struct ecore_ptt *p_ptt;
5881
5882 p_hwfn = &ha->cdev.hwfns[0];
5883 p_ptt = ecore_ptt_acquire(p_hwfn);
5884
5885 if (p_ptt == NULL) {
5886 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5887 return (-1);
5888 }
5889 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5890
5891 ecore_ptt_release(p_hwfn, p_ptt);
5892
5893 return (0);
5894 }
5895
5896 static int
5897 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5898 {
5899 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5900 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5901 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5902
5903 return 0;
5904 }
5905
5906 static void
5907 qlnx_init_fp(qlnx_host_t *ha)
5908 {
5909 int rss_id, txq_array_index, tc;
5910
5911 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5912 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5913
5914 fp->rss_id = rss_id;
5915 fp->edev = ha;
5916 fp->sb_info = &ha->sb_array[rss_id];
5917 fp->rxq = &ha->rxq_array[rss_id];
5918 fp->rxq->rxq_id = rss_id;
5919
5920 for (tc = 0; tc < ha->num_tc; tc++) {
5921 txq_array_index = tc * ha->num_rss + rss_id;
5922 fp->txq[tc] = &ha->txq_array[txq_array_index];
5923 fp->txq[tc]->index = txq_array_index;
5924 }
5925
5926 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5927 rss_id);
5928
5929 fp->tx_ring_full = 0;
5930
5931 /* reset all the statistics counters */
5932
5933 fp->tx_pkts_processed = 0;
5934 fp->tx_pkts_freed = 0;
5935 fp->tx_pkts_transmitted = 0;
5936 fp->tx_pkts_completed = 0;
5937
5938 #ifdef QLNX_TRACE_PERF_DATA
5939 fp->tx_pkts_trans_ctx = 0;
5940 fp->tx_pkts_compl_ctx = 0;
5941 fp->tx_pkts_trans_fp = 0;
5942 fp->tx_pkts_compl_fp = 0;
5943 fp->tx_pkts_compl_intr = 0;
5944 #endif
5945 fp->tx_lso_wnd_min_len = 0;
5946 fp->tx_defrag = 0;
5947 fp->tx_nsegs_gt_elem_left = 0;
5948 fp->tx_tso_max_nsegs = 0;
5949 fp->tx_tso_min_nsegs = 0;
5950 fp->err_tx_nsegs_gt_elem_left = 0;
5951 fp->err_tx_dmamap_create = 0;
5952 fp->err_tx_defrag_dmamap_load = 0;
5953 fp->err_tx_non_tso_max_seg = 0;
5954 fp->err_tx_dmamap_load = 0;
5955 fp->err_tx_defrag = 0;
5956 fp->err_tx_free_pkt_null = 0;
5957 fp->err_tx_cons_idx_conflict = 0;
5958
5959 fp->rx_pkts = 0;
5960 fp->err_m_getcl = 0;
5961 fp->err_m_getjcl = 0;
5962 }
5963 return;
5964 }
5965
5966 void
5967 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5968 {
5969 struct ecore_dev *cdev;
5970
5971 cdev = &ha->cdev;
5972
5973 if (sb_info->sb_virt) {
5974 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5975 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5976 sb_info->sb_virt = NULL;
5977 }
5978 }
5979
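/*
 * Map an absolute status block id to its hw-function (sb_id % num_hwfns) and
 * relative sb id (sb_id / num_hwfns), then initialize the status block.
 */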
5980 static int
5981 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5982 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5983 {
5984 struct ecore_hwfn *p_hwfn;
5985 int hwfn_index, rc;
5986 u16 rel_sb_id;
5987
5988 hwfn_index = sb_id % cdev->num_hwfns;
5989 p_hwfn = &cdev->hwfns[hwfn_index];
5990 rel_sb_id = sb_id / cdev->num_hwfns;
5991
5992 QL_DPRINT2(((qlnx_host_t *)cdev),
5993 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5994 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5995 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5996 sb_virt_addr, (void *)sb_phy_addr);
5997
5998 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5999 sb_virt_addr, sb_phy_addr, rel_sb_id);
6000
6001 return rc;
6002 }
6003
6004 /* This function allocates fast-path status block memory */
6005 int
6006 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6007 {
6008 struct status_block_e4 *sb_virt;
6009 bus_addr_t sb_phys;
6010 int rc;
6011 uint32_t size;
6012 struct ecore_dev *cdev;
6013
6014 cdev = &ha->cdev;
6015
6016 size = sizeof(*sb_virt);
6017 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6018
6019 if (!sb_virt) {
6020 QL_DPRINT1(ha, "Status block allocation failed\n");
6021 return -ENOMEM;
6022 }
6023
6024 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6025 if (rc) {
6026 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6027 }
6028
6029 return rc;
6030 }
6031
6032 static void
6033 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6034 {
6035 int i;
6036 struct sw_rx_data *rx_buf;
6037
6038 for (i = 0; i < rxq->num_rx_buffers; i++) {
6039 rx_buf = &rxq->sw_rx_ring[i];
6040
6041 if (rx_buf->data != NULL) {
6042 if (rx_buf->map != NULL) {
6043 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6044 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6045 rx_buf->map = NULL;
6046 }
6047 m_freem(rx_buf->data);
6048 rx_buf->data = NULL;
6049 }
6050 }
6051 return;
6052 }
6053
6054 static void
6055 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6056 {
6057 struct ecore_dev *cdev;
6058 int i;
6059
6060 cdev = &ha->cdev;
6061
6062 qlnx_free_rx_buffers(ha, rxq);
6063
6064 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6065 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6066 if (rxq->tpa_info[i].mpf != NULL)
6067 m_freem(rxq->tpa_info[i].mpf);
6068 }
6069
6070 bzero((void *)&rxq->sw_rx_ring[0],
6071 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6072
6073 /* Free the real RQ ring used by FW */
6074 if (rxq->rx_bd_ring.p_virt_addr) {
6075 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6076 rxq->rx_bd_ring.p_virt_addr = NULL;
6077 }
6078
6079 /* Free the real completion ring used by FW */
6080 if (rxq->rx_comp_ring.p_virt_addr &&
6081 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6082 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6083 rxq->rx_comp_ring.p_virt_addr = NULL;
6084 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6085 }
6086
6087 #ifdef QLNX_SOFT_LRO
6088 {
6089 struct lro_ctrl *lro;
6090
6091 lro = &rxq->lro;
6092 tcp_lro_free(lro);
6093 }
6094 #endif /* #ifdef QLNX_SOFT_LRO */
6095
6096 return;
6097 }
6098
6099 static int
6100 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6101 {
6102 register struct mbuf *mp;
6103 uint16_t rx_buf_size;
6104 struct sw_rx_data *sw_rx_data;
6105 struct eth_rx_bd *rx_bd;
6106 dma_addr_t dma_addr;
6107 bus_dmamap_t map;
6108 bus_dma_segment_t segs[1];
6109 int nsegs;
6110 int ret;
6111
6112 rx_buf_size = rxq->rx_buf_size;
6113
6114 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6115
6116 if (mp == NULL) {
6117 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6118 return -ENOMEM;
6119 }
6120
6121 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6122
6123 map = (bus_dmamap_t)0;
6124
6125 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6126 BUS_DMA_NOWAIT);
6127 dma_addr = segs[0].ds_addr;
6128
6129 if (ret || !dma_addr || (nsegs != 1)) {
6130 m_freem(mp);
6131 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6132 ret, (long long unsigned int)dma_addr, nsegs);
6133 return -ENOMEM;
6134 }
6135
6136 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6137 sw_rx_data->data = mp;
6138 sw_rx_data->dma_addr = dma_addr;
6139 sw_rx_data->map = map;
6140
6141 /* Advance PROD and get BD pointer */
6142 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6143 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6144 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6145 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6146
6147 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6148
6149 return 0;
6150 }
6151
6152 static int
6153 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6154 struct qlnx_agg_info *tpa)
6155 {
6156 struct mbuf *mp;
6157 dma_addr_t dma_addr;
6158 bus_dmamap_t map;
6159 bus_dma_segment_t segs[1];
6160 int nsegs;
6161 int ret;
6162 struct sw_rx_data *rx_buf;
6163
6164 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6165
6166 if (mp == NULL) {
6167 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6168 return -ENOMEM;
6169 }
6170
6171 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6172
6173 map = (bus_dmamap_t)0;
6174
6175 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6176 BUS_DMA_NOWAIT);
6177 dma_addr = segs[0].ds_addr;
6178
6179 if (ret || !dma_addr || (nsegs != 1)) {
6180 m_freem(mp);
6181 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6182 ret, (long long unsigned int)dma_addr, nsegs);
6183 return -ENOMEM;
6184 }
6185
6186 rx_buf = &tpa->rx_buf;
6187
6188 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6189
6190 rx_buf->data = mp;
6191 rx_buf->dma_addr = dma_addr;
6192 rx_buf->map = map;
6193
6194 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6195
6196 return (0);
6197 }
6198
6199 static void
6200 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6201 {
6202 struct sw_rx_data *rx_buf;
6203
6204 rx_buf = &tpa->rx_buf;
6205
6206 if (rx_buf->data != NULL) {
6207 if (rx_buf->map != NULL) {
6208 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6209 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6210 rx_buf->map = NULL;
6211 }
6212 m_freem(rx_buf->data);
6213 rx_buf->data = NULL;
6214 }
6215 return;
6216 }
6217
6218 /* This function allocates all memory needed per Rx queue */
6219 static int
6220 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6221 {
6222 int i, rc, num_allocated;
6223 struct ecore_dev *cdev;
6224
6225 cdev = &ha->cdev;
6226
6227 rxq->num_rx_buffers = RX_RING_SIZE;
6228
6229 rxq->rx_buf_size = ha->rx_buf_size;
6230
6231 /* Initialize the parallel driver ring for Rx buffers */

6232 bzero((void *)&rxq->sw_rx_ring[0],
6233 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6234
6235 /* Allocate FW Rx ring */
6236
6237 rc = ecore_chain_alloc(cdev,
6238 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6239 ECORE_CHAIN_MODE_NEXT_PTR,
6240 ECORE_CHAIN_CNT_TYPE_U16,
6241 RX_RING_SIZE,
6242 sizeof(struct eth_rx_bd),
6243 &rxq->rx_bd_ring, NULL);
6244
6245 if (rc)
6246 goto err;
6247
6248 /* Allocate FW completion ring */
6249 rc = ecore_chain_alloc(cdev,
6250 ECORE_CHAIN_USE_TO_CONSUME,
6251 ECORE_CHAIN_MODE_PBL,
6252 ECORE_CHAIN_CNT_TYPE_U16,
6253 RX_RING_SIZE,
6254 sizeof(union eth_rx_cqe),
6255 &rxq->rx_comp_ring, NULL);
6256
6257 if (rc)
6258 goto err;
6259
6260 /* Allocate buffers for the Rx ring */
6261
6262 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6263 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6264 &rxq->tpa_info[i]);
6265 if (rc)
6266 break;
6267 }
6268
6269 for (i = 0; i < rxq->num_rx_buffers; i++) {
6270 rc = qlnx_alloc_rx_buffer(ha, rxq);
6271 if (rc)
6272 break;
6273 }
6274 num_allocated = i;
6275 if (!num_allocated) {
6276 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6277 goto err;
6278 } else if (num_allocated < rxq->num_rx_buffers) {
6279 QL_DPRINT1(ha, "Allocated fewer buffers than"
6280 " desired (%d allocated)\n", num_allocated);
6281 }
6282
6283 #ifdef QLNX_SOFT_LRO
6284
6285 {
6286 struct lro_ctrl *lro;
6287
6288 lro = &rxq->lro;
6289
6290 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6291 if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
6292 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6293 rxq->rxq_id);
6294 goto err;
6295 }
6296 #else
6297 if (tcp_lro_init(lro)) {
6298 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6299 rxq->rxq_id);
6300 goto err;
6301 }
6302 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6303
6304 lro->ifp = ha->ifp;
6305 }
6306 #endif /* #ifdef QLNX_SOFT_LRO */
6307 return 0;
6308
6309 err:
6310 qlnx_free_mem_rxq(ha, rxq);
6311 return -ENOMEM;
6312 }
6313
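/*
 * Name: qlnx_free_mem_txq
 * Function: Clears the software Tx ring and frees the FW Tx PBL chain for
 *	the given Tx queue.
 */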
6314 static void
6315 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6316 struct qlnx_tx_queue *txq)
6317 {
6318 struct ecore_dev *cdev;
6319
6320 cdev = &ha->cdev;
6321
6322 bzero((void *)&txq->sw_tx_ring[0],
6323 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6324
6325 /* Free the real Tx ring used by FW */
6326 if (txq->tx_pbl.p_virt_addr) {
6327 ecore_chain_free(cdev, &txq->tx_pbl);
6328 txq->tx_pbl.p_virt_addr = NULL;
6329 }
6330 return;
6331 }
6332
6333 /* This function allocates all memory needed per Tx queue */
6334 static int
6335 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6336 struct qlnx_tx_queue *txq)
6337 {
6338 int ret = ECORE_SUCCESS;
6339 union eth_tx_bd_types *p_virt;
6340 struct ecore_dev *cdev;
6341
6342 cdev = &ha->cdev;
6343
6344 bzero((void *)&txq->sw_tx_ring[0],
6345 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6346
6347 /* Allocate the real Tx ring to be used by FW */
6348 ret = ecore_chain_alloc(cdev,
6349 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6350 ECORE_CHAIN_MODE_PBL,
6351 ECORE_CHAIN_CNT_TYPE_U16,
6352 TX_RING_SIZE,
6353 sizeof(*p_virt),
6354 &txq->tx_pbl, NULL);
6355
6356 if (ret != ECORE_SUCCESS) {
6357 goto err;
6358 }
6359
6360 txq->num_tx_buffers = TX_RING_SIZE;
6361
6362 return 0;
6363
6364 err:
6365 qlnx_free_mem_txq(ha, fp, txq);
6366 return -ENOMEM;
6367 }
6368
6369 static void
6370 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6371 {
6372 struct mbuf *mp;
6373 struct ifnet *ifp = ha->ifp;
6374
6375 if (mtx_initialized(&fp->tx_mtx)) {
6376 if (fp->tx_br != NULL) {
6377 mtx_lock(&fp->tx_mtx);
6378
6379 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6380 fp->tx_pkts_freed++;
6381 m_freem(mp);
6382 }
6383
6384 mtx_unlock(&fp->tx_mtx);
6385
6386 buf_ring_free(fp->tx_br, M_DEVBUF);
6387 fp->tx_br = NULL;
6388 }
6389 mtx_destroy(&fp->tx_mtx);
6390 }
6391 return;
6392 }
6393
6394 static void
6395 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6396 {
6397 int tc;
6398
6399 qlnx_free_mem_sb(ha, fp->sb_info);
6400
6401 qlnx_free_mem_rxq(ha, fp->rxq);
6402
6403 for (tc = 0; tc < ha->num_tc; tc++)
6404 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6405
6406 return;
6407 }
6408
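/*
 * Name: qlnx_alloc_tx_br
 * Function: Initializes the per-fastpath Tx mutex and allocates the
 *	buf_ring used by the multiqueue transmit path.
 */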
6409 static int
6410 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6411 {
6412 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6413 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6414
6415 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6416
6417 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6418 M_NOWAIT, &fp->tx_mtx);
6419 if (fp->tx_br == NULL) {
6420 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6421 ha->dev_unit, fp->rss_id);
6422 return -ENOMEM;
6423 }
6424 return 0;
6425 }
6426
6427 static int
6428 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6429 {
6430 int rc, tc;
6431
6432 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6433 if (rc)
6434 goto err;
6435
6436 if (ha->rx_jumbo_buf_eq_mtu) {
6437 if (ha->max_frame_size <= MCLBYTES)
6438 ha->rx_buf_size = MCLBYTES;
6439 else if (ha->max_frame_size <= MJUMPAGESIZE)
6440 ha->rx_buf_size = MJUMPAGESIZE;
6441 else if (ha->max_frame_size <= MJUM9BYTES)
6442 ha->rx_buf_size = MJUM9BYTES;
6443 else if (ha->max_frame_size <= MJUM16BYTES)
6444 ha->rx_buf_size = MJUM16BYTES;
6445 } else {
6446 if (ha->max_frame_size <= MCLBYTES)
6447 ha->rx_buf_size = MCLBYTES;
6448 else
6449 ha->rx_buf_size = MJUMPAGESIZE;
6450 }
6451
6452 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6453 if (rc)
6454 goto err;
6455
6456 for (tc = 0; tc < ha->num_tc; tc++) {
6457 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6458 if (rc)
6459 goto err;
6460 }
6461
6462 return 0;
6463
6464 err:
6465 qlnx_free_mem_fp(ha, fp);
6466 return -ENOMEM;
6467 }
6468
6469 static void
6470 qlnx_free_mem_load(qlnx_host_t *ha)
6471 {
6472 int i;
6473
6474 for (i = 0; i < ha->num_rss; i++) {
6475 struct qlnx_fastpath *fp = &ha->fp_array[i];
6476
6477 qlnx_free_mem_fp(ha, fp);
6478 }
6479 return;
6480 }
6481
6482 static int
6483 qlnx_alloc_mem_load(qlnx_host_t *ha)
6484 {
6485 int rc = 0, rss_id;
6486
6487 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6488 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6489
6490 rc = qlnx_alloc_mem_fp(ha, fp);
6491 if (rc)
6492 break;
6493 }
6494 return (rc);
6495 }
6496
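/*
 * Name: qlnx_start_vport
 * Function: Builds the vport start parameters (MTU, inner VLAN removal,
 *	drop-ttl0 and TPA mode) and issues the vport start ramrod on every
 *	hw function, then starts the fastpath on that function.
 */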
6497 static int
6498 qlnx_start_vport(struct ecore_dev *cdev,
6499 u8 vport_id,
6500 u16 mtu,
6501 u8 drop_ttl0_flg,
6502 u8 inner_vlan_removal_en_flg,
6503 u8 tx_switching,
6504 u8 hw_lro_enable)
6505 {
6506 int rc, i;
6507 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6508 qlnx_host_t *ha __unused;
6509
6510 ha = (qlnx_host_t *)cdev;
6511
6512 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6513 vport_start_params.tx_switching = 0;
6514 vport_start_params.handle_ptp_pkts = 0;
6515 vport_start_params.only_untagged = 0;
6516 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6517
6518 vport_start_params.tpa_mode =
6519 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6520 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6521
6522 vport_start_params.vport_id = vport_id;
6523 vport_start_params.mtu = mtu;
6524
6525 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6526
6527 for_each_hwfn(cdev, i) {
6528 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6529
6530 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6531 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6532
6533 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6534
6535 if (rc) {
6536 QL_DPRINT1(ha, "Failed to start V-PORT %d"
6537 " with MTU %d\n", vport_id, mtu);
6538 return -ENOMEM;
6539 }
6540
6541 ecore_hw_start_fastpath(p_hwfn);
6542
6543 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6544 vport_id, mtu);
6545 }
6546 return 0;
6547 }
6548
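/*
 * Name: qlnx_update_vport
 * Function: Translates the driver's vport update parameters into
 *	ecore_sp_vport_update_params and sends the vport update ramrod on
 *	each hw function. For multi hw-function (CMT) devices the RSS
 *	indirection table is rebuilt per engine before the update is sent.
 */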
6549 static int
6550 qlnx_update_vport(struct ecore_dev *cdev,
6551 struct qlnx_update_vport_params *params)
6552 {
6553 struct ecore_sp_vport_update_params sp_params;
6554 int rc, i, j, fp_index;
6555 struct ecore_hwfn *p_hwfn;
6556 struct ecore_rss_params *rss;
6557 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6558 struct qlnx_fastpath *fp;
6559
6560 memset(&sp_params, 0, sizeof(sp_params));
6561 /* Translate protocol params into sp params */
6562 sp_params.vport_id = params->vport_id;
6563
6564 sp_params.update_vport_active_rx_flg =
6565 params->update_vport_active_rx_flg;
6566 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6567
6568 sp_params.update_vport_active_tx_flg =
6569 params->update_vport_active_tx_flg;
6570 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6571
6572 sp_params.update_inner_vlan_removal_flg =
6573 params->update_inner_vlan_removal_flg;
6574 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6575
6576 sp_params.sge_tpa_params = params->sge_tpa_params;
6577
6578 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6579 * We need to re-fix the rss values per engine for CMT.
6580 */
6581 if (params->rss_params->update_rss_config)
6582 sp_params.rss_params = params->rss_params;
6583 else
6584 sp_params.rss_params = NULL;
6585
6586 for_each_hwfn(cdev, i) {
6587 p_hwfn = &cdev->hwfns[i];
6588
6589 if ((cdev->num_hwfns > 1) &&
6590 params->rss_params->update_rss_config &&
6591 params->rss_params->rss_enable) {
6592 rss = params->rss_params;
6593
6594 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6595 fp_index = ((cdev->num_hwfns * j) + i) %
6596 ha->num_rss;
6597
6598 fp = &ha->fp_array[fp_index];
6599 rss->rss_ind_table[j] = fp->rxq->handle;
6600 }
6601
6602 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6603 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6604 rss->rss_ind_table[j],
6605 rss->rss_ind_table[j+1],
6606 rss->rss_ind_table[j+2],
6607 rss->rss_ind_table[j+3],
6608 rss->rss_ind_table[j+4],
6609 rss->rss_ind_table[j+5],
6610 rss->rss_ind_table[j+6],
6611 rss->rss_ind_table[j+7]);
6612 j += 8;
6613 }
6614 }
6615
6616 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6617
6618 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6619
6620 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6621 ECORE_SPQ_MODE_EBLOCK, NULL);
6622 if (rc) {
6623 QL_DPRINT1(ha, "Failed to update VPORT\n");
6624 return rc;
6625 }
6626
6627 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6628 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6629 params->vport_id, params->vport_active_tx_flg,
6630 params->vport_active_rx_flg,
6631 params->update_vport_active_tx_flg,
6632 params->update_vport_active_rx_flg);
6633 }
6634
6635 return 0;
6636 }
6637
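/*
 * Name: qlnx_reuse_rx_data
 * Function: Recycles the Rx buffer at the current software consumer index
 *	back to the producer position by copying its BD, so the same mbuf can
 *	be handed back to the hardware without a new allocation.
 */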
6638 static void
6639 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6640 {
6641 struct eth_rx_bd *rx_bd_cons =
6642 ecore_chain_consume(&rxq->rx_bd_ring);
6643 struct eth_rx_bd *rx_bd_prod =
6644 ecore_chain_produce(&rxq->rx_bd_ring);
6645 struct sw_rx_data *sw_rx_data_cons =
6646 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6647 struct sw_rx_data *sw_rx_data_prod =
6648 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6649
6650 sw_rx_data_prod->data = sw_rx_data_cons->data;
6651 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6652
6653 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6654 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6655
6656 return;
6657 }
6658
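/*
 * Name: qlnx_update_rx_prod
 * Function: Writes the current BD and CQE producer indices to the Rx queue's
 *	producer location in internal RAM so the firmware can see the newly
 *	posted buffers.
 */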
6659 static void
6660 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6661 {
6662
6663 uint16_t bd_prod;
6664 uint16_t cqe_prod;
6665 union {
6666 struct eth_rx_prod_data rx_prod_data;
6667 uint32_t data32;
6668 } rx_prods;
6669
6670 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6671 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6672
6673 /* Update producers */
6674 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6675 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6676
6677 /* Make sure that the BD and SGE data is updated before updating the
6678 * producers since FW might read the BD/SGE right after the producer
6679 * is updated.
6680 */
6681 wmb();
6682
6683 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6684 sizeof(rx_prods), &rx_prods.data32);
6685
6686 /* mmiowb is needed to synchronize doorbell writes from more than one
6687 * processor. It guarantees that the write arrives to the device before
6688 * the napi lock is released and another qlnx_poll is called (possibly
6689 * on another CPU). Without this barrier, the next doorbell can bypass
6690 * this doorbell. This is applicable to IA64/Altix systems.
6691 */
6692 wmb();
6693
6694 return;
6695 }
6696
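/* Default 40-byte RSS (Toeplitz) hash key, packed as ten 32-bit words */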
6697 static uint32_t qlnx_hash_key[] = {
6698 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6699 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6700 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6701 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6702 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6703 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6704 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6705 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6706 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6707 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6708
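/*
 * Name: qlnx_start_queues
 * Function: Starts the vport, then starts the Rx queue and the per traffic
 *	class Tx queues for every RSS fastpath, programs the Tx doorbell
 *	fields, fills in the RSS parameters when more than one queue is used
 *	and finally sends a vport update to enable Rx/Tx (and hardware TPA
 *	when enabled).
 */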
6709 static int
6710 qlnx_start_queues(qlnx_host_t *ha)
6711 {
6712 int rc, tc, i, vport_id = 0,
6713 drop_ttl0_flg = 1, vlan_removal_en = 1,
6714 tx_switching = 0, hw_lro_enable = 0;
6715 struct ecore_dev *cdev = &ha->cdev;
6716 struct ecore_rss_params *rss_params = &ha->rss_params;
6717 struct qlnx_update_vport_params vport_update_params;
6718 struct ifnet *ifp;
6719 struct ecore_hwfn *p_hwfn;
6720 struct ecore_sge_tpa_params tpa_params;
6721 struct ecore_queue_start_common_params qparams;
6722 struct qlnx_fastpath *fp;
6723
6724 ifp = ha->ifp;
6725
6726 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6727
6728 if (!ha->num_rss) {
6729 QL_DPRINT1(ha, "Cannot update V-PORT to active since there"
6730 " are no Rx queues\n");
6731 return -EINVAL;
6732 }
6733
6734 #ifndef QLNX_SOFT_LRO
6735 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6736 #endif /* #ifndef QLNX_SOFT_LRO */
6737
6738 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6739 vlan_removal_en, tx_switching, hw_lro_enable);
6740
6741 if (rc) {
6742 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6743 return rc;
6744 }
6745
6746 QL_DPRINT2(ha, "Start vport ramrod passed, "
6747 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6748 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6749
6750 for_each_rss(i) {
6751 struct ecore_rxq_start_ret_params rx_ret_params;
6752 struct ecore_txq_start_ret_params tx_ret_params;
6753
6754 fp = &ha->fp_array[i];
6755 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6756
6757 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6758 bzero(&rx_ret_params,
6759 sizeof (struct ecore_rxq_start_ret_params));
6760
6761 qparams.queue_id = i;
6762 qparams.vport_id = vport_id;
6763 qparams.stats_id = vport_id;
6764 qparams.p_sb = fp->sb_info;
6765 qparams.sb_idx = RX_PI;
6766
6768 rc = ecore_eth_rx_queue_start(p_hwfn,
6769 p_hwfn->hw_info.opaque_fid,
6770 &qparams,
6771 fp->rxq->rx_buf_size, /* bd_max_bytes */
6772 /* bd_chain_phys_addr */
6773 fp->rxq->rx_bd_ring.p_phys_addr,
6774 /* cqe_pbl_addr */
6775 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6776 /* cqe_pbl_size */
6777 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6778 &rx_ret_params);
6779
6780 if (rc) {
6781 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6782 return rc;
6783 }
6784
6785 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6786 fp->rxq->handle = rx_ret_params.p_handle;
6787 fp->rxq->hw_cons_ptr =
6788 &fp->sb_info->sb_virt->pi_array[RX_PI];
6789
6790 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6791
6792 for (tc = 0; tc < ha->num_tc; tc++) {
6793 struct qlnx_tx_queue *txq = fp->txq[tc];
6794
6795 bzero(&qparams,
6796 sizeof(struct ecore_queue_start_common_params));
6797 bzero(&tx_ret_params,
6798 sizeof (struct ecore_txq_start_ret_params));
6799
6800 qparams.queue_id = txq->index / cdev->num_hwfns;
6801 qparams.vport_id = vport_id;
6802 qparams.stats_id = vport_id;
6803 qparams.p_sb = fp->sb_info;
6804 qparams.sb_idx = TX_PI(tc);
6805
6806 rc = ecore_eth_tx_queue_start(p_hwfn,
6807 p_hwfn->hw_info.opaque_fid,
6808 &qparams, tc,
6809 /* bd_chain_phys_addr */
6810 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6811 ecore_chain_get_page_cnt(&txq->tx_pbl),
6812 &tx_ret_params);
6813
6814 if (rc) {
6815 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6816 txq->index, rc);
6817 return rc;
6818 }
6819
6820 txq->doorbell_addr = tx_ret_params.p_doorbell;
6821 txq->handle = tx_ret_params.p_handle;
6822
6823 txq->hw_cons_ptr =
6824 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6825 SET_FIELD(txq->tx_db.data.params,
6826 ETH_DB_DATA_DEST, DB_DEST_XCM);
6827 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6828 DB_AGG_CMD_SET);
6829 SET_FIELD(txq->tx_db.data.params,
6830 ETH_DB_DATA_AGG_VAL_SEL,
6831 DQ_XCM_ETH_TX_BD_PROD_CMD);
6832
6833 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6834 }
6835 }
6836
6837 /* Fill struct with RSS params */
6838 if (ha->num_rss > 1) {
6839 rss_params->update_rss_config = 1;
6840 rss_params->rss_enable = 1;
6841 rss_params->update_rss_capabilities = 1;
6842 rss_params->update_rss_ind_table = 1;
6843 rss_params->update_rss_key = 1;
6844 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6845 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6846 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6847
6848 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6849 fp = &ha->fp_array[(i % ha->num_rss)];
6850 rss_params->rss_ind_table[i] = fp->rxq->handle;
6851 }
6852
6853 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6854 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6855
6856 } else {
6857 memset(rss_params, 0, sizeof(*rss_params));
6858 }
6859
6860 /* Prepare and send the vport enable */
6861 memset(&vport_update_params, 0, sizeof(vport_update_params));
6862 vport_update_params.vport_id = vport_id;
6863 vport_update_params.update_vport_active_tx_flg = 1;
6864 vport_update_params.vport_active_tx_flg = 1;
6865 vport_update_params.update_vport_active_rx_flg = 1;
6866 vport_update_params.vport_active_rx_flg = 1;
6867 vport_update_params.rss_params = rss_params;
6868 vport_update_params.update_inner_vlan_removal_flg = 1;
6869 vport_update_params.inner_vlan_removal_flg = 1;
6870
6871 if (hw_lro_enable) {
6872 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6873
6874 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6875
6876 tpa_params.update_tpa_en_flg = 1;
6877 tpa_params.tpa_ipv4_en_flg = 1;
6878 tpa_params.tpa_ipv6_en_flg = 1;
6879
6880 tpa_params.update_tpa_param_flg = 1;
6881 tpa_params.tpa_pkt_split_flg = 0;
6882 tpa_params.tpa_hdr_data_split_flg = 0;
6883 tpa_params.tpa_gro_consistent_flg = 0;
6884 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6885 tpa_params.tpa_max_size = (uint16_t)(-1);
6886 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6887 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6888
6889 vport_update_params.sge_tpa_params = &tpa_params;
6890 }
6891
6892 rc = qlnx_update_vport(cdev, &vport_update_params);
6893 if (rc) {
6894 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6895 return rc;
6896 }
6897
6898 return 0;
6899 }
6900
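/*
 * Name: qlnx_drain_txq
 * Function: Waits for a Tx queue to drain by repeatedly running the Tx
 *	completion handler until the hardware consumer index matches the
 *	driver's chain consumer index.
 */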
6901 static int
6902 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6903 struct qlnx_tx_queue *txq)
6904 {
6905 uint16_t hw_bd_cons;
6906 uint16_t ecore_cons_idx;
6907
6908 QL_DPRINT2(ha, "enter\n");
6909
6910 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6911
6912 while (hw_bd_cons !=
6913 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6914 mtx_lock(&fp->tx_mtx);
6915
6916 (void)qlnx_tx_int(ha, fp, txq);
6917
6918 mtx_unlock(&fp->tx_mtx);
6919
6920 qlnx_mdelay(__func__, 2);
6921
6922 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6923 }
6924
6925 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6926
6927 return 0;
6928 }
6929
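/*
 * Name: qlnx_stop_queues
 * Function: Marks the vport Rx/Tx inactive, drains all Tx queues, stops the
 *	Tx and Rx queues in reverse order and then sends the vport stop
 *	ramrod on each hw function.
 */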
6930 static int
6931 qlnx_stop_queues(qlnx_host_t *ha)
6932 {
6933 struct qlnx_update_vport_params vport_update_params;
6934 struct ecore_dev *cdev;
6935 struct qlnx_fastpath *fp;
6936 int rc, tc, i;
6937
6938 cdev = &ha->cdev;
6939
6940 /* Disable the vport */
6941
6942 memset(&vport_update_params, 0, sizeof(vport_update_params));
6943
6944 vport_update_params.vport_id = 0;
6945 vport_update_params.update_vport_active_tx_flg = 1;
6946 vport_update_params.vport_active_tx_flg = 0;
6947 vport_update_params.update_vport_active_rx_flg = 1;
6948 vport_update_params.vport_active_rx_flg = 0;
6949 vport_update_params.rss_params = &ha->rss_params;
6950 vport_update_params.rss_params->update_rss_config = 0;
6951 vport_update_params.rss_params->rss_enable = 0;
6952 vport_update_params.update_inner_vlan_removal_flg = 0;
6953 vport_update_params.inner_vlan_removal_flg = 0;
6954
6955 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6956
6957 rc = qlnx_update_vport(cdev, &vport_update_params);
6958 if (rc) {
6959 QL_DPRINT1(ha, "Failed to update vport\n");
6960 return rc;
6961 }
6962
6963 /* Flush Tx queues. If needed, request drain from MCP */
6964 for_each_rss(i) {
6965 fp = &ha->fp_array[i];
6966
6967 for (tc = 0; tc < ha->num_tc; tc++) {
6968 struct qlnx_tx_queue *txq = fp->txq[tc];
6969
6970 rc = qlnx_drain_txq(ha, fp, txq);
6971 if (rc)
6972 return rc;
6973 }
6974 }
6975
6976 /* Stop all Queues in reverse order*/
6977 for (i = ha->num_rss - 1; i >= 0; i--) {
6978 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6979
6980 fp = &ha->fp_array[i];
6981
6982 /* Stop the Tx Queue(s)*/
6983 for (tc = 0; tc < ha->num_tc; tc++) {
6984 int tx_queue_id __unused;
6985
6986 tx_queue_id = tc * ha->num_rss + i;
6987 rc = ecore_eth_tx_queue_stop(p_hwfn,
6988 fp->txq[tc]->handle);
6989
6990 if (rc) {
6991 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6992 tx_queue_id);
6993 return rc;
6994 }
6995 }
6996
6997 /* Stop the Rx Queue*/
6998 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6999 false);
7000 if (rc) {
7001 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7002 return rc;
7003 }
7004 }
7005
7006 /* Stop the vport */
7007 for_each_hwfn(cdev, i) {
7008 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7009
7010 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7011
7012 if (rc) {
7013 QL_DPRINT1(ha, "Failed to stop VPORT\n");
7014 return rc;
7015 }
7016 }
7017
7018 return rc;
7019 }
7020
7021 static int
7022 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7023 enum ecore_filter_opcode opcode,
7024 unsigned char mac[ETH_ALEN])
7025 {
7026 struct ecore_filter_ucast ucast;
7027 struct ecore_dev *cdev;
7028 int rc;
7029
7030 cdev = &ha->cdev;
7031
7032 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7033
7034 ucast.opcode = opcode;
7035 ucast.type = ECORE_FILTER_MAC;
7036 ucast.is_rx_filter = 1;
7037 ucast.vport_to_add_to = 0;
7038 memcpy(&ucast.mac[0], mac, ETH_ALEN);
7039
7040 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7041
7042 return (rc);
7043 }
7044
7045 static int
7046 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7047 {
7048 struct ecore_filter_ucast ucast;
7049 struct ecore_dev *cdev;
7050 int rc;
7051
7052 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7053
7054 ucast.opcode = ECORE_FILTER_REPLACE;
7055 ucast.type = ECORE_FILTER_MAC;
7056 ucast.is_rx_filter = 1;
7057
7058 cdev = &ha->cdev;
7059
7060 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7061
7062 return (rc);
7063 }
7064
7065 static int
7066 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7067 {
7068 struct ecore_filter_mcast *mcast;
7069 struct ecore_dev *cdev;
7070 int rc, i;
7071
7072 cdev = &ha->cdev;
7073
7074 mcast = &ha->ecore_mcast;
7075 bzero(mcast, sizeof(struct ecore_filter_mcast));
7076
7077 mcast->opcode = ECORE_FILTER_REMOVE;
7078
7079 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7080 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7081 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7082 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7083 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7084 mcast->num_mc_addrs++;
7085 }
7086 }
7087 mcast = &ha->ecore_mcast;
7088
7089 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7090
7091 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7092 ha->nmcast = 0;
7093
7094 return (rc);
7095 }
7096
7097 static int
7098 qlnx_clean_filters(qlnx_host_t *ha)
7099 {
7100 int rc = 0;
7101
7102 /* Remove all unicast macs */
7103 rc = qlnx_remove_all_ucast_mac(ha);
7104 if (rc)
7105 return rc;
7106
7107 /* Remove all multicast macs */
7108 rc = qlnx_remove_all_mcast_mac(ha);
7109 if (rc)
7110 return rc;
7111
7112 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7113
7114 return (rc);
7115 }
7116
7117 static int
7118 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7119 {
7120 struct ecore_filter_accept_flags accept;
7121 int rc = 0;
7122 struct ecore_dev *cdev;
7123
7124 cdev = &ha->cdev;
7125
7126 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7127
7128 accept.update_rx_mode_config = 1;
7129 accept.rx_accept_filter = filter;
7130
7131 accept.update_tx_mode_config = 1;
7132 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7133 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7134
7135 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7136 ECORE_SPQ_MODE_CB, NULL);
7137
7138 return (rc);
7139 }
7140
7141 static int
7142 qlnx_set_rx_mode(qlnx_host_t *ha)
7143 {
7144 int rc = 0;
7145 uint8_t filter;
7146
7147 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7148 if (rc)
7149 return rc;
7150
7151 rc = qlnx_remove_all_mcast_mac(ha);
7152 if (rc)
7153 return rc;
7154
7155 filter = ECORE_ACCEPT_UCAST_MATCHED |
7156 ECORE_ACCEPT_MCAST_MATCHED |
7157 ECORE_ACCEPT_BCAST;
7158
7159 if (qlnx_vf_device(ha) == 0) {
7160 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7161 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7162 }
7163 ha->filter = filter;
7164
7165 rc = qlnx_set_rx_accept_filter(ha, filter);
7166
7167 return (rc);
7168 }
7169
7170 static int
7171 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7172 {
7173 int i, rc = 0;
7174 struct ecore_dev *cdev;
7175 struct ecore_hwfn *hwfn;
7176 struct ecore_ptt *ptt;
7177
7178 if (qlnx_vf_device(ha) == 0)
7179 return (0);
7180
7181 cdev = &ha->cdev;
7182
7183 for_each_hwfn(cdev, i) {
7184 hwfn = &cdev->hwfns[i];
7185
7186 ptt = ecore_ptt_acquire(hwfn);
7187 if (!ptt)
7188 return -EBUSY;
7189
7190 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7191
7192 ecore_ptt_release(hwfn, ptt);
7193
7194 if (rc)
7195 return rc;
7196 }
7197 return (rc);
7198 }
7199
7200 #if __FreeBSD_version >= 1100000
7201 static uint64_t
7202 qlnx_get_counter(if_t ifp, ift_counter cnt)
7203 {
7204 qlnx_host_t *ha;
7205 uint64_t count;
7206
7207 ha = (qlnx_host_t *)if_getsoftc(ifp);
7208
7209 switch (cnt) {
7210 case IFCOUNTER_IPACKETS:
7211 count = ha->hw_stats.common.rx_ucast_pkts +
7212 ha->hw_stats.common.rx_mcast_pkts +
7213 ha->hw_stats.common.rx_bcast_pkts;
7214 break;
7215
7216 case IFCOUNTER_IERRORS:
7217 count = ha->hw_stats.common.rx_crc_errors +
7218 ha->hw_stats.common.rx_align_errors +
7219 ha->hw_stats.common.rx_oversize_packets +
7220 ha->hw_stats.common.rx_undersize_packets;
7221 break;
7222
7223 case IFCOUNTER_OPACKETS:
7224 count = ha->hw_stats.common.tx_ucast_pkts +
7225 ha->hw_stats.common.tx_mcast_pkts +
7226 ha->hw_stats.common.tx_bcast_pkts;
7227 break;
7228
7229 case IFCOUNTER_OERRORS:
7230 count = ha->hw_stats.common.tx_err_drop_pkts;
7231 break;
7232
7233 case IFCOUNTER_COLLISIONS:
7234 return (0);
7235
7236 case IFCOUNTER_IBYTES:
7237 count = ha->hw_stats.common.rx_ucast_bytes +
7238 ha->hw_stats.common.rx_mcast_bytes +
7239 ha->hw_stats.common.rx_bcast_bytes;
7240 break;
7241
7242 case IFCOUNTER_OBYTES:
7243 count = ha->hw_stats.common.tx_ucast_bytes +
7244 ha->hw_stats.common.tx_mcast_bytes +
7245 ha->hw_stats.common.tx_bcast_bytes;
7246 break;
7247
7248 case IFCOUNTER_IMCASTS:
7249 count = ha->hw_stats.common.rx_mcast_pkts;
7250 break;
7251
7252 case IFCOUNTER_OMCASTS:
7253 count = ha->hw_stats.common.tx_mcast_pkts;
7254 break;
7255
7256 case IFCOUNTER_IQDROPS:
7257 case IFCOUNTER_OQDROPS:
7258 case IFCOUNTER_NOPROTO:
7259
7260 default:
7261 return (if_get_counter_default(ifp, cnt));
7262 }
7263 return (count);
7264 }
7265 #endif
7266
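/*
 * Name: qlnx_timer
 * Function: Periodic (once per second) callout - schedules the error
 *	recovery task when requested; otherwise refreshes the vport
 *	statistics, optionally samples the storm processor counters and
 *	re-arms itself.
 */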
7267 static void
7268 qlnx_timer(void *arg)
7269 {
7270 qlnx_host_t *ha;
7271
7272 ha = (qlnx_host_t *)arg;
7273
7274 if (ha->error_recovery) {
7275 ha->error_recovery = 0;
7276 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7277 return;
7278 }
7279
7280 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7281
7282 if (ha->storm_stats_gather)
7283 qlnx_sample_storm_stats(ha);
7284
7285 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7286
7287 return;
7288 }
7289
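/*
 * Name: qlnx_load
 * Function: Brings the interface up - allocates the fastpath and per queue
 *	memory, hooks up and binds the fastpath interrupts, starts the vport
 *	and queues, programs the Rx filters, requests link up and arms the
 *	periodic timer. On failure the completed steps are unwound.
 */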
7290 static int
7291 qlnx_load(qlnx_host_t *ha)
7292 {
7293 int i;
7294 int rc = 0;
7295 device_t dev;
7296
7297 dev = ha->pci_dev;
7298
7299 QL_DPRINT2(ha, "enter\n");
7300
7301 rc = qlnx_alloc_mem_arrays(ha);
7302 if (rc)
7303 goto qlnx_load_exit0;
7304
7305 qlnx_init_fp(ha);
7306
7307 rc = qlnx_alloc_mem_load(ha);
7308 if (rc)
7309 goto qlnx_load_exit1;
7310
7311 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7312 ha->num_rss, ha->num_tc);
7313
7314 for (i = 0; i < ha->num_rss; i++) {
7315 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7316 (INTR_TYPE_NET | INTR_MPSAFE),
7317 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7318 &ha->irq_vec[i].handle))) {
7319 QL_DPRINT1(ha, "could not setup interrupt\n");
7320 goto qlnx_load_exit2;
7321 }
7322
7323 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7324 irq %p handle %p\n", i,
7325 ha->irq_vec[i].irq_rid,
7326 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7327
7328 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7329 }
7330
7331 rc = qlnx_start_queues(ha);
7332 if (rc)
7333 goto qlnx_load_exit2;
7334
7335 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7336
7337 /* Add primary mac and set Rx filters */
7338 rc = qlnx_set_rx_mode(ha);
7339 if (rc)
7340 goto qlnx_load_exit2;
7341
7342 /* Ask for link-up using current configuration */
7343 qlnx_set_link(ha, true);
7344
7345 if (qlnx_vf_device(ha) == 0)
7346 qlnx_link_update(&ha->cdev.hwfns[0]);
7347
7348 ha->state = QLNX_STATE_OPEN;
7349
7350 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7351
7352 if (ha->flags.callout_init)
7353 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7354
7355 goto qlnx_load_exit0;
7356
7357 qlnx_load_exit2:
7358 qlnx_free_mem_load(ha);
7359
7360 qlnx_load_exit1:
7361 ha->num_rss = 0;
7362
7363 qlnx_load_exit0:
7364 QL_DPRINT2(ha, "exit [%d]\n", rc);
7365 return rc;
7366 }
7367
7368 static void
7369 qlnx_drain_soft_lro(qlnx_host_t *ha)
7370 {
7371 #ifdef QLNX_SOFT_LRO
7372
7373 struct ifnet *ifp;
7374 int i;
7375
7376 ifp = ha->ifp;
7377
7378 if (ifp->if_capenable & IFCAP_LRO) {
7379 for (i = 0; i < ha->num_rss; i++) {
7380 struct qlnx_fastpath *fp = &ha->fp_array[i];
7381 struct lro_ctrl *lro;
7382
7383 lro = &fp->rxq->lro;
7384
7385 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7386
7387 tcp_lro_flush_all(lro);
7388
7389 #else
7390 struct lro_entry *queued;
7391
7392 while ((!SLIST_EMPTY(&lro->lro_active))){
7393 queued = SLIST_FIRST(&lro->lro_active);
7394 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7395 tcp_lro_flush(lro, queued);
7396 }
7397
7398 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7399 }
7400 }
7401
7402 #endif /* #ifdef QLNX_SOFT_LRO */
7403
7404 return;
7405 }
7406
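/*
 * Name: qlnx_unload
 * Function: Reverse of qlnx_load - brings the link down, removes the Rx
 *	filters, stops the queues and the fastpath, tears down the fastpath
 *	interrupts, drains the taskqueues and soft LRO, frees the per queue
 *	memory, drains the timer and marks the device closed.
 */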
7407 static void
7408 qlnx_unload(qlnx_host_t *ha)
7409 {
7410 struct ecore_dev *cdev;
7411 device_t dev;
7412 int i;
7413
7414 cdev = &ha->cdev;
7415 dev = ha->pci_dev;
7416
7417 QL_DPRINT2(ha, "enter\n");
7418 QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
7419
7420 if (ha->state == QLNX_STATE_OPEN) {
7421 qlnx_set_link(ha, false);
7422 qlnx_clean_filters(ha);
7423 qlnx_stop_queues(ha);
7424 ecore_hw_stop_fastpath(cdev);
7425
7426 for (i = 0; i < ha->num_rss; i++) {
7427 if (ha->irq_vec[i].handle) {
7428 (void)bus_teardown_intr(dev,
7429 ha->irq_vec[i].irq,
7430 ha->irq_vec[i].handle);
7431 ha->irq_vec[i].handle = NULL;
7432 }
7433 }
7434
7435 qlnx_drain_fp_taskqueues(ha);
7436 qlnx_drain_soft_lro(ha);
7437 qlnx_free_mem_load(ha);
7438 }
7439
7440 if (ha->flags.callout_init)
7441 callout_drain(&ha->qlnx_callout);
7442
7443 qlnx_mdelay(__func__, 1000);
7444
7445 ha->state = QLNX_STATE_CLOSED;
7446
7447 QL_DPRINT2(ha, "exit\n");
7448 return;
7449 }
7450
7451 static int
7452 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7453 {
7454 int rval = -1;
7455 struct ecore_hwfn *p_hwfn;
7456 struct ecore_ptt *p_ptt;
7457
7458 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7459
7460 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7461 p_ptt = ecore_ptt_acquire(p_hwfn);
7462
7463 if (!p_ptt) {
7464 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7465 return (rval);
7466 }
7467
7468 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7469
7470 if (rval == DBG_STATUS_OK)
7471 rval = 0;
7472 else {
7473 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7474 "[0x%x]\n", rval);
7475 }
7476
7477 ecore_ptt_release(p_hwfn, p_ptt);
7478
7479 return (rval);
7480 }
7481
7482 static int
7483 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7484 {
7485 int rval = -1;
7486 struct ecore_hwfn *p_hwfn;
7487 struct ecore_ptt *p_ptt;
7488
7489 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7490
7491 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7492 p_ptt = ecore_ptt_acquire(p_hwfn);
7493
7494 if (!p_ptt) {
7495 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7496 return (rval);
7497 }
7498
7499 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7500
7501 if (rval == DBG_STATUS_OK)
7502 rval = 0;
7503 else {
7504 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7505 " [0x%x]\n", rval);
7506 }
7507
7508 ecore_ptt_release(p_hwfn, p_ptt);
7509
7510 return (rval);
7511 }
7512
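/*
 * Name: qlnx_sample_storm_stats
 * Function: Reads the active/stall/sleeping/inactive cycle counters of each
 *	storm processor (X/Y/P/T/M/U) on every hw function and stores one
 *	sample per call, up to QLNX_STORM_STATS_SAMPLES_PER_HWFN samples.
 */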
7513 static void
7514 qlnx_sample_storm_stats(qlnx_host_t *ha)
7515 {
7516 int i, index;
7517 struct ecore_dev *cdev;
7518 qlnx_storm_stats_t *s_stats;
7519 uint32_t reg;
7520 struct ecore_ptt *p_ptt;
7521 struct ecore_hwfn *hwfn;
7522
7523 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7524 ha->storm_stats_gather = 0;
7525 return;
7526 }
7527
7528 cdev = &ha->cdev;
7529
7530 for_each_hwfn(cdev, i) {
7531 hwfn = &cdev->hwfns[i];
7532
7533 p_ptt = ecore_ptt_acquire(hwfn);
7534 if (!p_ptt)
7535 return;
7536
7537 index = ha->storm_stats_index +
7538 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7539
7540 s_stats = &ha->storm_stats[index];
7541
7542 /* XSTORM */
7543 reg = XSEM_REG_FAST_MEMORY +
7544 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7545 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7546
7547 reg = XSEM_REG_FAST_MEMORY +
7548 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7549 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7550
7551 reg = XSEM_REG_FAST_MEMORY +
7552 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7553 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7554
7555 reg = XSEM_REG_FAST_MEMORY +
7556 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7557 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7558
7559 /* YSTORM */
7560 reg = YSEM_REG_FAST_MEMORY +
7561 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7562 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7563
7564 reg = YSEM_REG_FAST_MEMORY +
7565 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7566 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7567
7568 reg = YSEM_REG_FAST_MEMORY +
7569 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7570 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7571
7572 reg = YSEM_REG_FAST_MEMORY +
7573 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7574 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7575
7576 /* PSTORM */
7577 reg = PSEM_REG_FAST_MEMORY +
7578 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7579 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7580
7581 reg = PSEM_REG_FAST_MEMORY +
7582 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7583 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7584
7585 reg = PSEM_REG_FAST_MEMORY +
7586 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7587 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7588
7589 reg = PSEM_REG_FAST_MEMORY +
7590 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7591 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7592
7593 /* TSTORM */
7594 reg = TSEM_REG_FAST_MEMORY +
7595 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7596 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7597
7598 reg = TSEM_REG_FAST_MEMORY +
7599 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7600 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7601
7602 reg = TSEM_REG_FAST_MEMORY +
7603 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7604 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7605
7606 reg = TSEM_REG_FAST_MEMORY +
7607 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7608 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7609
7610 /* MSTORM */
7611 reg = MSEM_REG_FAST_MEMORY +
7612 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7613 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7614
7615 reg = MSEM_REG_FAST_MEMORY +
7616 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7617 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7618
7619 reg = MSEM_REG_FAST_MEMORY +
7620 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7621 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7622
7623 reg = MSEM_REG_FAST_MEMORY +
7624 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7625 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7626
7627 /* USTORM */
7628 reg = USEM_REG_FAST_MEMORY +
7629 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7630 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7631
7632 reg = USEM_REG_FAST_MEMORY +
7633 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7634 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7635
7636 reg = USEM_REG_FAST_MEMORY +
7637 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7638 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7639
7640 reg = USEM_REG_FAST_MEMORY +
7641 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7642 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7643
7644 ecore_ptt_release(hwfn, p_ptt);
7645 }
7646
7647 ha->storm_stats_index++;
7648
7649 return;
7650 }
7651
7652 /*
7653 * Name: qlnx_dump_buf8
7654 * Function: dumps a buffer as bytes
7655 */
7656 static void
7657 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7658 {
7659 device_t dev;
7660 uint32_t i = 0;
7661 uint8_t *buf;
7662
7663 dev = ha->pci_dev;
7664 buf = dbuf;
7665
7666 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7667
7668 while (len >= 16) {
7669 device_printf(dev,"0x%08x:"
7670 " %02x %02x %02x %02x %02x %02x %02x %02x"
7671 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7672 buf[0], buf[1], buf[2], buf[3],
7673 buf[4], buf[5], buf[6], buf[7],
7674 buf[8], buf[9], buf[10], buf[11],
7675 buf[12], buf[13], buf[14], buf[15]);
7676 i += 16;
7677 len -= 16;
7678 buf += 16;
7679 }
7680 switch (len) {
7681 case 1:
7682 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7683 break;
7684 case 2:
7685 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7686 break;
7687 case 3:
7688 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7689 i, buf[0], buf[1], buf[2]);
7690 break;
7691 case 4:
7692 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7693 buf[0], buf[1], buf[2], buf[3]);
7694 break;
7695 case 5:
7696 device_printf(dev,"0x%08x:"
7697 " %02x %02x %02x %02x %02x\n", i,
7698 buf[0], buf[1], buf[2], buf[3], buf[4]);
7699 break;
7700 case 6:
7701 device_printf(dev,"0x%08x:"
7702 " %02x %02x %02x %02x %02x %02x\n", i,
7703 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7704 break;
7705 case 7:
7706 device_printf(dev,"0x%08x:"
7707 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7708 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7709 break;
7710 case 8:
7711 device_printf(dev,"0x%08x:"
7712 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7713 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7714 buf[7]);
7715 break;
7716 case 9:
7717 device_printf(dev,"0x%08x:"
7718 " %02x %02x %02x %02x %02x %02x %02x %02x"
7719 " %02x\n", i,
7720 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7721 buf[7], buf[8]);
7722 break;
7723 case 10:
7724 device_printf(dev,"0x%08x:"
7725 " %02x %02x %02x %02x %02x %02x %02x %02x"
7726 " %02x %02x\n", i,
7727 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7728 buf[7], buf[8], buf[9]);
7729 break;
7730 case 11:
7731 device_printf(dev,"0x%08x:"
7732 " %02x %02x %02x %02x %02x %02x %02x %02x"
7733 " %02x %02x %02x\n", i,
7734 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7735 buf[7], buf[8], buf[9], buf[10]);
7736 break;
7737 case 12:
7738 device_printf(dev,"0x%08x:"
7739 " %02x %02x %02x %02x %02x %02x %02x %02x"
7740 " %02x %02x %02x %02x\n", i,
7741 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7742 buf[7], buf[8], buf[9], buf[10], buf[11]);
7743 break;
7744 case 13:
7745 device_printf(dev,"0x%08x:"
7746 " %02x %02x %02x %02x %02x %02x %02x %02x"
7747 " %02x %02x %02x %02x %02x\n", i,
7748 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7749 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7750 break;
7751 case 14:
7752 device_printf(dev,"0x%08x:"
7753 " %02x %02x %02x %02x %02x %02x %02x %02x"
7754 " %02x %02x %02x %02x %02x %02x\n", i,
7755 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7756 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7757 buf[13]);
7758 break;
7759 case 15:
7760 device_printf(dev,"0x%08x:"
7761 " %02x %02x %02x %02x %02x %02x %02x %02x"
7762 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7763 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7764 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7765 buf[13], buf[14]);
7766 break;
7767 default:
7768 break;
7769 }
7770
7771 device_printf(dev, "%s: %s dump end\n", __func__, msg);
7772
7773 return;
7774 }
7775
7776 #ifdef CONFIG_ECORE_SRIOV
7777
7778 static void
7779 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7780 {
7781 struct ecore_public_vf_info *vf_info;
7782
7783 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7784
7785 if (!vf_info)
7786 return;
7787
7788 /* Clear the VF mac */
7789 memset(vf_info->forced_mac, 0, ETH_ALEN);
7790
7791 vf_info->forced_vlan = 0;
7792
7793 return;
7794 }
7795
7796 void
7797 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
7798 {
7799 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
7800 return;
7801 }
7802
7803 static int
7804 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7805 struct ecore_filter_ucast *params)
7806 {
7807 struct ecore_public_vf_info *vf;
7808
7809 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7810 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7811 "VF[%d] vport not initialized\n", vfid);
7812 return ECORE_INVAL;
7813 }
7814
7815 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7816 if (!vf)
7817 return -EINVAL;
7818
7819 /* No real decision to make; Store the configured MAC */
7820 if (params->type == ECORE_FILTER_MAC ||
7821 params->type == ECORE_FILTER_MAC_VLAN)
7822 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7823
7824 return 0;
7825 }
7826
7827 int
7828 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
7829 {
7830 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
7831 }
7832
7833 static int
7834 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
7835 struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
7836 {
7837 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
7838 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7839 "VF[%d] vport not initialized\n", vfid);
7840 return ECORE_INVAL;
7841 }
7842
7843 /* Untrusted VFs can't even be trusted to know that fact.
7844 * Simply indicate everything is configured fine, and trace
7845 * configuration 'behind their back'.
7846 */
7847 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
7848 return 0;
7849
7850 return 0;
7851 }
7852
7853 int
7854 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
7855 {
7856 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
7857 }
7858
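/* Returns the index of p_hwfn within its device's hwfns[] array, or -1 */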
7859 static int
7860 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7861 {
7862 int i;
7863 struct ecore_dev *cdev;
7864
7865 cdev = p_hwfn->p_dev;
7866
7867 for (i = 0; i < cdev->num_hwfns; i++) {
7868 if (&cdev->hwfns[i] == p_hwfn)
7869 break;
7870 }
7871
7872 if (i >= cdev->num_hwfns)
7873 return (-1);
7874
7875 return (i);
7876 }
7877
7878 static int
7879 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7880 {
7881 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7882 int i;
7883
7884 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7885 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7886
7887 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7888 return (-1);
7889
7890 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7891 atomic_testandset_32(&ha->sriov_task[i].flags,
7892 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7893
7894 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7895 &ha->sriov_task[i].pf_task);
7896 }
7897
7898 return (ECORE_SUCCESS);
7899 }
7900
7901 int
7902 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
7903 {
7904 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
7905 }
7906
7907 static void
7908 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7909 {
7910 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7911 int i;
7912
7913 if (!ha->sriov_initialized)
7914 return;
7915
7916 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7917 ha, p_hwfn->p_dev, p_hwfn);
7918
7919 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7920 return;
7921
7922 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7923 atomic_testandset_32(&ha->sriov_task[i].flags,
7924 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7925
7926 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7927 &ha->sriov_task[i].pf_task);
7928 }
7929
7930 return;
7931 }
7932
7933 void
7934 qlnx_vf_flr_update(void *p_hwfn)
7935 {
7936 __qlnx_vf_flr_update(p_hwfn);
7937
7938 return;
7939 }
7940
7941 #ifndef QLNX_VF
7942
7943 static void
7944 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7945 {
7946 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7947 int i;
7948
7949 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7950 ha, p_hwfn->p_dev, p_hwfn);
7951
7952 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7953 return;
7954
7955 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7956 ha, p_hwfn->p_dev, p_hwfn, i);
7957
7958 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7959 atomic_testandset_32(&ha->sriov_task[i].flags,
7960 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7961
7962 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7963 &ha->sriov_task[i].pf_task);
7964 }
7965 }
7966
7967 static void
7968 qlnx_initialize_sriov(qlnx_host_t *ha)
7969 {
7970 device_t dev;
7971 nvlist_t *pf_schema, *vf_schema;
7972 int iov_error;
7973
7974 dev = ha->pci_dev;
7975
7976 pf_schema = pci_iov_schema_alloc_node();
7977 vf_schema = pci_iov_schema_alloc_node();
7978
7979 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7980 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7981 IOV_SCHEMA_HASDEFAULT, FALSE);
7982 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7983 IOV_SCHEMA_HASDEFAULT, FALSE);
7984 pci_iov_schema_add_uint16(vf_schema, "num-queues",
7985 IOV_SCHEMA_HASDEFAULT, 1);
7986
7987 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7988
7989 if (iov_error != 0) {
7990 ha->sriov_initialized = 0;
7991 } else {
7992 device_printf(dev, "SRIOV initialized\n");
7993 ha->sriov_initialized = 1;
7994 }
7995
7996 return;
7997 }
7998
7999 static void
8000 qlnx_sriov_disable(qlnx_host_t *ha)
8001 {
8002 struct ecore_dev *cdev;
8003 int i, j;
8004
8005 cdev = &ha->cdev;
8006
8007 ecore_iov_set_vfs_to_disable(cdev, true);
8008
8009 for_each_hwfn(cdev, i) {
8010 struct ecore_hwfn *hwfn = &cdev->hwfns[i];
8011 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8012
8013 if (!ptt) {
8014 QL_DPRINT1(ha, "Failed to acquire ptt\n");
8015 return;
8016 }
8017 /* Clean WFQ db and configure equal weight for all vports */
8018 ecore_clean_wfq_db(hwfn, ptt);
8019
8020 ecore_for_each_vf(hwfn, j) {
8021 int k = 0;
8022
8023 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
8024 continue;
8025
8026 if (ecore_iov_is_vf_started(hwfn, j)) {
8027 /* Wait until VF is disabled before releasing */
8028
8029 for (k = 0; k < 100; k++) {
8030 if (!ecore_iov_is_vf_stopped(hwfn, j)) {
8031 qlnx_mdelay(__func__, 10);
8032 } else
8033 break;
8034 }
8035 }
8036
8037 if (k < 100)
8038 ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
8039 ptt, j);
8040 else {
8041 QL_DPRINT1(ha,
8042 "Timeout waiting for VF's FLR to end\n");
8043 }
8044 }
8045 ecore_ptt_release(hwfn, ptt);
8046 }
8047
8048 ecore_iov_set_vfs_to_disable(cdev, false);
8049
8050 return;
8051 }
8052
8053 static void
8054 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
8055 struct ecore_iov_vf_init_params *params)
8056 {
8057 u16 base, i;
8058
8059 /* Since we have an equal resource distribution per-VF, and we assume
8060 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
8061 * sequentially from there.
8062 */
8063 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
8064
8065 params->rel_vf_id = vfid;
8066
8067 for (i = 0; i < params->num_queues; i++) {
8068 params->req_rx_queue[i] = base + i;
8069 params->req_tx_queue[i] = base + i;
8070 }
8071
8072 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
8073 params->vport_id = vfid + 1;
8074 params->rss_eng_id = vfid + 1;
8075
8076 return;
8077 }
8078
8079 static int
8080 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
8081 {
8082 qlnx_host_t *ha;
8083 struct ecore_dev *cdev;
8084 struct ecore_iov_vf_init_params params;
8085 int ret, j, i;
8086 uint32_t max_vfs;
8087
8088 if ((ha = device_get_softc(dev)) == NULL) {
8089 device_printf(dev, "%s: cannot get softc\n", __func__);
8090 return (-1);
8091 }
8092
8093 if (qlnx_create_pf_taskqueues(ha) != 0)
8094 goto qlnx_iov_init_err0;
8095
8096 cdev = &ha->cdev;
8097
8098 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8099
8100 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8101 dev, num_vfs, max_vfs);
8102
8103 if (num_vfs >= max_vfs) {
8104 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8105 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8106 goto qlnx_iov_init_err0;
8107 }
8108
8109 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8110 M_NOWAIT);
8111
8112 if (ha->vf_attr == NULL)
8113 goto qlnx_iov_init_err0;
8114
8115 memset(&params, 0, sizeof(params));
8116
8117 /* Initialize HW for VF access */
8118 for_each_hwfn(cdev, j) {
8119 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8120 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8121
8122 /* Make sure not to use more than 16 queues per VF */
8123 params.num_queues = min_t(int,
8124 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8125 16);
8126
8127 if (!ptt) {
8128 QL_DPRINT1(ha, "Failed to acquire ptt\n");
8129 goto qlnx_iov_init_err1;
8130 }
8131
8132 for (i = 0; i < num_vfs; i++) {
8133 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8134 continue;
8135
8136 qlnx_sriov_enable_qid_config(hwfn, i, &params);
8137
8138 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8139
8140 if (ret) {
8141 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8142 ecore_ptt_release(hwfn, ptt);
8143 goto qlnx_iov_init_err1;
8144 }
8145 }
8146
8147 ecore_ptt_release(hwfn, ptt);
8148 }
8149
8150 ha->num_vfs = num_vfs;
8151 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8152
8153 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8154
8155 return (0);
8156
8157 qlnx_iov_init_err1:
8158 qlnx_sriov_disable(ha);
8159
8160 qlnx_iov_init_err0:
8161 qlnx_destroy_pf_taskqueues(ha);
8162 ha->num_vfs = 0;
8163
8164 return (-1);
8165 }
8166
8167 static void
8168 qlnx_iov_uninit(device_t dev)
8169 {
8170 qlnx_host_t *ha;
8171
8172 if ((ha = device_get_softc(dev)) == NULL) {
8173 device_printf(dev, "%s: cannot get softc\n", __func__);
8174 return;
8175 }
8176
8177 QL_DPRINT2(ha," dev = %p enter\n", dev);
8178
8179 qlnx_sriov_disable(ha);
8180 qlnx_destroy_pf_taskqueues(ha);
8181
8182 free(ha->vf_attr, M_QLNXBUF);
8183 ha->vf_attr = NULL;
8184
8185 ha->num_vfs = 0;
8186
8187 QL_DPRINT2(ha," dev = %p exit\n", dev);
8188 return;
8189 }
8190
8191 static int
8192 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8193 {
8194 qlnx_host_t *ha;
8195 qlnx_vf_attr_t *vf_attr;
8196 unsigned const char *mac;
8197 size_t size;
8198 struct ecore_hwfn *p_hwfn;
8199
8200 if ((ha = device_get_softc(dev)) == NULL) {
8201 device_printf(dev, "%s: cannot get softc\n", __func__);
8202 return (-1);
8203 }
8204
8205 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8206
8207 if (vfnum > (ha->num_vfs - 1)) {
8208 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8209 vfnum, (ha->num_vfs - 1));
8210 }
8211
8212 vf_attr = &ha->vf_attr[vfnum];
8213
8214 if (nvlist_exists_binary(params, "mac-addr")) {
8215 mac = nvlist_get_binary(params, "mac-addr", &size);
8216 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8217 device_printf(dev,
8218 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
8219 __func__, vf_attr->mac_addr[0],
8220 vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8221 vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8222 vf_attr->mac_addr[5]);
8223 p_hwfn = &ha->cdev.hwfns[0];
8224 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8225 vfnum);
8226 }
8227
8228 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8229 return (0);
8230 }
8231
8232 static void
8233 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8234 {
8235 uint64_t events[ECORE_VF_ARRAY_LENGTH];
8236 struct ecore_ptt *ptt;
8237 int i;
8238
8239 ptt = ecore_ptt_acquire(p_hwfn);
8240 if (!ptt) {
8241 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8242 __qlnx_pf_vf_msg(p_hwfn, 0);
8243 return;
8244 }
8245
8246 ecore_iov_pf_get_pending_events(p_hwfn, events);
8247
8248 QL_DPRINT2(ha, "Event mask of VF events:"
8249 "0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
8250 events[0], events[1], events[2]);
8251
8252 ecore_for_each_vf(p_hwfn, i) {
8253 /* Skip VFs with no pending messages */
8254 if (!(events[i / 64] & (1ULL << (i % 64))))
8255 continue;
8256
8257 QL_DPRINT2(ha,
8258 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8259 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8260
8261 /* Copy VF's message to PF's request buffer for that VF */
8262 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8263 continue;
8264
8265 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8266 }
8267
8268 ecore_ptt_release(p_hwfn, ptt);
8269
8270 return;
8271 }
8272
8273 static void
8274 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8275 {
8276 struct ecore_ptt *ptt;
8277 int ret;
8278
8279 ptt = ecore_ptt_acquire(p_hwfn);
8280
8281 if (!ptt) {
8282 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8283 __qlnx_vf_flr_update(p_hwfn);
8284 return;
8285 }
8286
8287 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8288
8289 if (ret) {
8290 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8291 }
8292
8293 ecore_ptt_release(p_hwfn, ptt);
8294
8295 return;
8296 }
8297
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}

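/*
 * qlnx_pf_taskqueue
 * Per-hwfn taskqueue handler for SR-IOV work: tests and clears the
 * sriov_task flags and dispatches VF message, FLR and bulletin-update
 * handling accordingly.
 */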
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}

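/*
 * qlnx_create_pf_taskqueues
 * Creates one single-threaded taskqueue per hardware function (at PI_NET
 * priority) to service SR-IOV events; returns -1 if any taskqueue cannot
 * be created.
 */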
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int i;
	char tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}

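/*
 * qlnx_destroy_pf_taskqueues
 * Drains and frees the per-hwfn SR-IOV taskqueues.
 */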
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}

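/*
 * qlnx_inform_vf_link_state
 * Copies the PF's current MCP link parameters, state and capabilities
 * into the bulletin of every possible VF (reporting 100G when the link
 * is up on CMT/dual-hwfn devices) and triggers a bulletin update.
 */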
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */

		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/*
			 * Set speed according to the maximum supported by
			 * the HW: 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
				100000 : link.speed;
		}
		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */