/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * File: qlnx_def.h
 * Author: David C Somayajulu, Cavium Inc., San Jose, CA 95131.
 */

#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_

#define VER_SIZE		16

struct qlnx_ivec {
        uint32_t        rss_idx;
        void            *ha;
        struct resource *irq;
        void            *handle;
        int             irq_rid;
};

typedef struct qlnx_ivec qlnx_ivec_t;

//#define QLNX_MAX_RSS		30
#define QLNX_MAX_VF_RSS		4
#define QLNX_MAX_RSS		36
#define QLNX_DEFAULT_RSS	16
#define QLNX_MAX_TC		1

enum QLNX_STATE {
        QLNX_STATE_CLOSED,
        QLNX_STATE_OPEN,
};

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

#define MAX_NUM_TC		8
#define MAX_NUM_PRI		8

#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE		8
#endif /* #ifndef BITS_PER_BYTE */
/*
 * An RX ring buffer entry carries only a pointer to the receive data buffer.
 */
struct sw_rx_data {
        void            *data;
        bus_dmamap_t    map;
        dma_addr_t      dma_addr;
};

enum qlnx_agg_state {
        QLNX_AGG_STATE_NONE  = 0,
        QLNX_AGG_STATE_START = 1,
        QLNX_AGG_STATE_ERROR = 2
};

struct qlnx_agg_info {
        /* rx_buf is a data buffer that can be placed on / consumed from the
         * rx bd chain. It has two purposes: we will preallocate the data
         * buffer for each aggregation when we open the interface and will
         * place this buffer on the rx-bd-ring when we receive TPA_START. We
         * don't want to be in a state where allocation fails, as we can't
         * reuse the consumer buffer in the rx-chain since the FW may still be
         * writing to it (since the header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * aggregation.
         */
        struct sw_rx_data       rx_buf;
        enum qlnx_agg_state     agg_state;
        uint16_t                placement_offset;
        struct mbuf             *mpf; /* first mbuf in chain */
        struct mbuf             *mpl; /* last mbuf in chain */
};
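
/*
 * Illustrative TPA_CONT handling sketch (hypothetical variable names,
 * not the driver's actual receive path): continuation mbufs are
 * appended to the chain anchored at mpf, with mpl tracking the tail.
 *
 *	struct qlnx_agg_info *tpa = &rxq->tpa_info[agg_index];
 *
 *	if (tpa->agg_state == QLNX_AGG_STATE_START) {
 *		mp->m_flags &= ~M_PKTHDR;
 *		tpa->mpl->m_next = mp;	// append after current tail
 *		tpa->mpl = mp;		// new last mbuf in the chain
 *	}
 */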

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		(1 << RX_RING_SIZE_POW)

#define TX_RING_SIZE_POW	14
#define TX_RING_SIZE		(1 << TX_RING_SIZE_POW)
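
/*
 * Both ring sizes are powers of two, so the software producer/consumer
 * indices can be mapped to ring slots with a simple mask; a minimal
 * sketch (the sw_rx_ring/sw_rx_cons fields are declared just below):
 *
 *	struct sw_rx_data *rx_data =
 *	    &rxq->sw_rx_ring[rxq->sw_rx_cons & (RX_RING_SIZE - 1)];
 */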

struct qlnx_rx_queue {
        volatile __le16         *hw_cons_ptr;
        struct sw_rx_data       sw_rx_ring[RX_RING_SIZE];
        uint16_t                sw_rx_cons;
        uint16_t                sw_rx_prod;
        struct ecore_chain      rx_bd_ring;
        struct ecore_chain      rx_comp_ring;
        void __iomem            *hw_rxq_prod_addr;
        void                    *handle;

        /* LRO */
        struct qlnx_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM];

        uint32_t                rx_buf_size;

        uint16_t                num_rx_buffers;
        uint16_t                rxq_id;

#ifdef QLNX_SOFT_LRO
        struct lro_ctrl         lro;
#endif
};

union db_prod {
        struct eth_db_data      data;
        uint32_t                raw;
};
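
/*
 * Usage sketch (illustrative only; assumes struct eth_db_data from the
 * ecore headers exposes a bd_prod field and that the ecore API provides
 * ecore_chain_get_prod_idx(); the qlnx_tx_queue fields used here are
 * declared later in this header): the union lets the driver compose the
 * doorbell record field by field, then post it as one 32-bit write.
 *
 *	txq->tx_db.data.bd_prod =
 *	    htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 *	*((volatile uint32_t *)txq->doorbell_addr) = txq->tx_db.raw;
 */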

struct sw_tx_bd {
        struct mbuf             *mp;
        bus_dmamap_t            map;
        uint8_t                 flags;
        int                     nsegs;

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD	(1 << 0)
};

#define QLNX_MAX_SEGMENTS	255
struct qlnx_tx_queue {
        int                     index; /* Queue index */
        volatile __le16         *hw_cons_ptr;
        struct sw_tx_bd         sw_tx_ring[TX_RING_SIZE];
        uint16_t                sw_tx_cons;
        uint16_t                sw_tx_prod;
        struct ecore_chain      tx_pbl;
        void __iomem            *doorbell_addr;
        void                    *handle;
        union db_prod           tx_db;

        bus_dma_segment_t       segs[QLNX_MAX_SEGMENTS];

        uint16_t                num_tx_buffers;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
				    le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))

#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = htole32(U64_HI(maddr)); \
                (bd)->addr.lo = htole32(U64_LO(maddr)); \
                (bd)->nbytes = htole16(len); \
        } while (0)
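
/*
 * Usage sketch (illustrative only; assumes a BD type from the ecore
 * headers, e.g. struct eth_tx_1st_bd, with addr.hi/addr.lo/nbytes
 * fields): addresses and lengths are stored little-endian in the BD,
 * and HILO_U64() reassembles the 64-bit DMA address on the way back.
 *
 *	struct eth_tx_1st_bd *bd = ...;	// first BD of the packet
 *	bus_dma_segment_t *segs = ...;	// from bus_dmamap_load_mbuf_sg()
 *
 *	BD_SET_UNMAP_ADDR_LEN(bd, segs[0].ds_addr, segs[0].ds_len);
 *	...
 *	bus_addr_t daddr = BD_UNMAP_ADDR(bd);	// == segs[0].ds_addr
 *	uint16_t dlen = BD_UNMAP_LEN(bd);	// == segs[0].ds_len
 */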

#define QLNX_FP_MAX_SEGS	24

struct qlnx_fastpath {
        void                    *edev;
        uint8_t                 rss_id;
        struct ecore_sb_info    *sb_info;
        struct qlnx_rx_queue    *rxq;
        struct qlnx_tx_queue    *txq[MAX_NUM_TC];
        char                    name[64];

        struct mtx              tx_mtx;
        char                    tx_mtx_name[32];
        struct buf_ring         *tx_br;
        uint32_t                tx_ring_full;

        struct task             fp_task;
        struct taskqueue        *fp_taskqueue;

        /* transmit statistics */
        uint64_t                tx_pkts_processed;
        uint64_t                tx_pkts_freed;
        uint64_t                tx_pkts_transmitted;
        uint64_t                tx_pkts_completed;
        uint64_t                tx_tso_pkts;
        uint64_t                tx_non_tso_pkts;

#ifdef QLNX_TRACE_PERF_DATA
        uint64_t                tx_pkts_trans_ctx;
        uint64_t                tx_pkts_compl_ctx;
        uint64_t                tx_pkts_trans_fp;
        uint64_t                tx_pkts_compl_fp;
        uint64_t                tx_pkts_compl_intr;
#endif

        uint64_t                tx_lso_wnd_min_len;
        uint64_t                tx_defrag;
        uint64_t                tx_nsegs_gt_elem_left;
        uint32_t                tx_tso_max_nsegs;
        uint32_t                tx_tso_min_nsegs;
        uint32_t                tx_tso_max_pkt_len;
        uint32_t                tx_tso_min_pkt_len;
        uint64_t                tx_pkts[QLNX_FP_MAX_SEGS];

#ifdef QLNX_TRACE_PERF_DATA
        uint64_t                tx_pkts_hist[QLNX_FP_MAX_SEGS];
        uint64_t                tx_comInt[QLNX_FP_MAX_SEGS];
        uint64_t                tx_pkts_q[QLNX_FP_MAX_SEGS];
#endif

        uint64_t                err_tx_nsegs_gt_elem_left;
        uint64_t                err_tx_dmamap_create;
        uint64_t                err_tx_defrag_dmamap_load;
        uint64_t                err_tx_non_tso_max_seg;
        uint64_t                err_tx_dmamap_load;
        uint64_t                err_tx_defrag;
        uint64_t                err_tx_free_pkt_null;
        uint64_t                err_tx_cons_idx_conflict;

        uint64_t                lro_cnt_64;
        uint64_t                lro_cnt_128;
        uint64_t                lro_cnt_256;
        uint64_t                lro_cnt_512;
        uint64_t                lro_cnt_1024;

        /* receive statistics */
        uint64_t                rx_pkts;
        uint64_t                tpa_start;
        uint64_t                tpa_cont;
        uint64_t                tpa_end;
        uint64_t                err_m_getcl;
        uint64_t                err_m_getjcl;
        uint64_t                err_rx_hw_errors;
        uint64_t                err_rx_alloc_errors;
        uint64_t                err_rx_jumbo_chain_pkts;
        uint64_t                err_rx_mp_null;
        uint64_t                err_rx_tpa_invalid_agg_num;
};

struct qlnx_update_vport_params {
        uint8_t                 vport_id;
        uint8_t                 update_vport_active_rx_flg;
        uint8_t                 vport_active_rx_flg;
        uint8_t                 update_vport_active_tx_flg;
        uint8_t                 vport_active_tx_flg;
        uint8_t                 update_inner_vlan_removal_flg;
        uint8_t                 inner_vlan_removal_flg;
        struct ecore_rss_params         *rss_params;
        struct ecore_sge_tpa_params     *sge_tpa_params;
};

/*
 * link related
 */
struct qlnx_link_output {
        bool            link_up;
        uint32_t        supported_caps;
        uint32_t        advertised_caps;
        uint32_t        link_partner_caps;
        uint32_t        speed; /* In Mb/s */
        bool            autoneg;
        uint32_t        media_type;
        uint32_t        duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;

#define QLNX_LINK_DUPLEX		0x0001

#define QLNX_LINK_CAP_FIBRE		0x0001
#define QLNX_LINK_CAP_Autoneg		0x0002
#define QLNX_LINK_CAP_Pause		0x0004
#define QLNX_LINK_CAP_Asym_Pause	0x0008
#define QLNX_LINK_CAP_1000baseT_Half	0x0010
#define QLNX_LINK_CAP_1000baseT_Full	0x0020
#define QLNX_LINK_CAP_10000baseKR_Full	0x0040
#define QLNX_LINK_CAP_25000baseKR_Full	0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full	0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full	0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400

/* Function definitions */

#define XMIT_PLAIN		0
#define XMIT_L4_CSUM		(1 << 0)
#define XMIT_LSO		(1 << 1)

#define CQE_FLAGS_ERR	(PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)

#define RX_COPY_THRESH			92
#define ETH_MAX_PACKET_SIZE		1500

#define QLNX_MFW_VERSION_LENGTH		32
#define QLNX_STORMFW_VERSION_LENGTH	32

#define QLNX_TX_ELEM_RESERVE		2
#define QLNX_TX_ELEM_THRESH		128
#define QLNX_TX_ELEM_MAX_THRESH		512
#define QLNX_TX_ELEM_MIN_THRESH		32
#define QLNX_TX_COMPL_THRESH		32

#define QLNX_TPA_MAX_AGG_BUFFERS	(20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
        uint16_t        rsrvd;
        uint8_t         addr[6];
} __packed qlnx_mcast_t;

typedef struct _qlnx_vf_attr {
        uint8_t         mac_addr[ETHER_ADDR_LEN];
        uint32_t        num_rings;
} qlnx_vf_attr_t;

typedef struct _qlnx_sriov_task {
        struct task             pf_task;
        struct taskqueue        *pf_taskqueue;

#define QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG		0x01
#define QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE	0x02
#define QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE	0x04
        volatile uint32_t       flags;

} qlnx_sriov_task_t;

/*
 * The adapter structure contains the hardware-independent information of
 * the PCI function.
 */
struct qlnx_host {
        /* interface to ecore */

        struct ecore_dev        cdev;

        uint32_t                state;

        /* some flags */
        volatile struct {
                volatile uint32_t
                        hw_init                 :1,
                        callout_init            :1,
                        slowpath_start          :1,
                        parent_tag              :1,
                        lock_init               :1;
        } flags;

        /* interface to the OS */

        device_t                pci_dev;
        uint8_t                 pci_func;
        uint8_t                 dev_unit;
        uint16_t                device_id;

        struct ifnet            *ifp;
        int                     if_flags;
        volatile int            link_up;
        struct ifmedia          media;
        uint16_t                max_frame_size;

        struct cdev             *ioctl_dev;

        /* resources */
        struct resource         *pci_reg;
        int                     reg_rid;

        struct resource         *pci_dbells;
        int                     dbells_rid;
        uint64_t                dbells_phys_addr;
        uint32_t                dbells_size;

        struct resource         *msix_bar;
        int                     msix_rid;

        int                     msix_count;

        struct mtx              hw_lock;

        /* debug */

        uint32_t                dbg_level;
        uint32_t                dbg_trace_lro_cnt;
        uint32_t                dbg_trace_tso_pkt_len;
        uint32_t                dp_level;
        uint32_t                dp_module;

        /* misc */
        uint8_t                 mfw_ver[QLNX_MFW_VERSION_LENGTH];
        uint8_t                 stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
        uint32_t                flash_size;

        /* dma related */

        bus_dma_tag_t           parent_tag;
        bus_dma_tag_t           tx_tag;
        bus_dma_tag_t           rx_tag;

        struct ecore_sb_info    sb_array[QLNX_MAX_RSS];
        struct qlnx_rx_queue    rxq_array[QLNX_MAX_RSS];
        struct qlnx_tx_queue    txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
        struct qlnx_fastpath    fp_array[QLNX_MAX_RSS];

        /* tx related */
        struct callout          tx_callout;
        uint32_t                txr_idx;

        /* rx related */
        uint32_t                rx_pkt_threshold;
        uint32_t                rx_jumbo_buf_eq_mtu;

        /* slow path related */
        struct resource         *sp_irq[MAX_HWFNS_PER_DEVICE];
        void                    *sp_handle[MAX_HWFNS_PER_DEVICE];
        int                     sp_irq_rid[MAX_HWFNS_PER_DEVICE];
        struct task             sp_task[MAX_HWFNS_PER_DEVICE];
        struct taskqueue        *sp_taskqueue[MAX_HWFNS_PER_DEVICE];

        struct callout          qlnx_callout;

        /* fast path related */
        int                     num_rss;
        int                     num_tc;

#define QLNX_MAX_TSS_CNT(ha)	(((ha)->num_rss) * ((ha)->num_tc))

        qlnx_ivec_t             irq_vec[QLNX_MAX_RSS];

        uint8_t                 filter;
        uint32_t                nmcast;
        qlnx_mcast_t            mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
        struct ecore_filter_mcast       ecore_mcast;
        uint8_t                 primary_mac[ETH_ALEN];
        uint8_t                 prio_to_tc[MAX_NUM_PRI];
        struct ecore_eth_stats  hw_stats;
        struct ecore_rss_params rss_params;
        uint32_t                rx_buf_size;
        bool                    rx_csum_offload;

        uint32_t                rx_coalesce_usecs;
        uint32_t                tx_coalesce_usecs;

        /* link related */
        qlnx_link_output_t      if_link;

        /* global counters */
        uint64_t                sp_interrupts;
        uint64_t                err_illegal_intr;
        uint64_t                err_fp_null;
        uint64_t                err_get_proto_invalid_type;

        /* error recovery related */
        uint32_t                error_recovery;
        struct task             err_task;
        struct taskqueue        *err_taskqueue;

        /* grcdump related */
        uint32_t                err_inject;
        uint32_t                grcdump_taken;
        uint32_t                grcdump_dwords[QLNX_MAX_HW_FUNCS];
        uint32_t                grcdump_size[QLNX_MAX_HW_FUNCS];
        void                    *grcdump[QLNX_MAX_HW_FUNCS];

        uint32_t                idle_chk_taken;
        uint32_t                idle_chk_dwords[QLNX_MAX_HW_FUNCS];
        uint32_t                idle_chk_size[QLNX_MAX_HW_FUNCS];
        void                    *idle_chk[QLNX_MAX_HW_FUNCS];

        /* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
        qlnx_storm_stats_t      storm_stats[QLNX_STORM_STATS_TOTAL];
        uint32_t                storm_stats_index;
        uint32_t                storm_stats_enable;
        uint32_t                storm_stats_gather;

        uint32_t                personality;

        uint16_t                sriov_initialized;
        uint16_t                num_vfs;
        qlnx_vf_attr_t          *vf_attr;
        qlnx_sriov_task_t       sriov_task[MAX_HWFNS_PER_DEVICE];
        uint32_t                curr_vf;

        void                    *next;
        void                    *qlnx_rdma;
        volatile int            qlnxr_debug;
};

typedef struct qlnx_host qlnx_host_t;

/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align)	(((size) + ((align) - 1)) & (~((align) - 1)))
#define QL_MIN(x, y)		(((x) < (y)) ? (x) : (y))
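
/*
 * Example: QL_ALIGN(100, 64) rounds 100 up to 128; the add-then-mask
 * trick requires align to be a power of two.
 */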

#define QL_RUNNING(ifp) \
	(((ifp)->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
	    IFF_DRV_RUNNING)

#define QLNX_MAX_MTU			9000
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
//#define QLNX_MAX_TSO_FRAME_SIZE	((64 * 1024 - 1) + 22)
#define QLNX_MAX_TSO_FRAME_SIZE		65536
#define QLNX_MAX_TX_MBUF_SIZE		65536	/* bytes; bd_len is 16 bits */

#define QL_MAC_CMP(mac1, mac2) \
	(((*(uint32_t *)(mac1) == *(uint32_t *)(mac2)) && \
	  (*(uint16_t *)((mac1) + 4) == *(uint16_t *)((mac2) + 4))) ? 0 : 1)
#define for_each_rss(i)	for (i = 0; i < ha->num_rss; i++)
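
/*
 * Usage sketch (illustrative only; for_each_rss() expects a variable
 * named "ha" in scope, and QL_MAC_CMP() returns 0 when the two 6-byte
 * MAC addresses are equal):
 *
 *	int i;
 *
 *	for_each_rss(i) {
 *		struct qlnx_fastpath *fp = &ha->fp_array[i];
 *		// per-queue work on fp->rxq / fp->txq[] goes here
 *	}
 *
 *	if (QL_MAC_CMP(ha->primary_mac, new_mac) == 0)
 *		return;	// unchanged MAC address
 */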

/*
 * Debug Related
 */

#ifdef QLNX_DEBUG

#define QL_DPRINT1(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0001) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT2(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0002) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT3(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0004) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT4(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0008) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT5(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0010) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT6(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0020) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT7(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0040) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT8(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0080) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT9(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0100) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT11(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0400) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT12(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x0800) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#define QL_DPRINT13(ha, x, ...) \
        do { \
                if ((ha)->dbg_level & 0x1000) { \
                        device_printf((ha)->pci_dev, \
                            "[%s:%d]" x, \
                            __func__, __LINE__, \
                            ## __VA_ARGS__); \
                } \
        } while (0)

#else

#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)

#endif /* #ifdef QLNX_DEBUG */
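
/*
 * Each QL_DPRINTn level is gated by its own dbg_level bit (e.g. bit
 * 0x0002 for QL_DPRINT2); a typical call:
 *
 *	QL_DPRINT2(ha, "rss_id = %d, tx_ring_full = %d\n",
 *	    fp->rss_id, fp->tx_ring_full);
 */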

#define QL_ASSERT(ha, x, y)	if (!(x)) panic y

#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == val)
#define QL_RESET_ERR_INJECT(ha, val) \
	{if ((ha)->err_inject == val) (ha)->err_inject = 0;}
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002

/*
 * exported functions
 */
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
	int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
	int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
	struct qlnx_link_output *if_link);
extern int qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs);
extern int qlnx_vf_device(qlnx_host_t *ha);
extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info);
extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
	u16 sb_id);

/*
 * Some OS specific stuff
 */

#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4	IFM_100G_SR4
#define QLNX_IFM_100G_LR4	IFM_100G_LR4
#define QLNX_IFM_100G_CR4	IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4	IFM_UNKNOWN
#define QLNX_IFM_100G_LR4	IFM_UNKNOWN
#define QLNX_IFM_100G_CR4	IFM_UNKNOWN
#endif /* #if (defined IFM_100G_SR4) */

#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR		IFM_25G_SR
#define QLNX_IFM_25G_CR		IFM_25G_CR
#else
#define QLNX_IFM_25G_SR		IFM_UNKNOWN
#define QLNX_IFM_25G_CR		IFM_UNKNOWN
#endif /* #if (defined IFM_25G_SR) */

#if __FreeBSD_version < 1100000

#define QLNX_INC_IERRORS(ifp)		(ifp)->if_ierrors++
#define QLNX_INC_IQDROPS(ifp)		(ifp)->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp)		(ifp)->if_ipackets++
#define QLNX_INC_OPACKETS(ifp)		(ifp)->if_opackets++
#define QLNX_INC_OBYTES(ifp, len)	(ifp)->if_obytes += (len)
#define QLNX_INC_IBYTES(ifp, len)	(ifp)->if_ibytes += (len)

#else

#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)

#define QLNX_INC_OBYTES(ifp, len)	\
	if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
#define QLNX_INC_IBYTES(ifp, len)	\
	if_inc_counter(ifp, IFCOUNTER_IBYTES, len)

#endif /* #if __FreeBSD_version < 1100000 */
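
/*
 * These wrappers keep interface statistics updates source compatible
 * across FreeBSD versions; e.g. after handing a frame to the stack:
 *
 *	QLNX_INC_IPACKETS(ifp);
 *	QLNX_INC_IBYTES(ifp, len);
 */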

#define CQE_L3_PACKET(flags) \
	((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
	 (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))

#define CQE_IP_HDR_ERR(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
	    << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

#define CQE_L4_HAS_CSUM(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
	    << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
	    << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
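
/*
 * Illustrative receive-completion sketch using the helpers above
 * (hypothetical variable names; "flags" is the le16toh()'d
 * parsing-and-error word from the RX CQE):
 *
 *	if (CQE_L3_PACKET(flags) && !CQE_IP_HDR_ERR(flags) &&
 *	    CQE_L4_HAS_CSUM(flags)) {
 *		mp->m_pkthdr.csum_flags |=
 *		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 *		mp->m_pkthdr.csum_data = 0xFFFF;
 *	}
 *	if (CQE_HAS_VLAN(flags)) {
 *		mp->m_pkthdr.ether_vtag = le16toh(vlan_tag);
 *		mp->m_flags |= M_VLANTAG;
 *	}
 */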

#ifndef QLNX_RDMA
#if defined(__i386__) || defined(__amd64__)

static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

#else
#define prefetch(x)
#endif
#endif

#endif /* #ifndef _QLNX_DEF_H_ */